summaryrefslogtreecommitdiff
path: root/deps/v8/src
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2018-09-07 17:07:13 +0200
committerMichaël Zasso <targos@protonmail.com>2018-09-07 20:59:13 +0200
commit586db2414a338e1bf6eaf6e672a3adc7ce309f6a (patch)
tree139fa972aef648481ddee22a3a85b99707d28df5 /deps/v8/src
parent12ed7c94e5160aa6d38e3d2cb2a73dae0a6f9342 (diff)
downloadandroid-node-v8-586db2414a338e1bf6eaf6e672a3adc7ce309f6a.tar.gz
android-node-v8-586db2414a338e1bf6eaf6e672a3adc7ce309f6a.tar.bz2
android-node-v8-586db2414a338e1bf6eaf6e672a3adc7ce309f6a.zip
deps: update V8 to 6.9.427.22
PR-URL: https://github.com/nodejs/node/pull/21983 Reviewed-By: Refael Ackermann <refack@gmail.com> Reviewed-By: Gus Caplan <me@gus.host> Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/accessors.cc361
-rw-r--r--deps/v8/src/accessors.h16
-rw-r--r--deps/v8/src/address-map.h175
-rw-r--r--deps/v8/src/allocation.h4
-rw-r--r--deps/v8/src/api-arguments-inl.h3
-rw-r--r--deps/v8/src/api-arguments.h12
-rw-r--r--deps/v8/src/api-natives.cc38
-rw-r--r--deps/v8/src/api-natives.h2
-rw-r--r--deps/v8/src/api.cc907
-rw-r--r--deps/v8/src/api.h12
-rw-r--r--deps/v8/src/arguments.h2
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h64
-rw-r--r--deps/v8/src/arm/assembler-arm.cc138
-rw-r--r--deps/v8/src/arm/assembler-arm.h135
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc319
-rw-r--r--deps/v8/src/arm/constants-arm.h49
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/arm/frame-constants-arm.h16
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc93
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.h26
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc254
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h74
-rw-r--r--deps/v8/src/arm/simulator-arm.cc4
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h29
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc120
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h83
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc334
-rw-r--r--deps/v8/src/arm64/constants-arm64.h6
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc50
-rw-r--r--deps/v8/src/arm64/frame-constants-arm64.h18
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc14
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h7
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc1
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc94
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.h26
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h61
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc290
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h124
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc4
-rw-r--r--deps/v8/src/asmjs/asm-js.cc10
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc6
-rw-r--r--deps/v8/src/asmjs/asm-parser.h9
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc19
-rw-r--r--deps/v8/src/assembler.cc265
-rw-r--r--deps/v8/src/assembler.h320
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h22
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc2
-rw-r--r--deps/v8/src/ast/ast-value-factory.h10
-rw-r--r--deps/v8/src/ast/ast.cc121
-rw-r--r--deps/v8/src/ast/ast.h251
-rw-r--r--deps/v8/src/ast/compile-time-value.cc53
-rw-r--r--deps/v8/src/ast/compile-time-value.h46
-rw-r--r--deps/v8/src/ast/modules.cc17
-rw-r--r--deps/v8/src/ast/modules.h31
-rw-r--r--deps/v8/src/ast/prettyprinter.cc21
-rw-r--r--deps/v8/src/ast/prettyprinter.h16
-rw-r--r--deps/v8/src/ast/scopes.cc205
-rw-r--r--deps/v8/src/ast/scopes.h29
-rw-r--r--deps/v8/src/ast/variables.cc2
-rw-r--r--deps/v8/src/ast/variables.h9
-rw-r--r--deps/v8/src/async-hooks-wrapper.cc259
-rw-r--r--deps/v8/src/async-hooks-wrapper.h95
-rw-r--r--deps/v8/src/bailout-reason.h3
-rw-r--r--deps/v8/src/base/atomic-utils.h40
-rw-r--r--deps/v8/src/base/flags.h40
-rw-r--r--deps/v8/src/base/list.h136
-rw-r--r--deps/v8/src/base/macros.h4
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc2
-rw-r--r--deps/v8/src/base/platform/platform.h4
-rw-r--r--deps/v8/src/base/platform/time.cc31
-rw-r--r--deps/v8/src/bootstrapper.cc2798
-rw-r--r--deps/v8/src/bootstrapper.h6
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc559
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc595
-rw-r--r--deps/v8/src/builtins/array-foreach.tq173
-rw-r--r--deps/v8/src/builtins/array-sort.tq8
-rw-r--r--deps/v8/src/builtins/array.tq202
-rw-r--r--deps/v8/src/builtins/base.tq658
-rw-r--r--deps/v8/src/builtins/builtins-api.cc12
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc650
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h46
-rw-r--r--deps/v8/src/builtins/builtins-array.cc207
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc3
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc93
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc122
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h28
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc43
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc141
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc7
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc4
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc210
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.h21
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc6
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc119
-rw-r--r--deps/v8/src/builtins/builtins-collections.cc9
-rw-r--r--deps/v8/src/builtins/builtins-console-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-console.cc48
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc134
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-data-view-gen.h67
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc294
-rw-r--r--deps/v8/src/builtins/builtins-date.cc185
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h715
-rw-r--r--deps/v8/src/builtins/builtins-descriptors.h25
-rw-r--r--deps/v8/src/builtins/builtins-error.cc2
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc41
-rw-r--r--deps/v8/src/builtins/builtins-function.cc18
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-global.cc2
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc181
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc2
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc199
-rw-r--r--deps/v8/src/builtins/builtins-json.cc2
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc203
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h37
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-math.cc5
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc60
-rw-r--r--deps/v8/src/builtins/builtins-number.cc24
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc182
-rw-r--r--deps/v8/src/builtins/builtins-object.cc51
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc90
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc18
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc197
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc18
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc13
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc102
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h6
-rw-r--r--deps/v8/src/builtins/builtins-string.cc57
-rw-r--r--deps/v8/src/builtins/builtins-symbol-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc2
-rw-r--r--deps/v8/src/builtins/builtins-trace.cc191
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc154
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h17
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc17
-rw-r--r--deps/v8/src/builtins/builtins-utils-gen.h5
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc106
-rw-r--r--deps/v8/src/builtins/builtins.cc120
-rw-r--r--deps/v8/src/builtins/builtins.h43
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc18
-rw-r--r--deps/v8/src/builtins/data-view.tq896
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc600
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc517
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc540
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc540
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc570
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc77
-rw-r--r--deps/v8/src/builtins/typed-array.tq213
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc633
-rw-r--r--deps/v8/src/cancelable-task.h6
-rw-r--r--deps/v8/src/code-events.h9
-rw-r--r--deps/v8/src/code-factory.cc227
-rw-r--r--deps/v8/src/code-factory.h26
-rw-r--r--deps/v8/src/code-reference.h10
-rw-r--r--deps/v8/src/code-stub-assembler.cc1049
-rw-r--r--deps/v8/src/code-stub-assembler.h721
-rw-r--r--deps/v8/src/code-stubs.cc185
-rw-r--r--deps/v8/src/code-stubs.h212
-rw-r--r--deps/v8/src/code-tracer.h83
-rw-r--r--deps/v8/src/compilation-cache.cc32
-rw-r--r--deps/v8/src/compilation-cache.h4
-rw-r--r--deps/v8/src/compilation-dependencies.cc153
-rw-r--r--deps/v8/src/compilation-dependencies.h74
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc55
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h1
-rw-r--r--deps/v8/src/compiler.cc167
-rw-r--r--deps/v8/src/compiler.h5
-rw-r--r--deps/v8/src/compiler/OWNERS2
-rw-r--r--deps/v8/src/compiler/access-builder.cc11
-rw-r--r--deps/v8/src/compiler/access-builder.h6
-rw-r--r--deps/v8/src/compiler/access-info.cc86
-rw-r--r--deps/v8/src/compiler/access-info.h8
-rw-r--r--deps/v8/src/compiler/allocation-builder.h5
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc209
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc62
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc141
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h3
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc109
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc12
-rw-r--r--deps/v8/src/compiler/branch-elimination.h1
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc159
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h1
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.cc1
-rw-r--r--deps/v8/src/compiler/code-assembler.cc414
-rw-r--r--deps/v8/src/compiler/code-assembler.h216
-rw-r--r--deps/v8/src/compiler/code-generator.cc100
-rw-r--r--deps/v8/src/compiler/code-generator.h24
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc20
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h3
-rw-r--r--deps/v8/src/compiler/common-operator.cc57
-rw-r--r--deps/v8/src/compiler/common-operator.h17
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc396
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h77
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.cc13
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.h5
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc1
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc440
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h2
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h1
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc1
-rw-r--r--deps/v8/src/compiler/escape-analysis.h1
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc6
-rw-r--r--deps/v8/src/compiler/graph-assembler.h6
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc12
-rw-r--r--deps/v8/src/compiler/graph-trimmer.cc5
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc21
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h1
-rw-r--r--deps/v8/src/compiler/graph.cc5
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc868
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h51
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc51
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc612
-rw-r--r--deps/v8/src/compiler/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc2
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h17
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc298
-rw-r--r--deps/v8/src/compiler/instruction-selector.h123
-rw-r--r--deps/v8/src/compiler/instruction.cc35
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc836
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h16
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc63
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h6
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc690
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h23
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc40
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-graph.cc29
-rw-r--r--deps/v8/src/compiler/js-graph.h3
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc848
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h432
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc8
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h1
-rw-r--r--deps/v8/src/compiler/js-inlining.cc15
-rw-r--r--deps/v8/src/compiler/js-inlining.h1
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc6
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc243
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h13
-rw-r--r--deps/v8/src/compiler/js-operator.cc13
-rw-r--r--deps/v8/src/compiler/js-operator.h24
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc3
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc192
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h5
-rw-r--r--deps/v8/src/compiler/jump-threading.cc7
-rw-r--r--deps/v8/src/compiler/jump-threading.h3
-rw-r--r--deps/v8/src/compiler/linkage.cc89
-rw-r--r--deps/v8/src/compiler/linkage.h11
-rw-r--r--deps/v8/src/compiler/load-elimination.cc9
-rw-r--r--deps/v8/src/compiler/load-elimination.h1
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc13
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc10
-rw-r--r--deps/v8/src/compiler/machine-operator.cc1
-rw-r--r--deps/v8/src/compiler/machine-operator.h4
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc10
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc94
-rw-r--r--deps/v8/src/compiler/mips/instruction-scheduler-mips.cc1713
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc41
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc110
-rw-r--r--deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc1666
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc40
-rw-r--r--deps/v8/src/compiler/node-matchers.h109
-rw-r--r--deps/v8/src/compiler/node-origin-table.cc10
-rw-r--r--deps/v8/src/compiler/node-origin-table.h13
-rw-r--r--deps/v8/src/compiler/node-properties.cc29
-rw-r--r--deps/v8/src/compiler/node-properties.h14
-rw-r--r--deps/v8/src/compiler/node.cc16
-rw-r--r--deps/v8/src/compiler/node.h3
-rw-r--r--deps/v8/src/compiler/opcodes.h4
-rw-r--r--deps/v8/src/compiler/operation-typer.cc27
-rw-r--r--deps/v8/src/compiler/operation-typer.h3
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc9
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h5
-rw-r--r--deps/v8/src/compiler/pipeline.cc652
-rw-r--r--deps/v8/src/compiler/pipeline.h52
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc181
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc4
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc37
-rw-r--r--deps/v8/src/compiler/property-access-builder.h20
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc14
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h5
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc2
-rw-r--r--deps/v8/src/compiler/register-allocator.cc111
-rw-r--r--deps/v8/src/compiler/register-allocator.h33
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc277
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc4
-rw-r--r--deps/v8/src/compiler/schedule.cc13
-rw-r--r--deps/v8/src/compiler/scheduler.cc8
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc180
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h9
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc66
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc14
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h8
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc60
-rw-r--r--deps/v8/src/compiler/simplified-operator.h32
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc1
-rw-r--r--deps/v8/src/compiler/type-narrowing-reducer.cc12
-rw-r--r--deps/v8/src/compiler/type-narrowing-reducer.h3
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc107
-rw-r--r--deps/v8/src/compiler/typed-optimization.h16
-rw-r--r--deps/v8/src/compiler/typer.cc590
-rw-r--r--deps/v8/src/compiler/typer.h7
-rw-r--r--deps/v8/src/compiler/types.cc155
-rw-r--r--deps/v8/src/compiler/types.h58
-rw-r--r--deps/v8/src/compiler/verifier.cc13
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc828
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h100
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc158
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc124
-rw-r--r--deps/v8/src/contexts-inl.h10
-rw-r--r--deps/v8/src/contexts.cc60
-rw-r--r--deps/v8/src/contexts.h20
-rw-r--r--deps/v8/src/conversions.cc9
-rw-r--r--deps/v8/src/conversions.h5
-rw-r--r--deps/v8/src/counters-inl.h5
-rw-r--r--deps/v8/src/counters.cc21
-rw-r--r--deps/v8/src/counters.h55
-rw-r--r--deps/v8/src/d8.cc202
-rw-r--r--deps/v8/src/d8.h58
-rw-r--r--deps/v8/src/debug/OWNERS1
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc4
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc4
-rw-r--r--deps/v8/src/debug/debug-coverage.cc4
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc211
-rw-r--r--deps/v8/src/debug/debug-evaluate.h24
-rw-r--r--deps/v8/src/debug/debug-frames.cc119
-rw-r--r--deps/v8/src/debug/debug-frames.h34
-rw-r--r--deps/v8/src/debug/debug-interface.h105
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.cc40
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.h2
-rw-r--r--deps/v8/src/debug/debug-scopes.cc1141
-rw-r--r--deps/v8/src/debug/debug-scopes.h114
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc7
-rw-r--r--deps/v8/src/debug/debug-type-profile.cc2
-rw-r--r--deps/v8/src/debug/debug.cc838
-rw-r--r--deps/v8/src/debug/debug.h173
-rw-r--r--deps/v8/src/debug/debug.js379
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc4
-rw-r--r--deps/v8/src/debug/interface-types.h41
-rw-r--r--deps/v8/src/debug/liveedit.cc1773
-rw-r--r--deps/v8/src/debug/liveedit.h343
-rw-r--r--deps/v8/src/debug/liveedit.js1058
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc4
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc4
-rw-r--r--deps/v8/src/debug/mirrors.js2439
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc7
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc7
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc2
-rw-r--r--deps/v8/src/deoptimizer.cc335
-rw-r--r--deps/v8/src/deoptimizer.h79
-rw-r--r--deps/v8/src/disassembler.cc110
-rw-r--r--deps/v8/src/disassembler.h1
-rw-r--r--deps/v8/src/elements-kind.cc3
-rw-r--r--deps/v8/src/elements-kind.h1
-rw-r--r--deps/v8/src/elements.cc355
-rw-r--r--deps/v8/src/elements.h7
-rw-r--r--deps/v8/src/execution.cc15
-rw-r--r--deps/v8/src/execution.h11
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc8
-rw-r--r--deps/v8/src/external-reference-table.cc3
-rw-r--r--deps/v8/src/external-reference-table.h16
-rw-r--r--deps/v8/src/external-reference.cc45
-rw-r--r--deps/v8/src/external-reference.h12
-rw-r--r--deps/v8/src/feedback-vector-inl.h52
-rw-r--r--deps/v8/src/feedback-vector.cc311
-rw-r--r--deps/v8/src/feedback-vector.h23
-rw-r--r--deps/v8/src/flag-definitions.h87
-rw-r--r--deps/v8/src/flags.cc36
-rw-r--r--deps/v8/src/frame-constants.h7
-rw-r--r--deps/v8/src/frames-inl.h4
-rw-r--r--deps/v8/src/frames.cc203
-rw-r--r--deps/v8/src/frames.h45
-rw-r--r--deps/v8/src/futex-emulation.cc172
-rw-r--r--deps/v8/src/futex-emulation.h33
-rw-r--r--deps/v8/src/gdb-jit.cc50
-rw-r--r--deps/v8/src/global-handles.cc104
-rw-r--r--deps/v8/src/global-handles.h15
-rw-r--r--deps/v8/src/globals.h119
-rw-r--r--deps/v8/src/handles-inl.h3
-rw-r--r--deps/v8/src/handles.cc4
-rw-r--r--deps/v8/src/handles.h6
-rw-r--r--deps/v8/src/heap-symbols.h31
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc10
-rw-r--r--deps/v8/src/heap/array-buffer-collector.h4
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h31
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc45
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h11
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc111
-rw-r--r--deps/v8/src/heap/concurrent-marking.h11
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc9
-rw-r--r--deps/v8/src/heap/embedder-tracing.h24
-rw-r--r--deps/v8/src/heap/factory-inl.h34
-rw-r--r--deps/v8/src/heap/factory.cc415
-rw-r--r--deps/v8/src/heap/factory.h70
-rw-r--r--deps/v8/src/heap/gc-tracer.cc53
-rw-r--r--deps/v8/src/heap/gc-tracer.h5
-rw-r--r--deps/v8/src/heap/heap-controller.cc160
-rw-r--r--deps/v8/src/heap/heap-controller.h55
-rw-r--r--deps/v8/src/heap/heap-inl.h95
-rw-r--r--deps/v8/src/heap/heap.cc991
-rw-r--r--deps/v8/src/heap/heap.h692
-rw-r--r--deps/v8/src/heap/incremental-marking.cc137
-rw-r--r--deps/v8/src/heap/incremental-marking.h15
-rw-r--r--deps/v8/src/heap/item-parallel-job.h11
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h70
-rw-r--r--deps/v8/src/heap/mark-compact.cc558
-rw-r--r--deps/v8/src/heap/mark-compact.h147
-rw-r--r--deps/v8/src/heap/marking.h20
-rw-r--r--deps/v8/src/heap/memory-reducer.cc15
-rw-r--r--deps/v8/src/heap/memory-reducer.h7
-rw-r--r--deps/v8/src/heap/object-stats.cc76
-rw-r--r--deps/v8/src/heap/object-stats.h102
-rw-r--r--deps/v8/src/heap/objects-visiting.cc15
-rw-r--r--deps/v8/src/heap/objects-visiting.h76
-rw-r--r--deps/v8/src/heap/remembered-set.h14
-rw-r--r--deps/v8/src/heap/scavenger-inl.h24
-rw-r--r--deps/v8/src/heap/scavenger.cc12
-rw-r--r--deps/v8/src/heap/scavenger.h8
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc219
-rw-r--r--deps/v8/src/heap/spaces-inl.h6
-rw-r--r--deps/v8/src/heap/spaces.cc682
-rw-r--r--deps/v8/src/heap/spaces.h454
-rw-r--r--deps/v8/src/heap/sweeper.cc25
-rw-r--r--deps/v8/src/heap/sweeper.h2
-rw-r--r--deps/v8/src/heap/worklist.h43
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h22
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc132
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h70
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc313
-rw-r--r--deps/v8/src/ia32/constants-ia32.h23
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc3
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc51
-rw-r--r--deps/v8/src/ia32/frame-constants-ia32.h16
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc90
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc139
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h143
-rw-r--r--deps/v8/src/ia32/sse-instr.h15
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc446
-rw-r--r--deps/v8/src/ic/accessor-assembler.h30
-rw-r--r--deps/v8/src/ic/call-optimization.cc33
-rw-r--r--deps/v8/src/ic/call-optimization.h10
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h5
-rw-r--r--deps/v8/src/ic/handler-configuration.cc8
-rw-r--r--deps/v8/src/ic/ic-inl.h6
-rw-r--r--deps/v8/src/ic/ic.cc180
-rw-r--r--deps/v8/src/ic/ic.h5
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc18
-rw-r--r--deps/v8/src/ic/stub-cache.cc25
-rw-r--r--deps/v8/src/ic/stub-cache.h6
-rw-r--r--deps/v8/src/identity-map.cc20
-rw-r--r--deps/v8/src/inspector/PRESUBMIT.py3
-rw-r--r--deps/v8/src/inspector/injected-script.cc1
-rw-r--r--deps/v8/src/inspector/inspected-context.cc2
-rw-r--r--deps/v8/src/inspector/js_protocol.json69
-rw-r--r--deps/v8/src/inspector/js_protocol.pdl39
-rw-r--r--deps/v8/src/inspector/v8-console.cc29
-rw-r--r--deps/v8/src/inspector/v8-console.h6
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc118
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h9
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc80
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h2
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc118
-rw-r--r--deps/v8/src/inspector/v8-debugger.h28
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc1
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h13
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc114
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h15
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc21
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h13
-rw-r--r--deps/v8/src/instruction-stream.cc18
-rw-r--r--deps/v8/src/instruction-stream.h2
-rw-r--r--deps/v8/src/interface-descriptors.cc532
-rw-r--r--deps/v8/src/interface-descriptors.h852
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc25
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc37
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h20
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc58
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h8
-rw-r--r--deps/v8/src/interpreter/bytecode-node.h83
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecodes.h5
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h16
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc227
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h17
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc72
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc12
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h2
-rw-r--r--deps/v8/src/interpreter/interpreter.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter.h2
-rw-r--r--deps/v8/src/interpreter/setup-interpreter-internal.cc7
-rw-r--r--deps/v8/src/intl.cc2
-rw-r--r--deps/v8/src/intl.h10
-rw-r--r--deps/v8/src/isolate-inl.h11
-rw-r--r--deps/v8/src/isolate.cc604
-rw-r--r--deps/v8/src/isolate.h334
-rw-r--r--deps/v8/src/js/OWNERS1
-rw-r--r--deps/v8/src/js/array.js94
-rw-r--r--deps/v8/src/js/intl.js142
-rw-r--r--deps/v8/src/js/macros.py8
-rw-r--r--deps/v8/src/js/typedarray.js6
-rw-r--r--deps/v8/src/json-parser.cc25
-rw-r--r--deps/v8/src/json-parser.h11
-rw-r--r--deps/v8/src/json-stringifier.cc33
-rw-r--r--deps/v8/src/json-stringifier.h38
-rw-r--r--deps/v8/src/keys.cc82
-rw-r--r--deps/v8/src/keys.h14
-rw-r--r--deps/v8/src/label.h14
-rw-r--r--deps/v8/src/layout-descriptor-inl.h8
-rw-r--r--deps/v8/src/layout-descriptor.cc37
-rw-r--r--deps/v8/src/layout-descriptor.h10
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc13
-rw-r--r--deps/v8/src/log-utils.cc2
-rw-r--r--deps/v8/src/log.cc317
-rw-r--r--deps/v8/src/log.h54
-rw-r--r--deps/v8/src/lookup.cc126
-rw-r--r--deps/v8/src/lookup.h18
-rw-r--r--deps/v8/src/macro-assembler.h6
-rw-r--r--deps/v8/src/map-updater.cc102
-rw-r--r--deps/v8/src/map-updater.h6
-rw-r--r--deps/v8/src/messages.cc83
-rw-r--r--deps/v8/src/messages.h12
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h24
-rw-r--r--deps/v8/src/mips/assembler-mips.cc158
-rw-r--r--deps/v8/src/mips/assembler-mips.h72
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc304
-rw-r--r--deps/v8/src/mips/constants-mips.h8
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc2
-rw-r--r--deps/v8/src/mips/frame-constants-mips.h16
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc87
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc410
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h78
-rw-r--r--deps/v8/src/mips/simulator-mips.cc4
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h24
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc117
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h66
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc306
-rw-r--r--deps/v8/src/mips64/constants-mips64.h11
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc2
-rw-r--r--deps/v8/src/mips64/frame-constants-mips64.h16
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc87
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc394
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h103
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc4
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h153
-rw-r--r--deps/v8/src/objects-debug.cc1235
-rw-r--r--deps/v8/src/objects-definitions.h344
-rw-r--r--deps/v8/src/objects-inl.h670
-rw-r--r--deps/v8/src/objects-printer.cc437
-rw-r--r--deps/v8/src/objects.cc3433
-rw-r--r--deps/v8/src/objects.h722
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h30
-rw-r--r--deps/v8/src/objects/api-callbacks.h7
-rw-r--r--deps/v8/src/objects/arguments-inl.h2
-rw-r--r--deps/v8/src/objects/arguments.h2
-rw-r--r--deps/v8/src/objects/bigint.cc400
-rw-r--r--deps/v8/src/objects/bigint.h64
-rw-r--r--deps/v8/src/objects/code-inl.h57
-rw-r--r--deps/v8/src/objects/code.h128
-rw-r--r--deps/v8/src/objects/compilation-cache-inl.h2
-rw-r--r--deps/v8/src/objects/compilation-cache.h8
-rw-r--r--deps/v8/src/objects/data-handler-inl.h4
-rw-r--r--deps/v8/src/objects/data-handler.h4
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h26
-rw-r--r--deps/v8/src/objects/debug-objects.cc117
-rw-r--r--deps/v8/src/objects/debug-objects.h95
-rw-r--r--deps/v8/src/objects/descriptor-array.h37
-rw-r--r--deps/v8/src/objects/dictionary.h65
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h112
-rw-r--r--deps/v8/src/objects/fixed-array.h58
-rw-r--r--deps/v8/src/objects/frame-array.h5
-rw-r--r--deps/v8/src/objects/hash-table-inl.h34
-rw-r--r--deps/v8/src/objects/hash-table.h99
-rw-r--r--deps/v8/src/objects/intl-objects-inl.h27
-rw-r--r--deps/v8/src/objects/intl-objects.cc662
-rw-r--r--deps/v8/src/objects/intl-objects.h146
-rw-r--r--deps/v8/src/objects/js-array-inl.h23
-rw-r--r--deps/v8/src/objects/js-array.h15
-rw-r--r--deps/v8/src/objects/js-collection-inl.h8
-rw-r--r--deps/v8/src/objects/js-collection.h21
-rw-r--r--deps/v8/src/objects/js-locale.cc177
-rw-r--r--deps/v8/src/objects/js-locale.h7
-rw-r--r--deps/v8/src/objects/js-promise-inl.h1
-rw-r--r--deps/v8/src/objects/js-promise.h4
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h4
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-regexp.h3
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h62
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc225
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h105
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h20
-rw-r--r--deps/v8/src/objects/literal-objects.cc87
-rw-r--r--deps/v8/src/objects/literal-objects.h48
-rw-r--r--deps/v8/src/objects/managed.cc2
-rw-r--r--deps/v8/src/objects/managed.h37
-rw-r--r--deps/v8/src/objects/map-inl.h75
-rw-r--r--deps/v8/src/objects/map.h292
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h2
-rw-r--r--deps/v8/src/objects/maybe-object.h4
-rw-r--r--deps/v8/src/objects/module-inl.h36
-rw-r--r--deps/v8/src/objects/module.cc208
-rw-r--r--deps/v8/src/objects/module.h101
-rw-r--r--deps/v8/src/objects/name-inl.h11
-rw-r--r--deps/v8/src/objects/name.h7
-rw-r--r--deps/v8/src/objects/object-macros.h138
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h23
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc278
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h148
-rw-r--r--deps/v8/src/objects/promise-inl.h1
-rw-r--r--deps/v8/src/objects/prototype-info-inl.h64
-rw-r--r--deps/v8/src/objects/prototype-info.h112
-rw-r--r--deps/v8/src/objects/regexp-match-info.h2
-rw-r--r--deps/v8/src/objects/scope-info.cc235
-rw-r--r--deps/v8/src/objects/scope-info.h75
-rw-r--r--deps/v8/src/objects/script-inl.h1
-rw-r--r--deps/v8/src/objects/script.h14
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h428
-rw-r--r--deps/v8/src/objects/shared-function-info.h353
-rw-r--r--deps/v8/src/objects/string-inl.h41
-rw-r--r--deps/v8/src/objects/string-table.h10
-rw-r--r--deps/v8/src/objects/string.h33
-rw-r--r--deps/v8/src/objects/template-objects.cc4
-rw-r--r--deps/v8/src/objects/template-objects.h2
-rw-r--r--deps/v8/src/objects/templates.h5
-rw-r--r--deps/v8/src/optimized-compilation-info.cc13
-rw-r--r--deps/v8/src/optimized-compilation-info.h44
-rw-r--r--deps/v8/src/ostreams.cc35
-rw-r--r--deps/v8/src/ostreams.h61
-rw-r--r--deps/v8/src/parsing/expression-scope-reparenter.cc4
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc6
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h6
-rw-r--r--deps/v8/src/parsing/parse-info.cc165
-rw-r--r--deps/v8/src/parsing/parse-info.h37
-rw-r--r--deps/v8/src/parsing/parser-base.h148
-rw-r--r--deps/v8/src/parsing/parser.cc323
-rw-r--r--deps/v8/src/parsing/parser.h124
-rw-r--r--deps/v8/src/parsing/parsing.cc14
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc21
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc81
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.h5
-rw-r--r--deps/v8/src/parsing/preparser.cc6
-rw-r--r--deps/v8/src/parsing/preparser.h87
-rw-r--r--deps/v8/src/parsing/rewriter.cc11
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc758
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h6
-rw-r--r--deps/v8/src/parsing/scanner.cc288
-rw-r--r--deps/v8/src/parsing/scanner.h29
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc7
-rw-r--r--deps/v8/src/perf-jit.cc11
-rw-r--r--deps/v8/src/perf-jit.h8
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h16
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc68
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h76
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc321
-rw-r--r--deps/v8/src/ppc/constants-ppc.h8
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc2
-rw-r--r--deps/v8/src/ppc/frame-constants-ppc.h14
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc79
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc250
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h87
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc4
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h12
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc8
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h28
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc11
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc279
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h42
-rw-r--r--deps/v8/src/profiler/profile-generator.cc63
-rw-r--r--deps/v8/src/profiler/profile-generator.h8
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc93
-rw-r--r--deps/v8/src/profiler/profiler-listener.h13
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc4
-rw-r--r--deps/v8/src/profiler/strings-storage.cc13
-rw-r--r--deps/v8/src/profiler/strings-storage.h7
-rw-r--r--deps/v8/src/profiler/unbound-queue.h10
-rw-r--r--deps/v8/src/property-descriptor.cc36
-rw-r--r--deps/v8/src/property-details.h13
-rw-r--r--deps/v8/src/property.cc22
-rw-r--r--deps/v8/src/property.h26
-rw-r--r--deps/v8/src/prototype.h12
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h17
-rw-r--r--deps/v8/src/regexp/jsregexp.cc192
-rw-r--r--deps/v8/src/regexp/jsregexp.h61
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc4
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc50
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc20
-rw-r--r--deps/v8/src/register-configuration.cc6
-rw-r--r--deps/v8/src/register-configuration.h2
-rw-r--r--deps/v8/src/roots-inl.h96
-rw-r--r--deps/v8/src/roots.h352
-rw-r--r--deps/v8/src/runtime/runtime-array.cc55
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc41
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc37
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc85
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc39
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc1301
-rw-r--r--deps/v8/src/runtime/runtime-error.cc30
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc10
-rw-r--r--deps/v8/src/runtime/runtime-function.cc53
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc2
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc41
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc35
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc27
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc115
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc127
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc306
-rw-r--r--deps/v8/src/runtime/runtime-module.cc10
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc15
-rw-r--r--deps/v8/src/runtime/runtime-object.cc189
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc24
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc49
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc16
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc155
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc148
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc56
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc8
-rw-r--r--deps/v8/src/runtime/runtime-test.cc184
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc10
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc69
-rw-r--r--deps/v8/src/runtime/runtime.h93
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h35
-rw-r--r--deps/v8/src/s390/assembler-s390.cc1289
-rw-r--r--deps/v8/src/s390/assembler-s390.h1110
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc306
-rw-r--r--deps/v8/src/s390/constants-s390.h87
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc2
-rw-r--r--deps/v8/src/s390/disasm-s390.cc1239
-rw-r--r--deps/v8/src/s390/frame-constants-s390.h20
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc75
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc322
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h102
-rw-r--r--deps/v8/src/s390/simulator-s390.cc104
-rw-r--r--deps/v8/src/safepoint-table.cc1
-rw-r--r--deps/v8/src/setup-isolate-deserialize.cc8
-rw-r--r--deps/v8/src/signature.h23
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.cc50
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.cc18
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.h6
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc94
-rw-r--r--deps/v8/src/snapshot/code-serializer.h9
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.cc6
-rw-r--r--deps/v8/src/snapshot/default-serializer-allocator.cc35
-rw-r--r--deps/v8/src/snapshot/default-serializer-allocator.h10
-rw-r--r--deps/v8/src/snapshot/deserializer.cc65
-rw-r--r--deps/v8/src/snapshot/embedded-empty.cc3
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc56
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc40
-rw-r--r--deps/v8/src/snapshot/object-deserializer.h5
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc13
-rw-r--r--deps/v8/src/snapshot/references.h197
-rw-r--r--deps/v8/src/snapshot/serializer-common.h7
-rw-r--r--deps/v8/src/snapshot/serializer.cc59
-rw-r--r--deps/v8/src/snapshot/serializer.h11
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc102
-rw-r--r--deps/v8/src/snapshot/snapshot.h4
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc15
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc7
-rw-r--r--deps/v8/src/source-position-table.cc45
-rw-r--r--deps/v8/src/source-position-table.h1
-rw-r--r--deps/v8/src/source-position.cc9
-rw-r--r--deps/v8/src/splay-tree.h16
-rw-r--r--deps/v8/src/string-builder.cc3
-rw-r--r--deps/v8/src/string-builder.h42
-rw-r--r--deps/v8/src/string-hasher-inl.h10
-rw-r--r--deps/v8/src/string-hasher.h28
-rw-r--r--deps/v8/src/string-search.h6
-rw-r--r--deps/v8/src/string-stream.cc96
-rw-r--r--deps/v8/src/string-stream.h4
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc3
-rw-r--r--deps/v8/src/torque/Torque.g465
-rw-r--r--deps/v8/src/torque/Torque.interp249
-rw-r--r--deps/v8/src/torque/Torque.tokens154
-rw-r--r--deps/v8/src/torque/TorqueBaseListener.h41
-rw-r--r--deps/v8/src/torque/TorqueBaseVisitor.h38
-rw-r--r--deps/v8/src/torque/TorqueLexer.cpp1237
-rw-r--r--deps/v8/src/torque/TorqueLexer.h112
-rw-r--r--deps/v8/src/torque/TorqueLexer.interp264
-rw-r--r--deps/v8/src/torque/TorqueLexer.tokens154
-rw-r--r--deps/v8/src/torque/TorqueListener.h41
-rw-r--r--deps/v8/src/torque/TorqueParser.cpp3807
-rw-r--r--deps/v8/src/torque/TorqueParser.h375
-rw-r--r--deps/v8/src/torque/TorqueVisitor.h25
-rw-r--r--deps/v8/src/torque/ast-generator.cc206
-rw-r--r--deps/v8/src/torque/ast-generator.h22
-rw-r--r--deps/v8/src/torque/ast.h148
-rw-r--r--deps/v8/src/torque/contextual.h28
-rw-r--r--deps/v8/src/torque/declarable.cc97
-rw-r--r--deps/v8/src/torque/declarable.h128
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc472
-rw-r--r--deps/v8/src/torque/declaration-visitor.h253
-rw-r--r--deps/v8/src/torque/declarations.cc157
-rw-r--r--deps/v8/src/torque/declarations.h71
-rw-r--r--deps/v8/src/torque/file-visitor.cc81
-rw-r--r--deps/v8/src/torque/file-visitor.h18
-rw-r--r--deps/v8/src/torque/global-context.h24
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc1045
-rw-r--r--deps/v8/src/torque/implementation-visitor.h63
-rw-r--r--deps/v8/src/torque/parameter-difference.h77
-rw-r--r--deps/v8/src/torque/torque.cc24
-rw-r--r--deps/v8/src/torque/type-oracle.h108
-rw-r--r--deps/v8/src/torque/types.cc244
-rw-r--r--deps/v8/src/torque/types.h226
-rw-r--r--deps/v8/src/torque/utils.cc2
-rw-r--r--deps/v8/src/torque/utils.h45
-rw-r--r--deps/v8/src/transitions-inl.h2
-rw-r--r--deps/v8/src/transitions.cc101
-rw-r--r--deps/v8/src/transitions.h26
-rw-r--r--deps/v8/src/trap-handler/OWNERS6
-rw-r--r--deps/v8/src/turbo-assembler.cc122
-rw-r--r--deps/v8/src/turbo-assembler.h114
-rw-r--r--deps/v8/src/unicode-inl.h5
-rw-r--r--deps/v8/src/unicode.h12
-rw-r--r--deps/v8/src/uri.cc8
-rw-r--r--deps/v8/src/utils.h62
-rw-r--r--deps/v8/src/v8.cc3
-rw-r--r--deps/v8/src/v8threads.cc8
-rw-r--r--deps/v8/src/v8threads.h1
-rw-r--r--deps/v8/src/value-serializer.cc60
-rw-r--r--deps/v8/src/value-serializer.h2
-rw-r--r--deps/v8/src/vector.h75
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h20
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h191
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h101
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc47
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h38
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc415
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h25
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h36
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h36
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h20
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h20
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h44
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h85
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc37
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h15
-rw-r--r--deps/v8/src/wasm/function-compiler.cc34
-rw-r--r--deps/v8/src/wasm/function-compiler.h10
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc210
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h79
-rw-r--r--deps/v8/src/wasm/module-compiler.cc1787
-rw-r--r--deps/v8/src/wasm/module-compiler.h54
-rw-r--r--deps/v8/src/wasm/module-decoder.cc204
-rw-r--r--deps/v8/src/wasm/module-decoder.h5
-rw-r--r--deps/v8/src/wasm/signature-map.cc22
-rw-r--r--deps/v8/src/wasm/signature-map.h16
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc8
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h3
-rw-r--r--deps/v8/src/wasm/value-type.h4
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc1040
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h362
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc186
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.h48
-rw-r--r--deps/v8/src/wasm/wasm-constants.h2
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc140
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc114
-rw-r--r--deps/v8/src/wasm/wasm-engine.h82
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc606
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h3
-rw-r--r--deps/v8/src/wasm/wasm-js.cc364
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc96
-rw-r--r--deps/v8/src/wasm/wasm-memory.h24
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc34
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h9
-rw-r--r--deps/v8/src/wasm/wasm-module.cc140
-rw-r--r--deps/v8/src/wasm/wasm-module.h61
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h108
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc1463
-rw-r--r--deps/v8/src/wasm/wasm-objects.h366
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc44
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h75
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc368
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h7
-rw-r--r--deps/v8/src/wasm/wasm-value.h13
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h43
-rw-r--r--deps/v8/src/x64/assembler-x64.cc77
-rw-r--r--deps/v8/src/x64/assembler-x64.h23
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc315
-rw-r--r--deps/v8/src/x64/constants-x64.h21
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc2
-rw-r--r--deps/v8/src/x64/frame-constants-x64.h16
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc82
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc353
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h112
-rw-r--r--deps/v8/src/zone/zone-containers.h11
-rw-r--r--deps/v8/src/zone/zone.h27
894 files changed, 59894 insertions, 54387 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 565c019092..ae59ec3356 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -13,6 +13,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/module-inl.h"
#include "src/property-details.h"
#include "src/prototype.h"
@@ -45,11 +46,11 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
return info;
}
-static V8_INLINE bool CheckForName(Handle<Name> name,
+static V8_INLINE bool CheckForName(Isolate* isolate, Handle<Name> name,
Handle<String> property_name, int offset,
FieldIndex::Encoding encoding,
FieldIndex* index) {
- if (Name::Equals(name, property_name)) {
+ if (Name::Equals(isolate, name, property_name)) {
*index = FieldIndex::ForInObjectOffset(offset, encoding);
return true;
}
@@ -59,17 +60,15 @@ static V8_INLINE bool CheckForName(Handle<Name> name,
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
-bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
- FieldIndex* index) {
- Isolate* isolate = name->GetIsolate();
-
+bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name, FieldIndex* index) {
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
- return CheckForName(name, isolate->factory()->length_string(),
+ return CheckForName(isolate, name, isolate->factory()->length_string(),
JSArray::kLengthOffset, FieldIndex::kTagged, index);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
- return CheckForName(name, isolate->factory()->length_string(),
+ return CheckForName(isolate, name, isolate->factory()->length_string(),
String::kLengthOffset, FieldIndex::kTagged, index);
}
@@ -169,7 +168,8 @@ void Accessors::ArrayLengthSetter(
RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
- DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
+ DCHECK(Utils::OpenHandle(*name)->SameValue(
+ ReadOnlyRoots(isolate).length_string()));
Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
@@ -237,7 +237,8 @@ void Accessors::ModuleNamespaceEntryGetter(
JSModuleNamespace* holder =
JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
- if (!holder->GetExport(Handle<String>::cast(Utils::OpenHandle(*name)))
+ if (!holder
+ ->GetExport(isolate, Handle<String>::cast(Utils::OpenHandle(*name)))
.ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
} else {
@@ -305,333 +306,6 @@ Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
}
//
-// Accessors::ScriptColumnOffset
-//
-
-
-void Accessors::ScriptColumnOffsetGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* res = Smi::FromInt(
- Script::cast(JSValue::cast(object)->value())->column_offset());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptColumnOffsetInfo(Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("column_offset")));
- return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptId
-//
-
-
-void Accessors::ScriptIdGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptIdInfo(Isolate* isolate) {
- Handle<String> name(
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
- return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptName
-//
-
-
-void Accessors::ScriptNameGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* source = Script::cast(JSValue::cast(object)->value())->name();
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptNameInfo(Isolate* isolate) {
- return MakeAccessor(isolate, isolate->factory()->name_string(),
- &ScriptNameGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptSource
-//
-
-
-void Accessors::ScriptSourceGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* source = Script::cast(JSValue::cast(object)->value())->source();
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptSourceInfo(Isolate* isolate) {
- return MakeAccessor(isolate, isolate->factory()->source_string(),
- &ScriptSourceGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptLineOffset
-//
-
-
-void Accessors::ScriptLineOffsetGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* res =
- Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptLineOffsetInfo(Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("line_offset")));
- return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptType
-//
-
-
-void Accessors::ScriptTypeGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* res =
- Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptTypeInfo(Isolate* isolate) {
- Handle<String> name(
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
- return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptCompilationType
-//
-
-
-void Accessors::ScriptCompilationTypeGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* res = Smi::FromInt(
- Script::cast(JSValue::cast(object)->value())->compilation_type());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptCompilationTypeInfo(
- Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("compilation_type")));
- return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptSourceUrl
-//
-
-
-void Accessors::ScriptSourceUrlGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptSourceUrlInfo(Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("source_url")));
- return MakeAccessor(isolate, name, &ScriptSourceUrlGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptSourceMappingUrl
-//
-
-
-void Accessors::ScriptSourceMappingUrlGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* url =
- Script::cast(JSValue::cast(object)->value())->source_mapping_url();
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptSourceMappingUrlInfo(
- Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("source_mapping_url")));
- return MakeAccessor(isolate, name, &ScriptSourceMappingUrlGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptGetContextData
-//
-
-
-void Accessors::ScriptContextDataGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptContextDataInfo(Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("context_data")));
- return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptGetEvalFromScript
-//
-
-
-void Accessors::ScriptEvalFromScriptGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.Holder());
- Handle<Script> script(
- Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
- Handle<Object> result = isolate->factory()->undefined_value();
- if (script->has_eval_from_shared()) {
- Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
- if (eval_from_shared->script()->IsScript()) {
- Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
- result = Script::GetWrapper(eval_from_script);
- }
- }
-
- info.GetReturnValue().Set(Utils::ToLocal(result));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptInfo(Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("eval_from_script")));
- return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr);
-}
-
-
-//
-// Accessors::ScriptGetEvalFromScriptPosition
-//
-
-
-void Accessors::ScriptEvalFromScriptPositionGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.Holder());
- Handle<Script> script(
- Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
- Handle<Object> result = isolate->factory()->undefined_value();
- if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
- result = Handle<Object>(Smi::FromInt(script->GetEvalPosition()), isolate);
- }
- info.GetReturnValue().Set(Utils::ToLocal(result));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptPositionInfo(
- Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("eval_from_script_position")));
- return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
- nullptr);
-}
-
-
-//
-// Accessors::ScriptGetEvalFromFunctionName
-//
-
-
-void Accessors::ScriptEvalFromFunctionNameGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.Holder());
- Handle<Script> script(
- Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
- Handle<Object> result = isolate->factory()->undefined_value();
- if (script->has_eval_from_shared()) {
- Handle<SharedFunctionInfo> shared(script->eval_from_shared());
- // Find the name of the function calling eval.
- result = Handle<Object>(shared->Name(), isolate);
- }
- info.GetReturnValue().Set(Utils::ToLocal(result));
-}
-
-Handle<AccessorInfo> Accessors::MakeScriptEvalFromFunctionNameInfo(
- Isolate* isolate) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("eval_from_function_name")));
- return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter,
- nullptr);
-}
-
-
-//
// Accessors::FunctionPrototype
//
@@ -824,7 +498,7 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
DCHECK(IsResumableFunction(function->shared()->kind()));
- value = isolate->heap()->undefined_value();
+ value = ReadOnlyRoots(isolate).undefined_value();
}
array->set(i, value);
}
@@ -1118,9 +792,9 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
Handle<JSObject> error) {
RETURN_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
- isolate->factory()->undefined_value(),
- LanguageMode::kStrict),
+ JSReceiver::SetProperty(
+ isolate, error, isolate->factory()->stack_trace_symbol(),
+ isolate->factory()->undefined_value(), LanguageMode::kStrict),
JSReceiver);
return error;
}
@@ -1152,7 +826,7 @@ void Accessors::ErrorStackGetter(
Handle<Object> stack_trace;
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
MaybeHandle<Object> maybe_stack_trace =
- JSObject::GetProperty(holder, stack_trace_symbol);
+ JSObject::GetProperty(isolate, holder, stack_trace_symbol);
if (!maybe_stack_trace.ToHandle(&stack_trace) ||
stack_trace->IsUndefined(isolate)) {
Handle<Object> result = isolate->factory()->undefined_value();
@@ -1191,7 +865,8 @@ void Accessors::ErrorStackGetter(
}
} else {
// The stack property has been modified in the meantime.
- if (!JSObject::GetProperty(holder, name).ToHandle(&formatted_stack_trace)) {
+ if (!JSObject::GetProperty(isolate, holder, name)
+ .ToHandle(&formatted_stack_trace)) {
isolate->OptionalRescheduleException(false);
return;
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 4a1a67e93e..301f830a9a 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -33,19 +33,6 @@ class JavaScriptFrame;
V(function_name, FunctionName) \
V(function_length, FunctionLength) \
V(function_prototype, FunctionPrototype) \
- V(script_column_offset, ScriptColumnOffset) \
- V(script_compilation_type, ScriptCompilationType) \
- V(script_context_data, ScriptContextData) \
- V(script_eval_from_script, ScriptEvalFromScript) \
- V(script_eval_from_script_position, ScriptEvalFromScriptPosition) \
- V(script_eval_from_function_name, ScriptEvalFromFunctionName) \
- V(script_id, ScriptId) \
- V(script_line_offset, ScriptLineOffset) \
- V(script_name, ScriptName) \
- V(script_source, ScriptSource) \
- V(script_type, ScriptType) \
- V(script_source_url, ScriptSourceUrl) \
- V(script_source_mapping_url, ScriptSourceMappingUrl) \
V(string_length, StringLength)
#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \
@@ -106,7 +93,8 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, the matching FieldIndex is returned through |field_index|.
- static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
+ static bool IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name,
FieldIndex* field_index);
static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index f3e2770847..599e44724a 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -69,181 +69,6 @@ class RootIndexMap {
DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
-class SerializerReference {
- public:
- SerializerReference() : bitfield_(Special(kInvalidValue)) {}
-
- static SerializerReference FromBitfield(uint32_t bitfield) {
- return SerializerReference(bitfield);
- }
-
- static SerializerReference BackReference(AllocationSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_NE(LO_SPACE, space);
- return SerializerReference(
- SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
- ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
- }
-
- static SerializerReference MapReference(uint32_t index) {
- return SerializerReference(SpaceBits::encode(MAP_SPACE) |
- ValueIndexBits::encode(index));
- }
-
- static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
- return SerializerReference(SpaceBits::encode(kExternalSpace) |
- ValueIndexBits::encode(index));
- }
-
- static SerializerReference LargeObjectReference(uint32_t index) {
- return SerializerReference(SpaceBits::encode(LO_SPACE) |
- ValueIndexBits::encode(index));
- }
-
- static SerializerReference AttachedReference(uint32_t index) {
- return SerializerReference(SpaceBits::encode(kAttachedReferenceSpace) |
- ValueIndexBits::encode(index));
- }
-
- static SerializerReference DummyReference() {
- return SerializerReference(Special(kDummyValue));
- }
-
- bool is_valid() const { return bitfield_ != Special(kInvalidValue); }
-
- bool is_back_reference() const {
- return SpaceBits::decode(bitfield_) <= LAST_SPACE;
- }
-
- AllocationSpace space() const {
- DCHECK(is_back_reference());
- return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
- }
-
- uint32_t chunk_offset() const {
- DCHECK(is_back_reference());
- return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
- }
-
- uint32_t map_index() const {
- DCHECK(is_back_reference());
- return ValueIndexBits::decode(bitfield_);
- }
-
- bool is_off_heap_backing_store_reference() const {
- return SpaceBits::decode(bitfield_) == kExternalSpace;
- }
-
- uint32_t off_heap_backing_store_index() const {
- DCHECK(is_off_heap_backing_store_reference());
- return ValueIndexBits::decode(bitfield_);
- }
-
- uint32_t large_object_index() const {
- DCHECK(is_back_reference());
- return ValueIndexBits::decode(bitfield_);
- }
-
- uint32_t chunk_index() const {
- DCHECK(is_back_reference());
- return ChunkIndexBits::decode(bitfield_);
- }
-
- uint32_t back_reference() const {
- DCHECK(is_back_reference());
- return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
- }
-
- bool is_attached_reference() const {
- return SpaceBits::decode(bitfield_) == kAttachedReferenceSpace;
- }
-
- int attached_reference_index() const {
- DCHECK(is_attached_reference());
- return ValueIndexBits::decode(bitfield_);
- }
-
- private:
- explicit SerializerReference(uint32_t bitfield) : bitfield_(bitfield) {}
-
- inline static uint32_t Special(int value) {
- return SpaceBits::encode(kSpecialValueSpace) |
- ValueIndexBits::encode(value);
- }
-
- // We use the 32-bit bitfield to encode either a back reference, a special
- // value, or an attached reference index.
- // Back reference:
- // [ Space index ] [ Chunk index ] [ Chunk offset ]
- // [ LO_SPACE ] [ large object index ]
- // Special value
- // [ kSpecialValueSpace ] [ Special value index ]
- // Attached reference
- // [ kAttachedReferenceSpace ] [ Attached reference index ]
- // External
- // [ kExternalSpace ] [ External reference index ]
-
- static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
- static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
- static const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;
-
- static const int kSpecialValueSpace = LAST_SPACE + 1;
- static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
- static const int kExternalSpace = kAttachedReferenceSpace + 1;
- STATIC_ASSERT(kExternalSpace < (1 << kSpaceTagSize));
-
- static const int kInvalidValue = 0;
- static const int kDummyValue = 1;
-
- // The chunk offset can also be used to encode the index of special values.
- class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
- class ChunkIndexBits
- : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
- class ValueIndexBits : public BitField<uint32_t, 0, kValueIndexSize> {};
- STATIC_ASSERT(ChunkIndexBits::kNext == ValueIndexBits::kNext);
- class SpaceBits : public BitField<int, kValueIndexSize, kSpaceTagSize> {};
- STATIC_ASSERT(SpaceBits::kNext == 32);
-
- uint32_t bitfield_;
-
- friend class SerializerReferenceMap;
-};
-
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializerReferenceMap {
- public:
- SerializerReferenceMap()
- : no_allocation_(), map_(), attached_reference_index_(0) {}
-
- SerializerReference Lookup(void* obj) {
- Maybe<uint32_t> maybe_index = map_.Get(obj);
- return maybe_index.IsJust() ? SerializerReference(maybe_index.FromJust())
- : SerializerReference();
- }
-
- void Add(void* obj, SerializerReference b) {
- DCHECK(b.is_valid());
- DCHECK(map_.Get(obj).IsNothing());
- map_.Set(obj, b.bitfield_);
- }
-
- SerializerReference AddAttachedReference(HeapObject* attached_reference) {
- SerializerReference reference =
- SerializerReference::AttachedReference(attached_reference_index_++);
- Add(attached_reference, reference);
- return reference;
- }
-
- private:
- DisallowHeapAllocation no_allocation_;
- PointerToIndexHashMap<void*> map_;
- int attached_reference_index_;
- DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 67a510c611..8e17a35514 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -71,8 +71,8 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
- INLINE(void* New(size_t size)) { return Malloced::New(size); }
- INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
+ V8_INLINE void* New(size_t size) { return Malloced::New(size); }
+ V8_INLINE static void Delete(void* p) { Malloced::Delete(p); }
};
// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 503cea8dcb..4d91b68521 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -85,7 +85,8 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler->callback());
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
- !isolate->debug()->PerformSideEffectCheckForCallback(handle(handler))) {
+ !isolate->debug()->PerformSideEffectCheckForCallback(
+ handle(handler, isolate))) {
return Handle<Object>();
}
VMState<EXTERNAL> state(isolate);
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 0abbcdcafa..bed1c123e0 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -93,9 +93,9 @@ class PropertyCallbackArguments
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
- values[T::kReturnValueDefaultValueIndex] =
- isolate->heap()->the_hole_value();
- values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ values[T::kReturnValueDefaultValueIndex] = the_hole;
+ values[T::kReturnValueIndex] = the_hole;
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
@@ -200,9 +200,9 @@ class FunctionCallbackArguments
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's remove in Call below.
- values[T::kReturnValueDefaultValueIndex] =
- isolate->heap()->the_hole_value();
- values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ values[T::kReturnValueDefaultValueIndex] = the_hole;
+ values[T::kReturnValueIndex] = the_hole;
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 1b6df15d7a..11dd4d67d5 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -122,18 +122,18 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
- Handle<Map> old_map(object->map());
+ Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere constructor's initial map.
- Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
+ Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks");
new_map->set_is_access_check_needed(false);
JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
}
void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
- Handle<Map> old_map(object->map());
+ Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere constructor's initial map.
- Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
+ Handle<Map> new_map = Map::Copy(isolate, old_map, "EnableAccessChecks");
new_map->set_is_access_check_needed(true);
new_map->set_may_have_interesting_symbols(true);
JSObject::MigrateToMap(object, new_map);
@@ -202,19 +202,20 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(max_number_of_properties);
- for (Handle<TemplateInfoT> temp(*data); *temp != nullptr;
+ for (Handle<TemplateInfoT> temp(*data, isolate); *temp != nullptr;
temp = handle(temp->GetParent(isolate), isolate)) {
// Accumulate accessors.
Object* maybe_properties = temp->property_accessors();
if (!maybe_properties->IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
- handle(maybe_properties, isolate), array, valid_descriptors);
+ isolate, handle(maybe_properties, isolate), array,
+ valid_descriptors);
}
}
// Install accumulated accessors.
for (int i = 0; i < valid_descriptors; i++) {
- Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)), isolate);
Handle<Name> name(Name::cast(accessor->name()), isolate);
JSObject::SetAccessor(obj, name, accessor,
accessor->initial_property_attributes())
@@ -290,7 +291,7 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
- int entry = slow_cache->FindEntry(serial_number);
+ int entry = slow_cache->FindEntry(isolate, serial_number);
if (entry == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
@@ -308,7 +309,7 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
Handle<FixedArray> new_cache =
- FixedArray::SetAndGrow(fast_cache, serial_number - 1, object);
+ FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object);
if (*new_cache != *fast_cache) {
isolate->native_context()->set_fast_template_instantiations_cache(
*new_cache);
@@ -318,7 +319,8 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
- auto new_cache = SimpleNumberDictionary::Set(cache, serial_number, object);
+ auto new_cache =
+ SimpleNumberDictionary::Set(isolate, cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@@ -339,9 +341,9 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
- int entry = cache->FindEntry(serial_number);
+ int entry = cache->FindEntry(isolate, serial_number);
DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
- cache = SimpleNumberDictionary::DeleteEntry(cache, entry);
+ cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@@ -443,7 +445,7 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
// TODO(cbruni): decide what to do here.
ASSIGN_RETURN_ON_EXCEPTION(
isolate, instance_prototype,
- JSObject::GetProperty(parent_instance,
+ JSObject::GetProperty(isolate, parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
return scope.CloseAndEscape(instance_prototype);
@@ -544,8 +546,8 @@ MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
- Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
- Isolate* isolate = data->GetIsolate();
+ Isolate* isolate, Handle<ObjectTemplateInfo> data,
+ Handle<JSReceiver> new_target) {
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
false);
@@ -557,7 +559,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
InvokeScope invoke_scope(isolate);
Handle<FunctionTemplateInfo> constructor(
- FunctionTemplateInfo::cast(data->constructor()));
+ FunctionTemplateInfo::cast(data->constructor()), isolate);
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
@@ -654,7 +656,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
} else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
- JSObject::AddProperty(Handle<JSObject>::cast(prototype),
+ JSObject::AddProperty(isolate, Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
}
@@ -663,7 +665,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
bool immutable_proto = false;
if (!obj->instance_template()->IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
+ ObjectTemplateInfo::cast(obj->instance_template()), isolate);
embedder_field_count = instance_template->embedder_field_count();
immutable_proto = instance_template->immutable_proto();
}
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index f73e7cee7e..67a4b80060 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -26,7 +26,7 @@ class ApiNatives {
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateObject(
- Handle<ObjectTemplateInfo> data,
+ Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target = Handle<JSReceiver>());
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 5df3188ab6..7c569e3a9f 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -33,6 +33,7 @@
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
#include "src/detachable-vector.h"
#include "src/execution.h"
@@ -47,6 +48,10 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-promise-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parser.h"
@@ -202,12 +207,35 @@ namespace v8 {
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
+// TODO(v8:7786): Remove this when HeapObject::GetIsolate is removed.
+#ifdef DEPRECATE_GET_ISOLATE
+#define DISABLE_DEPRECATED_WARNINGS \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated\"")
+#define RESET_DEPRECATED_WARNINGS _Pragma("clang diagnostic pop")
+#else
+#define DISABLE_DEPRECATED_WARNINGS
+#define RESET_DEPRECATED_WARNINGS
+#endif
namespace {
-Local<Context> ContextFromHeapObject(i::Handle<i::Object> obj) {
+Local<Context> ContextFromNeverReadOnlySpaceObject(
+ i::Handle<i::NeverReadOnlySpaceObject> obj) {
+ return reinterpret_cast<v8::Isolate*>(obj->GetIsolate())->GetCurrentContext();
+}
+
+// This is unsafe because obj could be in RO_SPACE which would not be tied to a
+// particular isolate.
+#ifdef DEPRECATE_GET_ISOLATE
+[[deprecated("Pass Context explicitly or use a NeverReadOnlySpaceObject")]]
+#endif
+ Local<Context>
+ UnsafeContextFromHeapObject(i::Handle<i::Object> obj) {
+ DISABLE_DEPRECATED_WARNINGS
return reinterpret_cast<v8::Isolate*>(i::HeapObject::cast(*obj)->GetIsolate())
->GetCurrentContext();
+ RESET_DEPRECATED_WARNINGS
}
class InternalEscapableScope : public v8::EscapableHandleScope {
@@ -300,8 +328,7 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
i::Handle<i::FixedArray> host_defined_options(script->host_defined_options(),
isolate);
- v8::Isolate* v8_isolate =
- reinterpret_cast<v8::Isolate*>(script->GetIsolate());
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
ScriptOriginOptions options(script->origin_options());
v8::ScriptOrigin origin(
Utils::ToLocal(scriptName),
@@ -463,7 +490,7 @@ void Utils::ReportOOMFailure(i::Isolate* isolate, const char* location,
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (isolate->has_scheduled_exception()) {
return isolate->scheduled_exception() ==
- isolate->heap()->termination_exception();
+ i::ReadOnlyRoots(isolate).termination_exception();
}
return false;
}
@@ -638,10 +665,10 @@ size_t SnapshotCreator::AddData(i::Object* object) {
list = i::ArrayList::New(isolate, 1);
} else {
list = i::Handle<i::ArrayList>(
- i::ArrayList::cast(isolate->heap()->serialized_objects()));
+ i::ArrayList::cast(isolate->heap()->serialized_objects()), isolate);
}
size_t index = static_cast<size_t>(list->Length());
- list = i::ArrayList::Add(list, obj);
+ list = i::ArrayList::Add(isolate, list, obj);
isolate->heap()->SetSerializedObjects(*list);
return index;
}
@@ -657,11 +684,11 @@ size_t SnapshotCreator::AddData(Local<Context> context, i::Object* object) {
if (!ctx->serialized_objects()->IsArrayList()) {
list = i::ArrayList::New(isolate, 1);
} else {
- list =
- i::Handle<i::ArrayList>(i::ArrayList::cast(ctx->serialized_objects()));
+ list = i::Handle<i::ArrayList>(
+ i::ArrayList::cast(ctx->serialized_objects()), isolate);
}
size_t index = static_cast<size_t>(list->Length());
- list = i::ArrayList::Add(list, obj);
+ list = i::ArrayList::Add(isolate, list, obj);
ctx->set_serialized_objects(*list);
return index;
}
@@ -671,21 +698,23 @@ void ConvertSerializedObjectsToFixedArray(Local<Context> context) {
i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
i::Isolate* isolate = ctx->GetIsolate();
if (!ctx->serialized_objects()->IsArrayList()) {
- ctx->set_serialized_objects(isolate->heap()->empty_fixed_array());
+ ctx->set_serialized_objects(i::ReadOnlyRoots(isolate).empty_fixed_array());
} else {
- i::Handle<i::ArrayList> list(i::ArrayList::cast(ctx->serialized_objects()));
- i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ i::Handle<i::ArrayList> list(i::ArrayList::cast(ctx->serialized_objects()),
+ isolate);
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(isolate, list);
ctx->set_serialized_objects(*elements);
}
}
void ConvertSerializedObjectsToFixedArray(i::Isolate* isolate) {
if (!isolate->heap()->serialized_objects()->IsArrayList()) {
- isolate->heap()->SetSerializedObjects(isolate->heap()->empty_fixed_array());
+ isolate->heap()->SetSerializedObjects(
+ i::ReadOnlyRoots(isolate).empty_fixed_array());
} else {
i::Handle<i::ArrayList> list(
- i::ArrayList::cast(isolate->heap()->serialized_objects()));
- i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ i::ArrayList::cast(isolate->heap()->serialized_objects()), isolate);
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(isolate, list);
isolate->heap()->SetSerializedObjects(*elements);
}
}
@@ -732,10 +761,40 @@ StartupData SnapshotCreator::CreateBlob(
// context even after we have disposed of the context.
isolate->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kSnapshotCreator);
- isolate->heap()->CompactFixedArraysOfWeakCells();
+ {
+ i::HandleScope scope(isolate);
+ isolate->heap()->CompactFixedArraysOfWeakCells();
+ }
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
+ if (function_code_handling == FunctionCodeHandling::kClear) {
+ // Clear out re-compilable data from all shared function infos. Any
+ // JSFunctions using these SFIs will have their code pointers reset by the
+ // partial serializer.
+ //
+ // We have to iterate the heap and collect handles to each clearable SFI,
+ // before we disable allocation, since we have to allocate UncompiledDatas
+ // to be able to recompile them.
+ i::HandleScope scope(isolate);
+ std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
+
+ i::HeapIterator heap_iterator(isolate->heap());
+ while (i::HeapObject* current_obj = heap_iterator.next()) {
+ if (current_obj->IsSharedFunctionInfo()) {
+ i::SharedFunctionInfo* shared =
+ i::SharedFunctionInfo::cast(current_obj);
+ if (shared->CanDiscardCompiled()) {
+ sfis_to_clear.emplace_back(shared, isolate);
+ }
+ }
+ }
+ i::AllowHeapAllocation allocate_for_discard;
+ for (i::Handle<i::SharedFunctionInfo> shared : sfis_to_clear) {
+ i::SharedFunctionInfo::DiscardCompiled(isolate, shared);
+ }
+ }
+
i::DisallowHeapAllocation no_gc_from_here_on;
int num_contexts = num_additional_contexts + 1;
@@ -768,22 +827,16 @@ StartupData SnapshotCreator::CreateBlob(
// Also, clear out feedback vectors, or any optimized code.
if (fun->has_feedback_vector()) {
- fun->feedback_cell()->set_value(isolate->heap()->undefined_value());
+ fun->feedback_cell()->set_value(
+ i::ReadOnlyRoots(isolate).undefined_value());
fun->set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
}
- }
-
- // Clear out re-compilable data from all shared function infos. Any
- // JSFunctions using these SFIs will have their code pointers reset by the
- // partial serializer.
- if (current_obj->IsSharedFunctionInfo() &&
- function_code_handling == FunctionCodeHandling::kClear) {
- i::SharedFunctionInfo* shared = i::SharedFunctionInfo::cast(current_obj);
- if (shared->CanFlushCompiled()) {
- shared->FlushCompiled();
+ if (function_code_handling == FunctionCodeHandling::kClear) {
+ DCHECK(fun->shared()->HasWasmExportedFunctionData() ||
+ fun->shared()->HasBuiltinId() ||
+ fun->shared()->IsApiFunction() ||
+ fun->shared()->HasUncompiledDataWithoutPreParsedScope());
}
- DCHECK(shared->HasWasmExportedFunctionData() || shared->HasBuiltinId() ||
- shared->IsApiFunction());
}
}
@@ -938,6 +991,21 @@ void RegisteredExtension::UnregisterAll() {
first_extension_ = nullptr;
}
+namespace {
+class ExtensionResource : public String::ExternalOneByteStringResource {
+ public:
+ ExtensionResource() : data_(0), length_(0) {}
+ ExtensionResource(const char* data, size_t length)
+ : data_(data), length_(length) {}
+ const char* data() const { return data_; }
+ size_t length() const { return length_; }
+ virtual void Dispose() {}
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+} // anonymous namespace
void RegisterExtension(Extension* that) {
RegisteredExtension* extension = new RegisteredExtension(that);
@@ -954,10 +1022,10 @@ Extension::Extension(const char* name,
source_length_(source_length >= 0 ?
source_length :
(source ? static_cast<int>(strlen(source)) : 0)),
- source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) {
+ source_ = new ExtensionResource(source, source_length_);
CHECK(source != nullptr || source_length_ == 0);
}
@@ -1008,7 +1076,7 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- (*obj)->ObjectVerify();
+ (*obj)->ObjectVerify(isolate);
}
#endif // VERIFY_HEAP
return result.location();
@@ -1017,11 +1085,6 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
i::Object** V8::CopyPersistent(i::Object** obj) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
-#ifdef VERIFY_HEAP
- if (i::FLAG_verify_heap) {
- (*obj)->ObjectVerify();
- }
-#endif // VERIFY_HEAP
return result.location();
}
@@ -1144,17 +1207,17 @@ i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
return i::HandleScope::CreateHandle(isolate, value);
}
-
-i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
- i::Object* value) {
- DCHECK(heap_object->IsHeapObject());
- return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
+i::Object** HandleScope::CreateHandle(
+ i::NeverReadOnlySpaceObject* writable_object, i::Object* value) {
+ DCHECK(reinterpret_cast<i::HeapObject*>(writable_object)->IsHeapObject());
+ return i::HandleScope::CreateHandle(writable_object->GetIsolate(), value);
}
EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- escape_slot_ = CreateHandle(isolate, isolate->heap()->the_hole_value());
+ escape_slot_ =
+ CreateHandle(isolate, i::ReadOnlyRoots(isolate).the_hole_value());
Initialize(v8_isolate);
}
@@ -1164,7 +1227,7 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
Utils::ApiCheck((*escape_slot_)->IsTheHole(heap->isolate()),
"EscapableHandleScope::Escape", "Escape value set twice");
if (escape_value == nullptr) {
- *escape_slot_ = heap->undefined_value();
+ *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value();
return nullptr;
}
*escape_slot_ = *escape_value;
@@ -1267,7 +1330,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
"Not a native context") &&
Utils::ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
- i::Handle<i::FixedArray> data(env->embedder_data());
+ i::Handle<i::FixedArray> data(env->embedder_data(), isolate);
if (index < data->length()) return data;
if (!Utils::ApiCheck(can_grow, location, "Index too large")) {
return i::Handle<i::FixedArray>();
@@ -1289,7 +1352,9 @@ v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
const char* location = "v8::Context::GetEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
if (data.is_null()) return Local<Value>();
- i::Handle<i::Object> result(data->get(index), data->GetIsolate());
+ i::Handle<i::Object> result(
+ data->get(index),
+ reinterpret_cast<i::Isolate*>(Utils::OpenHandle(this)->GetIsolate()));
return Utils::ToLocal(result);
}
@@ -1456,7 +1521,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
obj->set_cached_property_name(
cached_property_name.IsEmpty()
- ? isolate->heap()->the_hole_value()
+ ? i::ReadOnlyRoots(isolate).the_hole_value()
: *Utils::OpenHandle(*cached_property_name));
return Utils::ToLocal(obj);
}
@@ -1485,7 +1550,7 @@ MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
i::Object* info = serialized_objects->get(int_index);
if (info->IsFunctionTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(info)));
+ i::FunctionTemplateInfo::cast(info), i_isolate));
}
}
return Local<FunctionTemplate>();
@@ -1513,10 +1578,10 @@ Local<AccessorSignature> AccessorSignature::New(
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
}
-
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
- (obj)->setter(*foreign); \
+#define SET_FIELD_WRAPPED(isolate, obj, setter, cdata) \
+ do { \
+ i::Handle<i::Object> foreign = FromCData(isolate, cdata); \
+ (obj)->setter(*foreign); \
} while (false)
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
@@ -1529,8 +1594,8 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::HandleScope scope(isolate);
i::Handle<i::CallHandlerInfo> obj = isolate->factory()->NewCallHandlerInfo(
side_effect_type == SideEffectType::kHasNoSideEffect);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
+ SET_FIELD_WRAPPED(isolate, obj, set_callback, callback);
+ SET_FIELD_WRAPPED(isolate, obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -1548,16 +1613,16 @@ i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Local<AccessorSignature> signature, bool is_special_data_property,
bool replace_on_access) {
i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
- SET_FIELD_WRAPPED(obj, set_getter, getter);
+ SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
DCHECK_IMPLIES(replace_on_access,
is_special_data_property && setter == nullptr);
if (is_special_data_property && setter == nullptr) {
setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
}
- SET_FIELD_WRAPPED(obj, set_setter, setter);
+ SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
i::Address redirected = obj->redirected_getter();
if (redirected != i::kNullAddress) {
- SET_FIELD_WRAPPED(obj, set_js_getter, redirected);
+ SET_FIELD_WRAPPED(isolate, obj, set_js_getter, redirected);
}
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1597,7 +1662,7 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
handle->set_instance_template(*Utils::OpenHandle(*templ));
}
i::Handle<i::ObjectTemplateInfo> result(
- i::ObjectTemplateInfo::cast(handle->instance_template()));
+ i::ObjectTemplateInfo::cast(handle->instance_template()), isolate);
return Utils::ToLocal(result);
}
@@ -1699,8 +1764,8 @@ MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
if (int_index < serialized_objects->length()) {
i::Object* info = serialized_objects->get(int_index);
if (info->IsObjectTemplateInfo()) {
- return Utils::ToLocal(
- i::Handle<i::ObjectTemplateInfo>(i::ObjectTemplateInfo::cast(info)));
+ return Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>(
+ i::ObjectTemplateInfo::cast(info), i_isolate));
}
}
return Local<ObjectTemplate>();
@@ -1814,17 +1879,23 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
i::Isolate* isolate, Getter getter, Setter setter, Query query,
Descriptor descriptor, Deleter remover, Enumerator enumerator,
Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ // Either intercept attributes or descriptor.
+ DCHECK(query == nullptr || descriptor == nullptr);
+ // Only use descriptor callback with definer callback.
+ DCHECK(query == nullptr || definer == nullptr);
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- if (descriptor != 0) SET_FIELD_WRAPPED(obj, set_descriptor, descriptor);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- if (definer != 0) SET_FIELD_WRAPPED(obj, set_definer, definer);
+ if (getter != 0) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(isolate, obj, set_query, query);
+ if (descriptor != 0)
+ SET_FIELD_WRAPPED(isolate, obj, set_descriptor, descriptor);
+ if (remover != 0) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover);
+ if (enumerator != 0)
+ SET_FIELD_WRAPPED(isolate, obj, set_enumerator, enumerator);
+ if (definer != 0) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer);
obj->set_can_intercept_symbols(
!(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@@ -1928,7 +1999,7 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
- SET_FIELD_WRAPPED(info, set_callback, callback);
+ SET_FIELD_WRAPPED(isolate, info, set_callback, callback);
info->set_named_interceptor(nullptr);
info->set_indexed_interceptor(nullptr);
@@ -1958,7 +2029,7 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
- SET_FIELD_WRAPPED(info, set_callback, callback);
+ SET_FIELD_WRAPPED(isolate, info, set_callback, callback);
auto named_interceptor = CreateNamedInterceptorInfo(
isolate, named_handler.getter, named_handler.setter, named_handler.query,
named_handler.descriptor, named_handler.deleter, named_handler.enumerator,
@@ -2002,8 +2073,8 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::CallHandlerInfo> obj = isolate->factory()->NewCallHandlerInfo();
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
+ SET_FIELD_WRAPPED(isolate, obj, set_callback, callback);
+ SET_FIELD_WRAPPED(isolate, obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -2082,11 +2153,9 @@ ScriptCompiler::StreamedSource::GetCachedData() const {
Local<Script> UnboundScript::BindToCurrentContext() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- i::Handle<i::SharedFunctionInfo> function_info(
- i::SharedFunctionInfo::cast(*obj), isolate);
+ auto function_info =
+ i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = function_info->GetIsolate();
i::Handle<i::JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
function_info, isolate->native_context());
@@ -2095,14 +2164,13 @@ Local<Script> UnboundScript::BindToCurrentContext() {
int UnboundScript::GetId() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
+ auto function_info =
+ i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = function_info->GetIsolate();
LOG_API(isolate, UnboundScript, GetId);
i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info(
- i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()),
+ isolate);
return script->id();
}
@@ -2113,7 +2181,7 @@ int UnboundScript::GetLineNumber(int code_pos) {
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetLineNumber);
if (obj->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(obj->script()));
+ i::Handle<i::Script> script(i::Script::cast(obj->script()), isolate);
return i::Script::GetLineNumber(script, code_pos);
} else {
return -1;
@@ -2188,7 +2256,7 @@ Local<Value> Script::Run() {
// If execution is terminating, Compile(..)->Run() requires this
// check.
if (self.is_null()) return Local<Value>();
- auto context = ContextFromHeapObject(self);
+ auto context = ContextFromNeverReadOnlySpaceObject(self);
RETURN_TO_LOCAL_UNCHECKED(Run(context), Value);
}
@@ -2210,8 +2278,9 @@ Local<PrimitiveArray> ScriptOrModule::GetHostDefinedOptions() {
Local<UnboundScript> Script::GetUnboundScript() {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return ToApiHandle<UnboundScript>(
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
+ i::SharedFunctionInfo* sfi = i::JSFunction::cast(*obj)->shared();
+ i::Isolate* isolate = sfi->GetIsolate();
+ return ToApiHandle<UnboundScript>(i::handle(sfi, isolate));
}
// static
@@ -2226,14 +2295,13 @@ Local<PrimitiveArray> PrimitiveArray::New(Isolate* v8_isolate, int length) {
int PrimitiveArray::Length() const {
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
- i::Isolate* isolate = array->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return array->length();
}
-void PrimitiveArray::Set(int index, Local<Primitive> item) {
+void PrimitiveArray::Set(Isolate* v8_isolate, int index,
+ Local<Primitive> item) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
- i::Isolate* isolate = array->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
Utils::ApiCheck(index >= 0 && index < array->length(),
"v8::PrimitiveArray::Set",
@@ -2243,9 +2311,17 @@ void PrimitiveArray::Set(int index, Local<Primitive> item) {
array->set(index, *i_item);
}
-Local<Primitive> PrimitiveArray::Get(int index) {
+void PrimitiveArray::Set(int index, Local<Primitive> item) {
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = array->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
+ Set(reinterpret_cast<Isolate*>(isolate), index, item);
+}
+
+Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
Utils::ApiCheck(index >= 0 && index < array->length(),
"v8::PrimitiveArray::Get",
@@ -2255,6 +2331,14 @@ Local<Primitive> PrimitiveArray::Get(int index) {
return ToApiHandle<Primitive>(i_item);
}
+Local<Primitive> PrimitiveArray::Get(int index) {
+ i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
+ DISABLE_DEPRECATED_WARNINGS
+ i::Isolate* isolate = array->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
+ return Get(reinterpret_cast<Isolate*>(isolate), index);
+}
+
Module::Status Module::GetStatus() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
switch (self->status()) {
@@ -2319,7 +2403,7 @@ Local<Value> Module::GetModuleNamespace() {
"v8::Module::GetModuleNamespace must be used on an instantiated module");
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Handle<i::JSModuleNamespace> module_namespace =
- i::Module::GetModuleNamespace(self);
+ i::Module::GetModuleNamespace(self->GetIsolate(), self);
return ToApiHandle<Value>(module_namespace);
}
@@ -2328,8 +2412,8 @@ Local<UnboundModuleScript> Module::GetUnboundModuleScript() {
GetStatus() < kEvaluating, "v8::Module::GetUnboundScript",
"v8::Module::GetUnboundScript must be used on an unevaluated module");
i::Handle<i::Module> self = Utils::OpenHandle(this);
- return ToApiHandle<UnboundModuleScript>(
- i::Handle<i::SharedFunctionInfo>(self->GetSharedFunctionInfo()));
+ return ToApiHandle<UnboundModuleScript>(i::Handle<i::SharedFunctionInfo>(
+ self->GetSharedFunctionInfo(), self->GetIsolate()));
}
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
@@ -2339,8 +2423,8 @@ Maybe<bool> Module::InstantiateModule(Local<Context> context,
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Module, InstantiateModule, Nothing<bool>(),
i::HandleScope);
- has_pending_exception =
- !i::Module::Instantiate(Utils::OpenHandle(this), context, callback);
+ has_pending_exception = !i::Module::Instantiate(
+ isolate, Utils::OpenHandle(this), context, callback);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -2359,7 +2443,7 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
CHECK_GE(self->status(), i::Module::kInstantiated);
Local<Value> result;
- has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
+ has_pending_exception = !ToLocal(i::Module::Evaluate(isolate, self), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -2470,15 +2554,18 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
return result->BindToCurrentContext();
}
-MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
- Source* source) {
+MaybeLocal<Module> ScriptCompiler::CompileModule(
+ Isolate* isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason) {
+ CHECK(options == kNoCompileOptions || options == kConsumeCodeCache);
+
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
- auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions,
- kNoCacheBecauseModule);
+ auto maybe =
+ CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
@@ -2561,9 +2648,10 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
context = isolate->factory()->NewWithContext(
context,
i::ScopeInfo::CreateForWithScope(
- isolate, context->IsNativeContext()
- ? i::Handle<i::ScopeInfo>::null()
- : i::Handle<i::ScopeInfo>(context->scope_info())),
+ isolate,
+ context->IsNativeContext()
+ ? i::Handle<i::ScopeInfo>::null()
+ : i::Handle<i::ScopeInfo>(context->scope_info(), isolate)),
extension);
}
@@ -2690,8 +2778,10 @@ ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
Local<Function> function) {
- i::Handle<i::SharedFunctionInfo> shared(
- i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*function))->shared());
+ auto js_function =
+ i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*function));
+ i::Handle<i::SharedFunctionInfo> shared(js_function->shared(),
+ js_function->GetIsolate());
CHECK(shared->is_wrapped());
return i::CodeSerializer::Serialize(shared);
}
@@ -2710,7 +2800,9 @@ MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
Local<Script> Script::Compile(v8::Local<String> source,
v8::ScriptOrigin* origin) {
auto str = Utils::OpenHandle(*source);
- auto context = ContextFromHeapObject(str);
+ DISABLE_DEPRECATED_WARNINGS
+ auto context = UnsafeContextFromHeapObject(str);
+ RESET_DEPRECATED_WARNINGS
RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, origin), Script);
}
@@ -2718,7 +2810,9 @@ Local<Script> Script::Compile(v8::Local<String> source,
Local<Script> Script::Compile(v8::Local<String> source,
v8::Local<String> file_name) {
auto str = Utils::OpenHandle(*source);
- auto context = ContextFromHeapObject(str);
+ DISABLE_DEPRECATED_WARNINGS
+ auto context = UnsafeContextFromHeapObject(str);
+ RESET_DEPRECATED_WARNINGS
ScriptOrigin origin(file_name);
return Compile(context, source, &origin).FromMaybe(Local<Script>());
}
@@ -2823,7 +2917,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!maybe.FromJust()) return v8::Local<Value>();
Local<Value> result;
has_pending_exception =
- !ToLocal<Value>(i::JSReceiver::GetProperty(obj, name), &result);
+ !ToLocal<Value>(i::JSReceiver::GetProperty(isolate, obj, name), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -2858,7 +2952,7 @@ void v8::TryCatch::Reset() {
void v8::TryCatch::ResetInternal() {
- i::Object* the_hole = isolate_->heap()->the_hole_value();
+ i::Object* the_hole = i::ReadOnlyRoots(isolate_).the_hole_value();
exception_ = the_hole;
message_obj_ = the_hole;
}
@@ -2893,9 +2987,7 @@ ScriptOrigin Message::GetScriptOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- auto script_wraper = i::Handle<i::Object>(message->script(), isolate);
- auto script_value = i::Handle<i::JSValue>::cast(script_wraper);
- i::Handle<i::Script> script(i::Script::cast(script_value->value()));
+ i::Handle<i::Script> script(message->script(), isolate);
return GetScriptOriginForScript(isolate, script);
}
@@ -2928,7 +3020,7 @@ Maybe<int> Message::GetLineNumber(Local<Context> context) const {
int Message::GetLineNumber() const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return GetLineNumber(context).FromMaybe(0);
}
@@ -2983,10 +3075,8 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto self = Utils::OpenHandle(this);
- auto script = i::Handle<i::JSValue>::cast(
- i::Handle<i::Object>(self->script(), isolate));
- return i::Script::cast(script->value())
+ return Utils::OpenHandle(this)
+ ->script()
->origin_options()
.IsSharedCrossOrigin();
}
@@ -2994,10 +3084,7 @@ bool Message::IsSharedCrossOrigin() const {
bool Message::IsOpaque() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto self = Utils::OpenHandle(this);
- auto script = i::Handle<i::JSValue>::cast(
- i::Handle<i::Object>(self->script(), isolate));
- return i::Script::cast(script->value())->origin_options().IsOpaque();
+ return Utils::OpenHandle(this)->script()->origin_options().IsOpaque();
}
@@ -3012,7 +3099,7 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
Local<String> Message::GetSourceLine() const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetSourceLine(context), String)
}
@@ -3037,7 +3124,9 @@ Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
}
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
return GetFrame(reinterpret_cast<Isolate*>(isolate), index);
}
@@ -3123,7 +3212,7 @@ bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->is_wasm(); }
MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
PREPARE_FOR_EXECUTION(v8_isolate->GetCurrentContext(), JSON, Parse, Value);
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
- i::Handle<i::String> source = i::String::Flatten(string);
+ i::Handle<i::String> source = i::String::Flatten(isolate, string);
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
auto maybe = source->IsSeqOneByteString()
? i::JsonParser<true>::Parse(isolate, source, undefined)
@@ -3138,7 +3227,7 @@ MaybeLocal<Value> JSON::Parse(Local<Context> context,
Local<String> json_string) {
PREPARE_FOR_EXECUTION(context, JSON, Parse, Value);
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
- i::Handle<i::String> source = i::String::Flatten(string);
+ i::Handle<i::String> source = i::String::Flatten(isolate, string);
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
auto maybe = source->IsSeqOneByteString()
? i::JsonParser<true>::Parse(isolate, source, undefined)
@@ -3427,10 +3516,7 @@ bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
bool Value::FullIsUndefined() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
- bool result = false;
- if (!object->IsSmi()) {
- result = object->IsUndefined(i::HeapObject::cast(*object)->GetIsolate());
- }
+ bool result = object->IsUndefined();
DCHECK_EQ(result, QuickIsUndefined());
return result;
}
@@ -3438,10 +3524,7 @@ bool Value::FullIsUndefined() const {
bool Value::FullIsNull() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
- bool result = false;
- if (!object->IsSmi()) {
- result = object->IsNull(i::HeapObject::cast(*object)->GetIsolate());
- }
+ bool result = object->IsNull();
DCHECK_EQ(result, QuickIsNull());
return result;
}
@@ -3450,14 +3533,14 @@ bool Value::FullIsNull() const {
bool Value::IsTrue() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
if (object->IsSmi()) return false;
- return object->IsTrue(i::HeapObject::cast(*object)->GetIsolate());
+ return object->IsTrue();
}
bool Value::IsFalse() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
if (object->IsSmi()) return false;
- return object->IsFalse(i::HeapObject::cast(*object)->GetIsolate());
+ return object->IsFalse();
}
@@ -3570,9 +3653,16 @@ bool Value::IsBoolean() const {
return Utils::OpenHandle(this)->IsBoolean();
}
-
bool Value::IsExternal() const {
- return Utils::OpenHandle(this)->IsExternal();
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsHeapObject()) return false;
+ i::Handle<i::HeapObject> heap_obj = i::Handle<i::HeapObject>::cast(obj);
+ // Check the instance type is JS_OBJECT (instance type of Externals) before
+ // attempting to get the Isolate since that guarantees the object is writable
+ // and GetIsolate will work.
+ if (heap_obj->map()->instance_type() != i::JS_OBJECT_TYPE) return false;
+ i::Isolate* isolate = i::JSObject::cast(*heap_obj)->GetIsolate();
+ return heap_obj->IsExternal(isolate);
}
@@ -3704,7 +3794,7 @@ MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) return ToApiHandle<Boolean>(obj);
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- auto val = isolate->factory()->ToBoolean(obj->BooleanValue());
+ auto val = isolate->factory()->ToBoolean(obj->BooleanValue(isolate));
return ToApiHandle<Boolean>(val);
}
@@ -3719,7 +3809,8 @@ MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
if (obj->IsNumber()) return ToApiHandle<Number>(obj);
PREPARE_FOR_EXECUTION(context, Object, ToNumber, Number);
Local<Number> result;
- has_pending_exception = !ToLocal<Number>(i::Object::ToNumber(obj), &result);
+ has_pending_exception =
+ !ToLocal<Number>(i::Object::ToNumber(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Number);
RETURN_ESCAPED(result);
}
@@ -3785,7 +3876,7 @@ void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
void External::CheckCast(v8::Value* that) {
- Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(), "v8::External::Cast",
+ Utils::ApiCheck(that->IsExternal(), "v8::External::Cast",
"Could not convert to external");
}
@@ -4018,12 +4109,19 @@ void v8::RegExp::CheckCast(v8::Value* that) {
Maybe<bool> Value::BooleanValue(Local<Context> context) const {
- return Just(Utils::OpenHandle(this)->BooleanValue());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ return Just(Utils::OpenHandle(this)->BooleanValue(isolate));
}
bool Value::BooleanValue() const {
- return Utils::OpenHandle(this)->BooleanValue();
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return *obj != i::Smi::kZero;
+ DCHECK(obj->IsHeapObject());
+ DISABLE_DEPRECATED_WARNINGS
+ i::Isolate* isolate = i::Handle<i::HeapObject>::cast(obj)->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
+ return obj->BooleanValue(isolate);
}
@@ -4034,7 +4132,7 @@ Maybe<double> Value::NumberValue(Local<Context> context) const {
ENTER_V8(isolate, context, Value, NumberValue, Nothing<double>(),
i::HandleScope);
i::Handle<i::Object> num;
- has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
+ has_pending_exception = !i::Object::ToNumber(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
return Just(num->Number());
}
@@ -4043,8 +4141,10 @@ Maybe<double> Value::NumberValue(Local<Context> context) const {
double Value::NumberValue() const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return obj->Number();
- return NumberValue(ContextFromHeapObject(obj))
+ DISABLE_DEPRECATED_WARNINGS
+ return NumberValue(UnsafeContextFromHeapObject(obj))
.FromMaybe(std::numeric_limits<double>::quiet_NaN());
+ RESET_DEPRECATED_WARNINGS
}
@@ -4072,7 +4172,9 @@ int64_t Value::IntegerValue() const {
return static_cast<int64_t>(obj->Number());
}
}
- return IntegerValue(ContextFromHeapObject(obj)).FromMaybe(0);
+ DISABLE_DEPRECATED_WARNINGS
+ return IntegerValue(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
+ RESET_DEPRECATED_WARNINGS
}
@@ -4093,7 +4195,9 @@ Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
int32_t Value::Int32Value() const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return NumberToInt32(*obj);
- return Int32Value(ContextFromHeapObject(obj)).FromMaybe(0);
+ DISABLE_DEPRECATED_WARNINGS
+ return Int32Value(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
+ RESET_DEPRECATED_WARNINGS
}
@@ -4114,7 +4218,9 @@ Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
uint32_t Value::Uint32Value() const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return NumberToUint32(*obj);
- return Uint32Value(ContextFromHeapObject(obj)).FromMaybe(0);
+ DISABLE_DEPRECATED_WARNINGS
+ return Uint32Value(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
+ RESET_DEPRECATED_WARNINGS
}
@@ -4145,9 +4251,10 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
+ i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
- return i::Object::Equals(self, other);
+ return i::Object::Equals(isolate, self, other);
}
@@ -4161,7 +4268,9 @@ bool Value::Equals(Local<Value> that) const {
return *self == *other;
}
auto heap_object = self->IsSmi() ? other : self;
- auto context = ContextFromHeapObject(heap_object);
+ DISABLE_DEPRECATED_WARNINGS
+ auto context = UnsafeContextFromHeapObject(heap_object);
+ RESET_DEPRECATED_WARNINGS
return Equals(context, that).FromMaybe(false);
}
@@ -4217,7 +4326,7 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Set(context, key, value).FromMaybe(false);
}
@@ -4237,7 +4346,7 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Set(context, index, value).FromMaybe(false);
}
@@ -4464,7 +4573,7 @@ MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
Local<Value> v8::Object::Get(v8::Local<Value> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
}
@@ -4481,7 +4590,7 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
Local<Value> v8::Object::Get(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value);
}
@@ -4605,7 +4714,7 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
Local<Array> v8::Object::GetPropertyNames() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetPropertyNames(context), Array);
}
@@ -4615,7 +4724,7 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
}
Local<Array> v8::Object::GetOwnPropertyNames() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
}
@@ -4685,7 +4794,7 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
}
bool v8::Object::Delete(v8::Local<Value> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Delete(context, key).FromMaybe(false);
}
@@ -4729,7 +4838,7 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
bool v8::Object::Has(v8::Local<Value> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Has(context, key).FromMaybe(false);
}
@@ -4789,7 +4898,7 @@ static Maybe<bool> ObjectSetAccessor(
!i::JSObject::SetAccessor(obj, accessor_name, info, attrs)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- if (result->IsUndefined(obj->GetIsolate())) return Just(false);
+ if (result->IsUndefined(isolate)) return Just(false);
if (fast) {
i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
@@ -4888,7 +4997,7 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
bool v8::Object::HasRealNamedProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return HasRealNamedProperty(context, key).FromMaybe(false);
}
@@ -4909,7 +5018,7 @@ Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return HasRealIndexedProperty(context, index).FromMaybe(false);
}
@@ -4931,7 +5040,7 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
bool v8::Object::HasRealNamedCallbackProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return HasRealNamedCallbackProperty(context, key).FromMaybe(false);
}
@@ -5207,7 +5316,7 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
Local<v8::Value> Function::Call(v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Call(context, recv, argc, argv), Value);
}
@@ -5259,7 +5368,7 @@ Local<Value> Function::GetDebugName() const {
}
auto func = i::Handle<i::JSFunction>::cast(self);
i::Handle<i::String> name = i::JSFunction::GetDebugName(func);
- return Utils::ToLocal(i::Handle<i::Object>(*name, name->GetIsolate()));
+ return Utils::ToLocal(i::Handle<i::Object>(*name, self->GetIsolate()));
}
@@ -5290,7 +5399,8 @@ ScriptOrigin Function::GetScriptOrigin() const {
}
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ func->GetIsolate());
return GetScriptOriginForScript(func->GetIsolate(), script);
}
return v8::ScriptOrigin(Local<Value>());
@@ -5307,7 +5417,8 @@ int Function::GetScriptLineNumber() const {
}
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ func->GetIsolate());
return i::Script::GetLineNumber(script, func->shared()->StartPosition());
}
return kLineOffsetNotFound;
@@ -5321,7 +5432,8 @@ int Function::GetScriptColumnNumber() const {
}
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ func->GetIsolate());
return i::Script::GetColumnNumber(script, func->shared()->StartPosition());
}
return kLineOffsetNotFound;
@@ -5337,7 +5449,8 @@ int Function::ScriptId() const {
if (!func->shared()->script()->IsScript()) {
return v8::UnboundScript::kNoScriptId;
}
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ func->GetIsolate());
return script->id();
}
@@ -5487,13 +5600,14 @@ bool String::ContainsOnlyOneByte() const {
return helper.Check(*str);
}
-int String::Utf8Length(Isolate* isolate) const {
- return Utf8Length();
+int String::Utf8Length() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ return Utf8Length(reinterpret_cast<Isolate*>(isolate));
}
-int String::Utf8Length() const {
+int String::Utf8Length(Isolate* isolate) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- str = i::String::Flatten(str);
+ str = i::String::Flatten(reinterpret_cast<i::Isolate*>(isolate), str);
int length = str->length();
if (length == 0) return 0;
i::DisallowHeapAllocation no_gc;
@@ -5515,7 +5629,6 @@ int String::Utf8Length() const {
return utf8_length;
}
-
class Utf8WriterVisitor {
public:
Utf8WriterVisitor(
@@ -5715,14 +5828,13 @@ static bool RecursivelySerializeToUtf8(i::String* current,
return true;
}
-
int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int* nchars_ref, int options) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, String, WriteUtf8);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- str = i::String::Flatten(str); // Flatten the string for efficiency.
+ str = i::String::Flatten(isolate, str); // Flatten the string for efficiency.
const int string_length = str->length();
bool write_null = !(options & NO_NULL_TERMINATION);
bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8);
@@ -5735,7 +5847,7 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
if (success) return writer.CompleteWrite(write_null, nchars_ref);
} else if (capacity >= string_length) {
// First check that the buffer is large enough.
- int utf8_bytes = Utf8Length();
+ int utf8_bytes = Utf8Length(reinterpret_cast<Isolate*>(isolate));
if (utf8_bytes <= capacity) {
// one-byte fast path.
if (utf8_bytes == string_length) {
@@ -5763,7 +5875,9 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int String::WriteUtf8(char* buffer, int capacity, int* nchars_ref,
int options) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = str->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
return WriteUtf8(reinterpret_cast<Isolate*>(isolate), buffer, capacity,
nchars_ref, options);
}
@@ -5776,7 +5890,7 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
- str = i::String::Flatten(str);
+ str = i::String::Flatten(isolate, str);
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
end = str->length();
@@ -5794,7 +5908,9 @@ int String::WriteOneByte(uint8_t* buffer,
int start,
int length,
int options) const {
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
return WriteHelper(isolate, this, buffer, start, length, options);
}
@@ -5809,7 +5925,9 @@ int String::Write(uint16_t* buffer,
int start,
int length,
int options) const {
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
return WriteHelper(isolate, this, buffer, start, length, options);
}
@@ -5885,7 +6003,19 @@ v8::String::GetExternalOneByteStringResource() const {
Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
- i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
+
+ i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*sym);
+ // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
+ // objects are immovable we can use the Handle(T**) constructor with the
+ // address of the name field in the Symbol object without needing an isolate.
+ if (chunk->owner()->identity() == i::RO_SPACE) {
+ i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::HeapObject**>(
+ sym->GetFieldAddress(i::Symbol::kNameOffset)));
+ return Utils::ToLocal(ro_name);
+ }
+
+ i::Handle<i::Object> name(sym->name(), chunk->heap()->isolate());
+
return Utils::ToLocal(name);
}
@@ -5903,7 +6033,7 @@ double Number::Value() const {
bool Boolean::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsTrue(i::HeapObject::cast(*obj)->GetIsolate());
+ return obj->IsTrue();
}
@@ -6006,8 +6136,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
static void* ExternalValue(i::Object* obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
- if (!obj->IsSmi() &&
- obj->IsUndefined(i::HeapObject::cast(obj)->GetIsolate())) {
+ if (obj->IsUndefined()) {
return nullptr;
}
i::Object* foreign = i::JSObject::cast(obj)->GetEmbedderField(0);
@@ -6206,7 +6335,7 @@ static i::Handle<ObjectType> CreateEnvironment(
global_constructor->needs_access_check());
global_constructor->set_needs_access_check(false);
global_constructor->set_access_check_info(
- isolate->heap()->undefined_value());
+ i::ReadOnlyRoots(isolate).undefined_value());
}
// Same for other interceptors. If the global constructor has
@@ -6217,14 +6346,14 @@ static i::Handle<ObjectType> CreateEnvironment(
named_interceptor =
handle(global_constructor->named_property_handler(), isolate);
global_constructor->set_named_property_handler(
- isolate->heap()->noop_interceptor_info());
+ i::ReadOnlyRoots(isolate).noop_interceptor_info());
}
if (!global_constructor->indexed_property_handler()->IsUndefined(
isolate)) {
indexed_interceptor =
handle(global_constructor->indexed_property_handler(), isolate);
global_constructor->set_indexed_property_handler(
- isolate->heap()->noop_interceptor_info());
+ i::ReadOnlyRoots(isolate).noop_interceptor_info());
}
}
@@ -6397,7 +6526,8 @@ void Context::AllowCodeGenerationFromStrings(bool allow) {
i::Isolate* isolate = context->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
context->set_allow_code_gen_from_strings(
- allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
+ allow ? i::ReadOnlyRoots(isolate).true_value()
+ : i::ReadOnlyRoots(isolate).false_value());
}
@@ -6422,10 +6552,12 @@ i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::Object* object = list->get(int_index);
if (!object->IsTheHole(isolate)) {
list->set_the_hole(isolate, int_index);
- // Shrink the list so that the last element is not the hole.
+ // Shrink the list so that the last element is not the hole (unless it's
+ // the first element, because we don't want to end up with a non-canonical
+ // empty FixedArray).
int last = list->length() - 1;
while (last >= 0 && list->is_the_hole(isolate, last)) last--;
- list->Shrink(last + 1);
+ if (last != -1) list->Shrink(isolate, last + 1);
return i::Handle<i::Object>(object, isolate).location();
}
}
@@ -6436,7 +6568,7 @@ i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::Object** Context::GetDataFromSnapshotOnce(size_t index) {
auto context = Utils::OpenHandle(this);
i::Isolate* i_isolate = context->GetIsolate();
- i::FixedArray* list = i::FixedArray::cast(context->serialized_objects());
+ i::FixedArray* list = context->serialized_objects();
return GetSerializedDataFromFixedArray(i_isolate, list, index);
}
@@ -6444,15 +6576,15 @@ MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, ObjectTemplate, NewInstance, Object);
auto self = Utils::OpenHandle(this);
Local<Object> result;
- has_pending_exception =
- !ToLocal<Object>(i::ApiNatives::InstantiateObject(self), &result);
+ has_pending_exception = !ToLocal<Object>(
+ i::ApiNatives::InstantiateObject(isolate, self), &result);
RETURN_ON_FAILED_EXECUTION(Object);
RETURN_ESCAPED(result);
}
Local<v8::Object> ObjectTemplate::NewInstance() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
}
@@ -6492,7 +6624,7 @@ MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
Local<v8::Function> FunctionTemplate::GetFunction() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetFunction(context), Function);
}
@@ -6532,7 +6664,8 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
if (obj->IsJSGlobalProxy()) {
// If it's a global proxy, then test with the global object. Note that the
// inner global object may not necessarily be a JSGlobalObject.
- i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
+ i::PrototypeIterator iter(self->GetIsolate(),
+ i::JSObject::cast(*obj)->map());
// The global proxy should always have a prototype, as it is a bug to call
// this on a detached JSGlobalProxy.
DCHECK(!iter.IsAtEnd());
@@ -6692,7 +6825,9 @@ Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
Local<String> v8::String::Concat(Local<String> left, Local<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = left_string->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
return Concat(reinterpret_cast<Isolate*>(isolate), left, right);
}
@@ -6710,7 +6845,6 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
.ToHandleChecked();
- i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
} else {
// The resource isn't going to be used, free it immediately.
@@ -6734,7 +6868,6 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
- i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
} else {
// The resource isn't going to be used, free it immediately.
@@ -6752,7 +6885,13 @@ Local<String> v8::String::NewExternal(
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
+ // RO_SPACE strings cannot be externalized.
+ i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
+ if (chunk->owner()->identity() == i::RO_SPACE) {
+ return false;
+ }
+
+ i::Isolate* isolate = chunk->heap()->isolate();
if (i::StringShape(*obj).IsExternal()) {
return false; // Already an external string.
}
@@ -6767,7 +6906,6 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
DCHECK(!CanMakeExternal() || result);
if (result) {
DCHECK(obj->IsExternalString());
- isolate->heap()->RegisterExternalString(*obj);
}
return result;
}
@@ -6776,7 +6914,14 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool v8::String::MakeExternal(
v8::String::ExternalOneByteStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
+
+ // RO_SPACE strings cannot be externalized.
+ i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
+ if (chunk->owner()->identity() == i::RO_SPACE) {
+ return false;
+ }
+
+ i::Isolate* isolate = chunk->heap()->isolate();
if (i::StringShape(*obj).IsExternal()) {
return false; // Already an external string.
}
@@ -6791,7 +6936,6 @@ bool v8::String::MakeExternal(
DCHECK(!CanMakeExternal() || result);
if (result) {
DCHECK(obj->IsExternalString());
- isolate->heap()->RegisterExternalString(*obj);
}
return result;
}
@@ -6801,10 +6945,10 @@ bool v8::String::CanMakeExternal() {
i::Handle<i::String> obj = Utils::OpenHandle(this);
if (obj->IsExternalString()) return false;
- // Old space strings should be externalized.
- i::Heap* heap = obj->GetIsolate()->heap();
- return !heap->new_space()->Contains(*obj) &&
- !heap->read_only_space()->Contains(*obj);
+ // Only old space strings should be externalized.
+ i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
+ i::AllocationSpace space = chunk->owner()->identity();
+ return space != i::NEW_SPACE && space != i::RO_SPACE;
}
@@ -6860,15 +7004,16 @@ Local<v8::BigInt> v8::BigIntObject::ValueOf() const {
i::Isolate* isolate = jsvalue->GetIsolate();
LOG_API(isolate, BigIntObject, BigIntValue);
return Utils::ToLocal(
- i::Handle<i::BigInt>(i::BigInt::cast(jsvalue->value())));
+ i::Handle<i::BigInt>(i::BigInt::cast(jsvalue->value()), isolate));
}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, BooleanObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
- : i_isolate->heap()->false_value(),
+ i::Handle<i::Object> boolean(value
+ ? i::ReadOnlyRoots(i_isolate).true_value()
+ : i::ReadOnlyRoots(i_isolate).false_value(),
i_isolate);
i::Handle<i::Object> obj =
i::Object::ToObject(i_isolate, boolean).ToHandleChecked();
@@ -6887,7 +7032,16 @@ bool v8::BooleanObject::ValueOf() const {
Local<v8::Value> v8::StringObject::New(Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
+ DISABLE_DEPRECATED_WARNINGS
i::Isolate* isolate = string->GetIsolate();
+ RESET_DEPRECATED_WARNINGS
+ return New(reinterpret_cast<Isolate*>(isolate), value);
+}
+
+Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
+ Local<String> value) {
+ i::Handle<i::String> string = Utils::OpenHandle(*value);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, StringObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> obj =
@@ -6902,7 +7056,7 @@ Local<v8::String> v8::StringObject::ValueOf() const {
i::Isolate* isolate = jsvalue->GetIsolate();
LOG_API(isolate, StringObject, StringValue);
return Utils::ToLocal(
- i::Handle<i::String>(i::String::cast(jsvalue->value())));
+ i::Handle<i::String>(i::String::cast(jsvalue->value()), isolate));
}
@@ -6922,7 +7076,7 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Isolate* isolate = jsvalue->GetIsolate();
LOG_API(isolate, SymbolObject, SymbolValue);
return Utils::ToLocal(
- i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
+ i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value()), isolate));
}
@@ -6980,7 +7134,7 @@ MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
Local<v8::RegExp> result;
has_pending_exception =
- !ToLocal<RegExp>(i::JSRegExp::New(Utils::OpenHandle(*pattern),
+ !ToLocal<RegExp>(i::JSRegExp::New(isolate, Utils::OpenHandle(*pattern),
static_cast<i::JSRegExp::Flags>(flags)),
&result);
RETURN_ON_FAILED_EXECUTION(RegExp);
@@ -6998,7 +7152,8 @@ Local<v8::RegExp> v8::RegExp::New(Local<String> pattern, Flags flags) {
Local<v8::String> v8::RegExp::GetSource() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
+ return Utils::ToLocal(
+ i::Handle<i::String>(obj->Pattern(), obj->GetIsolate()));
}
@@ -7064,7 +7219,7 @@ void Map::Clear() {
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Map, Clear);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::JSMap::Clear(self);
+ i::JSMap::Clear(isolate, self);
}
@@ -7135,7 +7290,8 @@ enum class MapAsArrayKind {
i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
int offset, MapAsArrayKind kind) {
i::Factory* factory = isolate->factory();
- i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj));
+ i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj),
+ isolate);
if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
int length = (table->NumberOfElements() - offset) *
(kind == MapAsArrayKind::kEntries ? 2 : 1);
@@ -7144,7 +7300,7 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
{
i::DisallowHeapAllocation no_gc;
int capacity = table->UsedCapacity();
- i::Oddball* the_hole = isolate->heap()->the_hole_value();
+ i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
@@ -7194,7 +7350,7 @@ void Set::Clear() {
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Set, Clear);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::JSSet::Clear(self);
+ i::JSSet::Clear(isolate, self);
}
@@ -7242,7 +7398,8 @@ namespace {
i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
int offset) {
i::Factory* factory = isolate->factory();
- i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj));
+ i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj),
+ isolate);
int length = table->NumberOfElements() - offset;
if (length <= 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
@@ -7250,7 +7407,7 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
{
i::DisallowHeapAllocation no_gc;
int capacity = table->UsedCapacity();
- i::Oddball* the_hole = isolate->heap()->the_hole_value();
+ i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
@@ -7315,7 +7472,7 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
void Promise::Resolver::Resolve(Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
USE(Resolve(context, value));
}
@@ -7340,7 +7497,7 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
void Promise::Resolver::Reject(Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
USE(Reject(context, value));
}
@@ -7442,11 +7599,19 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
RETURN_ESCAPED(result);
}
-Local<String> WasmCompiledModule::GetWasmWireBytes() {
+WasmCompiledModule::BufferReference WasmCompiledModule::GetWasmWireBytesRef() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::String> wire_bytes(obj->shared()->module_bytes());
- return Local<String>::Cast(Utils::ToLocal(wire_bytes));
+ i::Vector<const uint8_t> bytes_vec = obj->native_module()->wire_bytes();
+ return {bytes_vec.start(), bytes_vec.size()};
+}
+
+Local<String> WasmCompiledModule::GetWasmWireBytes() {
+ BufferReference ref = GetWasmWireBytesRef();
+ CHECK_LE(ref.size, String::kMaxLength);
+ return String::NewFromOneByte(GetIsolate(), ref.start, NewStringType::kNormal,
+ static_cast<int>(ref.size))
+ .ToLocalChecked();
}
// Currently, wasm modules are bound, both to Isolate and to
@@ -7458,50 +7623,45 @@ WasmCompiledModule::GetTransferrableModule() {
i::DisallowHeapAllocation no_gc;
WasmCompiledModule::SerializedModule compiled_part = Serialize();
- Local<String> wire_bytes = GetWasmWireBytes();
- size_t wire_size = static_cast<size_t>(wire_bytes->Length());
- uint8_t* bytes = new uint8_t[wire_size];
- wire_bytes->WriteOneByte(bytes, 0, wire_bytes->Length());
+ BufferReference wire_bytes_ref = GetWasmWireBytesRef();
+ size_t wire_size = wire_bytes_ref.size;
+ std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]);
+ memcpy(wire_bytes_copy.get(), wire_bytes_ref.start, wire_size);
- return TransferrableModule(
- std::move(compiled_part),
- std::make_pair(
- std::unique_ptr<const uint8_t[]>(const_cast<const uint8_t*>(bytes)),
- wire_size));
+ return TransferrableModule(std::move(compiled_part),
+ {std::move(wire_bytes_copy), wire_size});
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::FromTransferrableModule(
Isolate* isolate,
const WasmCompiledModule::TransferrableModule& transferrable_module) {
MaybeLocal<WasmCompiledModule> ret =
- Deserialize(isolate, AsCallerOwned(transferrable_module.compiled_code),
- AsCallerOwned(transferrable_module.wire_bytes));
+ Deserialize(isolate, AsReference(transferrable_module.compiled_code),
+ AsReference(transferrable_module.wire_bytes));
return ret;
}
WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::WasmCompiledModule> compiled_part =
- i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
+ i::wasm::NativeModule* native_module = obj->native_module();
size_t buffer_size =
- i::wasm::GetSerializedNativeModuleSize(obj->GetIsolate(), compiled_part);
+ i::wasm::GetSerializedNativeModuleSize(obj->GetIsolate(), native_module);
std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- if (i::wasm::SerializeNativeModule(obj->GetIsolate(), compiled_part,
+ if (i::wasm::SerializeNativeModule(obj->GetIsolate(), native_module,
{buffer.get(), buffer_size}))
return {std::move(buffer), buffer_size};
return {};
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
- Isolate* isolate,
- const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
- const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+ Isolate* isolate, WasmCompiledModule::BufferReference serialized_module,
+ WasmCompiledModule::BufferReference wire_bytes) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::MaybeHandle<i::WasmModuleObject> maybe_module_object =
i::wasm::DeserializeNativeModule(
- i_isolate, {serialized_module.first, serialized_module.second},
- {wire_bytes.first, wire_bytes.second});
+ i_isolate, {serialized_module.start, serialized_module.size},
+ {wire_bytes.start, wire_bytes.size});
i::Handle<i::WasmModuleObject> module_object;
if (!maybe_module_object.ToHandle(&module_object)) {
return MaybeLocal<WasmCompiledModule>();
@@ -7511,15 +7671,14 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::DeserializeOrCompile(
- Isolate* isolate,
- const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
- const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+ Isolate* isolate, WasmCompiledModule::BufferReference serialized_module,
+ WasmCompiledModule::BufferReference wire_bytes) {
MaybeLocal<WasmCompiledModule> ret =
Deserialize(isolate, serialized_module, wire_bytes);
if (!ret.IsEmpty()) {
return ret;
}
- return Compile(isolate, wire_bytes.first, wire_bytes.second);
+ return Compile(isolate, wire_bytes.start, wire_bytes.size);
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
@@ -7538,6 +7697,38 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
}
+// Resolves the result of streaming compilation.
+// TODO(ahaas): Refactor the streaming compilation API so that this class can
+// move to wasm-js.cc.
+class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
+ public:
+ AsyncCompilationResolver(Isolate* isolate, Handle<Promise> promise)
+ : promise_(
+ reinterpret_cast<i::Isolate*>(isolate)->global_handles()->Create(
+ *Utils::OpenHandle(*promise))) {}
+
+ ~AsyncCompilationResolver() {
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ }
+
+ void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Resolve(promise_, result);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Reject(promise_, error_reason);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ private:
+ i::Handle<i::JSPromise> promise_;
+};
+
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Isolate* isolate)
: isolate_(isolate) {
@@ -7546,10 +7737,10 @@ WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Local<Promise::Resolver> resolver = maybe_resolver.ToLocalChecked();
promise_.Reset(isolate, resolver->GetPromise());
- i::Handle<i::JSPromise> promise = Utils::OpenHandle(*GetPromise());
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation(
- i_isolate, handle(i_isolate->context()), promise);
+ i_isolate, handle(i_isolate->context(), i_isolate),
+ base::make_unique<AsyncCompilationResolver>(isolate, GetPromise()));
}
Local<Promise> WasmModuleObjectBuilderStreaming::GetPromise() {
@@ -7580,7 +7771,8 @@ void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
- Local<Context> context = Utils::ToLocal(handle(i_isolate->context()));
+ Local<Context> context =
+ Utils::ToLocal(handle(i_isolate->context(), i_isolate));
auto maybe = resolver->Reject(context, exception.ToLocalChecked());
CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
}
@@ -7620,8 +7812,7 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
// longer track it.
//
// TODO(eholk): Find a way to track this across externalization
- isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
- self->backing_store());
+ self->StopTrackingWasmMemory(isolate);
}
isolate->heap()->UnregisterArrayBuffer(*self);
@@ -7701,9 +7892,11 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer;
if (obj->IsJSDataView()) {
- i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj),
+ obj->GetIsolate());
DCHECK(data_view->buffer()->IsJSArrayBuffer());
- buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
+ buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()),
+ data_view->GetIsolate());
} else {
DCHECK(obj->IsJSTypedArray());
buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
@@ -7719,13 +7912,16 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Min(byte_length, i::NumberToSize(self->byte_length()));
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()));
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
+ isolate);
const char* source = reinterpret_cast<char*>(buffer->backing_store());
if (source == nullptr) {
DCHECK(self->IsJSTypedArray());
- i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*self));
+ i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*self),
+ isolate);
i::Handle<i::FixedTypedArrayBase> fixed_array(
- i::FixedTypedArrayBase::cast(typed_array->elements()));
+ i::FixedTypedArrayBase::cast(typed_array->elements()), isolate);
source = reinterpret_cast<char*>(fixed_array->DataPtr());
}
memcpy(dest, source + byte_offset, bytes_to_copy);
@@ -7736,7 +7932,8 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
bool v8::ArrayBufferView::HasBuffer() const {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()));
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
+ self->GetIsolate());
return buffer->backing_store() != nullptr;
}
@@ -7849,8 +8046,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
// longer track it.
//
// TODO(eholk): Find a way to track this across externalization
- isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
- self->backing_store());
+ self->StopTrackingWasmMemory(isolate);
}
isolate->heap()->UnregisterArrayBuffer(*self);
@@ -8089,13 +8285,6 @@ HeapProfiler* Isolate::GetHeapProfiler() {
return reinterpret_cast<HeapProfiler*>(heap_profiler);
}
-
-CpuProfiler* Isolate::GetCpuProfiler() {
- i::CpuProfiler* cpu_profiler =
- reinterpret_cast<i::Isolate*>(this)->EnsureCpuProfiler();
- return reinterpret_cast<CpuProfiler*>(cpu_profiler);
-}
-
void Isolate::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetIdle(is_idle);
@@ -8113,7 +8302,7 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
if (context == nullptr) return Local<Context>();
i::Context* native_context = context->native_context();
if (native_context == nullptr) return Local<Context>();
- return Utils::ToLocal(i::Handle<i::Context>(native_context));
+ return Utils::ToLocal(i::Handle<i::Context>(native_context, isolate));
}
@@ -8158,7 +8347,7 @@ v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
// If we're passed an empty handle, we throw an undefined exception
// to deal more gracefully with out of memory situations.
if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
+ isolate->ScheduleThrow(i::ReadOnlyRoots(isolate).undefined_value());
} else {
isolate->ScheduleThrow(*Utils::OpenHandle(*value));
}
@@ -8479,10 +8668,15 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
+ // TODO(7424): There is no public API for the {WasmEngine} yet. Once such an
+ // API becomes available we should report the malloced memory separately. For
+ // now we just add the values, thereby over-approximating the peak slightly.
heap_statistics->malloced_memory_ =
- isolate->allocator()->GetCurrentMemoryUsage();
+ isolate->allocator()->GetCurrentMemoryUsage() +
+ isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
heap_statistics->peak_malloced_memory_ =
- isolate->allocator()->GetMaxMemoryUsage();
+ isolate->allocator()->GetMaxMemoryUsage() +
+ isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
heap_statistics->number_of_detached_contexts_ =
heap->NumberOfDetachedContexts();
@@ -8628,6 +8822,15 @@ void Isolate::RemoveCallCompletedCallback(
reinterpret_cast<CallCompletedCallback>(callback));
}
+void Isolate::AtomicsWaitWakeHandle::Wake() {
+ reinterpret_cast<i::AtomicsWaitWakeHandle*>(this)->Wake();
+}
+
+void Isolate::SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetAtomicsWaitCallback(callback, data);
+}
+
void Isolate::SetPromiseHook(PromiseHook hook) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetPromiseHook(hook);
@@ -8758,9 +8961,9 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!dependant_context) {
- // We left the current context, we can abort all running WebAssembly
- // compilations.
- isolate->wasm_engine()->AbortAllCompileJobs();
+    // We left the current context; we can abort all WebAssembly compilations
+    // for that isolate.
+ isolate->wasm_engine()->AbortCompileJobsOnIsolate(isolate);
}
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
@@ -8790,6 +8993,16 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
on_isolate_thread);
}
+void Isolate::EnableMemorySavingsMode() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->EnableMemorySavingsMode();
+}
+
+void Isolate::DisableMemorySavingsMode() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->DisableMemorySavingsMode();
+}
+
void Isolate::SetRAILMode(RAILMode rail_mode) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->SetRAILMode(rail_mode);
@@ -8853,6 +9066,9 @@ CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback,
wasm_compile_streaming_callback)
+CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback,
+ wasm_streaming_callback)
+
void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8885,7 +9101,7 @@ bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(FUNCTION_ADDR(that));
listener->set(0, *foreign);
- listener->set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
+ listener->set(1, data.IsEmpty() ? i::ReadOnlyRoots(isolate).undefined_value()
: *Utils::OpenHandle(*data));
listener->set(2, i::Smi::FromInt(message_levels));
list = i::TemplateList::Add(isolate, list, listener);
@@ -8905,7 +9121,7 @@ void Isolate::RemoveMessageListeners(MessageCallback that) {
i::FixedArray* listener = i::FixedArray::cast(listeners->get(i));
i::Foreign* callback_obj = i::Foreign::cast(listener->get(0));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
- listeners->set(i, isolate->heap()->undefined_value());
+ listeners->set(i, i::ReadOnlyRoots(isolate).undefined_value());
}
}
}
@@ -9021,7 +9237,7 @@ String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
TryCatch try_catch(isolate);
Local<String> str;
if (!obj->ToString(context).ToLocal(&str)) return;
- length_ = str->Utf8Length();
+ length_ = str->Utf8Length(isolate);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
}
@@ -9122,41 +9338,14 @@ v8_inspector::V8Inspector* debug::GetInspector(Isolate* isolate) {
return reinterpret_cast<i::Isolate*>(isolate)->inspector();
}
-Local<Context> debug::GetDebugContext(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
-}
-
-MaybeLocal<Value> debug::Call(Local<Context> context,
- v8::Local<v8::Function> fun,
- v8::Local<v8::Value> data) {
- PREPARE_FOR_EXECUTION(context, Debug, Call, Value);
- i::Handle<i::Object> data_obj;
- if (data.IsEmpty()) {
- data_obj = isolate->factory()->undefined_value();
- } else {
- data_obj = Utils::OpenHandle(*data);
- }
- Local<Value> result;
- has_pending_exception = !ToLocal<Value>(
- isolate->debug()->Call(Utils::OpenHandle(*fun), data_obj), &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
-}
-
-void debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debug()->set_live_edit_enabled(enable);
-}
-
-void debug::DebugBreak(Isolate* isolate) {
- reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
+void debug::SetBreakOnNextFunctionCall(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->debug()->SetBreakOnNextFunctionCall();
}
-void debug::CancelDebugBreak(Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->stack_guard()->ClearDebugBreak();
+void debug::ClearBreakOnNextFunctionCall(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)
+ ->debug()
+ ->ClearBreakOnNextFunctionCall();
}
MaybeLocal<Array> debug::GetInternalProperties(Isolate* v8_isolate,
@@ -9184,11 +9373,6 @@ void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
isolate->debug()->set_break_points_active(is_active);
}
-void debug::SetOutOfMemoryCallback(Isolate* isolate,
- OutOfMemoryCallback callback, void* data) {
- // No-op.
-}
-
void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
@@ -9233,7 +9417,8 @@ bool debug::Script::WasCompiled() const {
bool debug::Script::IsEmbedded() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
- return script->context_data() == script->GetHeap()->uninitialized_symbol();
+ return script->context_data() ==
+ script->GetReadOnlyRoots().uninitialized_symbol();
}
int debug::Script::Id() const { return Utils::OpenHandle(this)->id(); }
@@ -9253,7 +9438,8 @@ std::vector<int> debug::Script::LineEnds() const {
i::HandleScope scope(isolate);
i::Script::InitLineEnds(script);
CHECK(script->line_ends()->IsFixedArray());
- i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()));
+ i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
+ isolate);
std::vector<int> result(line_ends->length());
for (int i = 0; i < line_ends->length(); ++i) {
i::Smi* line_end = i::Smi::cast(line_ends->get(i));
@@ -9337,9 +9523,9 @@ bool debug::Script::GetPossibleBreakpoints(
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
- i::WasmSharedModuleData* shared =
- i::WasmModuleObject::cast(script->wasm_module_object())->shared();
- return shared->GetPossibleBreakpoints(start, end, locations);
+ i::WasmModuleObject* module_object =
+ i::WasmModuleObject::cast(script->wasm_module_object());
+ return module_object->GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
@@ -9388,7 +9574,6 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
return i::WasmModuleObject::cast(script->wasm_module_object())
- ->shared()
->GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
}
@@ -9420,11 +9605,12 @@ v8::debug::Location debug::Script::GetSourceLocation(int offset) const {
}
bool debug::Script::SetScriptSource(v8::Local<v8::String> newSource,
- bool preview, bool* stack_changed) const {
+ bool preview,
+ debug::LiveEditResult* result) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
return isolate->debug()->SetScriptSource(
- script, Utils::OpenHandle(*newSource), preview, stack_changed);
+ script, Utils::OpenHandle(*newSource), preview, result);
}
bool debug::Script::SetBreakpoint(v8::Local<v8::String> condition,
@@ -9462,7 +9648,7 @@ int debug::WasmScript::NumFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- i::wasm::WasmModule* module = module_object->shared()->module();
+ const i::wasm::WasmModule* module = module_object->module();
DCHECK_GE(i::kMaxInt, module->functions.size());
return static_cast<int>(module->functions.size());
}
@@ -9473,7 +9659,7 @@ int debug::WasmScript::NumImportedFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- i::wasm::WasmModule* module = module_object->shared()->module();
+ const i::wasm::WasmModule* module = module_object->module();
DCHECK_GE(i::kMaxInt, module->num_imported_functions);
return static_cast<int>(module->num_imported_functions);
}
@@ -9485,10 +9671,10 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- i::wasm::WasmModule* module = module_object->shared()->module();
+ const i::wasm::WasmModule* module = module_object->module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
- i::wasm::WasmFunction& func = module->functions[function_index];
+ const i::wasm::WasmFunction& func = module->functions[function_index];
DCHECK_GE(i::kMaxInt, func.code.offset());
DCHECK_GE(i::kMaxInt, func.code.end_offset());
return std::make_pair(static_cast<int>(func.code.offset()),
@@ -9501,13 +9687,12 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- i::wasm::WasmModule* module = module_object->shared()->module();
+ const i::wasm::WasmModule* module = module_object->module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
- i::wasm::WasmFunction& func = module->functions[function_index];
- i::SeqOneByteString* module_bytes = module_object->shared()->module_bytes();
+ const i::wasm::WasmFunction& func = module->functions[function_index];
i::wasm::ModuleWireBytes wire_bytes(
- module_bytes->GetFlatContent().ToOneByteVector());
+ module_object->native_module()->wire_bytes());
i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
// TODO(herhut): Maybe also take module, name and signature into account.
return i::StringHasher::HashSequentialString(function_bytes.start(),
@@ -9521,7 +9706,7 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- return module_object->shared()->DisassembleFunction(function_index);
+ return module_object->DisassembleFunction(function_index);
}
debug::Location::Location(int line_number, int column_number)
@@ -9590,9 +9775,12 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
void debug::SetDebugDelegate(Isolate* v8_isolate,
debug::DebugDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- // Might create the Debug context.
- ENTER_V8_FOR_NEW_CONTEXT(isolate);
- isolate->debug()->SetDebugDelegate(delegate, false);
+ isolate->debug()->SetDebugDelegate(delegate);
+}
+
+void debug::SetAsyncEventDelegate(Isolate* v8_isolate,
+ debug::AsyncEventDelegate* delegate) {
+ reinterpret_cast<i::Isolate*>(v8_isolate)->set_async_event_delegate(delegate);
}
void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
@@ -9600,9 +9788,12 @@ void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::DisallowHeapAllocation no_gc;
- i::SharedFunctionInfo::ScriptIterator iter(Utils::OpenHandle(*script));
+ i::SharedFunctionInfo::ScriptIterator iter(isolate,
+ *Utils::OpenHandle(*script));
while (i::SharedFunctionInfo* info = iter.Next()) {
- info->set_computed_debug_is_blackboxed(false);
+ if (info->HasDebugInfo()) {
+ info->GetDebugInfo()->set_computed_debug_is_blackboxed(false);
+ }
}
}
@@ -9730,7 +9921,7 @@ MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
Local<Function> debug::GeneratorObject::Function() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- return Utils::ToLocal(handle(obj->function()));
+ return Utils::ToLocal(handle(obj->function(), obj->GetIsolate()));
}
debug::Location debug::GeneratorObject::SuspendedLocation() {
@@ -9782,17 +9973,20 @@ void debug::GlobalLexicalScopeNames(
v8::Local<v8::Context> v8_context,
v8::PersistentValueVector<v8::String>* names) {
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
+ i::Isolate* isolate = context->GetIsolate();
i::Handle<i::ScriptContextTable> table(
- context->global_object()->native_context()->script_context_table());
+ context->global_object()->native_context()->script_context_table(),
+ isolate);
for (int i = 0; i < table->used(); i++) {
- i::Handle<i::Context> context = i::ScriptContextTable::GetContext(table, i);
+ i::Handle<i::Context> context =
+ i::ScriptContextTable::GetContext(isolate, table, i);
DCHECK(context->IsScriptContext());
- i::Handle<i::ScopeInfo> scope_info(context->scope_info());
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
int local_count = scope_info->ContextLocalCount();
for (int j = 0; j < local_count; ++j) {
i::String* name = scope_info->ContextLocalName(j);
if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
- names->Append(Utils::ToLocal(handle(name)));
+ names->Append(Utils::ToLocal(handle(name, isolate)));
}
}
}
@@ -9813,8 +10007,8 @@ int debug::GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
if (name->AsArrayIndex(&index)) {
return static_cast<int>(debug::NativeAccessorType::None);
}
- i::LookupIterator it =
- i::LookupIterator(object, name, i::LookupIterator::OWN);
+ i::LookupIterator it = i::LookupIterator(object->GetIsolate(), object, name,
+ i::LookupIterator::OWN);
if (!it.IsFound()) return static_cast<int>(debug::NativeAccessorType::None);
if (it.state() != i::LookupIterator::ACCESSOR) {
return static_cast<int>(debug::NativeAccessorType::None);
@@ -9846,16 +10040,11 @@ int64_t debug::GetNextRandomInt64(v8::Isolate* v8_isolate) {
}
int debug::GetDebuggingId(v8::Local<v8::Function> function) {
- i::JSReceiver* callable = *v8::Utils::OpenHandle(*function);
- if (!callable->IsJSFunction()) return i::SharedFunctionInfo::kNoDebuggingId;
- i::JSFunction* fun = i::JSFunction::cast(callable);
- i::SharedFunctionInfo* shared = fun->shared();
- int id = shared->debugging_id();
- if (id == i::SharedFunctionInfo::kNoDebuggingId) {
- id = shared->GetHeap()->NextDebuggingId();
- shared->set_debugging_id(id);
- }
- DCHECK_NE(i::SharedFunctionInfo::kNoDebuggingId, id);
+ i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
+ if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(callable);
+ int id = func->GetIsolate()->debug()->GetFunctionDebuggingId(func);
+ DCHECK_NE(i::DebugInfo::kNoDebuggingId, id);
return id;
}
@@ -9874,6 +10063,13 @@ bool debug::SetFunctionBreakpoint(v8::Local<v8::Function> function,
condition_string, id);
}
+debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
+ : scope_(
+ new i::PostponeInterruptsScope(reinterpret_cast<i::Isolate*>(isolate),
+ i::StackGuard::API_INTERRUPT)) {}
+
+debug::PostponeInterruptsScope::~PostponeInterruptsScope() {}
+
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10571,6 +10767,15 @@ void Testing::DeoptimizeAll(Isolate* isolate) {
i::Deoptimizer::DeoptimizeAll(i_isolate);
}
+void EmbedderHeapTracer::FinalizeTracing() {
+ if (isolate_) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ if (isolate->heap()->incremental_marking()->IsMarking()) {
+ isolate->heap()->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
+ }
+}
namespace internal {
@@ -10767,6 +10972,8 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
#undef SET_FIELD_WRAPPED
#undef NEW_STRING
#undef CALLBACK_SETTER
+#undef DISABLE_DEPRECATED_WARNINGS
+#undef RESET_DEPRECATED_WARNINGS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 342ab855ac..8ffec881ed 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -11,7 +11,11 @@
#include "src/detachable-vector.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/objects/bigint.h"
#include "src/objects/js-collection.h"
+#include "src/objects/js-promise.h"
+#include "src/objects/module.h"
+#include "src/objects/templates.h"
namespace v8 {
@@ -248,9 +252,7 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
- DCHECK(obj.is_null() ||
- (obj->IsSmi() ||
- !obj->IsTheHole(i::HeapObject::cast(*obj)->GetIsolate())));
+ DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
@@ -635,7 +637,7 @@ bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
if (entered_contexts_.empty()) return Handle<Context>::null();
- return Handle<Context>(entered_contexts_.back());
+ return Handle<Context>(entered_contexts_.back(), isolate_);
}
void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
@@ -650,7 +652,7 @@ void HandleScopeImplementer::LeaveMicrotaskContext() {
}
Handle<Context> HandleScopeImplementer::MicrotaskContext() {
- if (microtask_context_) return Handle<Context>(microtask_context_);
+ if (microtask_context_) return Handle<Context>(microtask_context_, isolate_);
return Handle<Context>::null();
}
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index d01e77314a..bef9cba698 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -81,7 +81,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
- static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+ static V8_INLINE Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4c4eb00ec2..a432e44814 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -60,14 +60,17 @@ void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // relocate entry
+ } else if (RelocInfo::IsRelativeCodeTarget(rmode_)) {
+ Instruction* branch = Instruction::At(pc_);
+ int32_t branch_offset = branch->GetBranchOffset() + delta;
+ branch->SetBranchOffset(branch_offset);
}
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -101,12 +104,15 @@ HeapObject* RelocInfo::target_object() {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT) {
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ }
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -114,9 +120,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -144,13 +149,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -180,12 +178,19 @@ void RelocInfo::WipeOut() {
}
}
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instruction* branch = Instruction::At(pc);
+ int code_target_index = branch->GetBranchOffset() / Instruction::kInstrSize;
+ return GetCodeTarget(code_target_index);
+}
+
template <typename ObjectVisitor>
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
@@ -317,7 +322,7 @@ Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
- return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
+ return pc + GetLdrRegisterImmediateOffset(instr) + Instruction::kPcLoadDelta;
}
@@ -325,7 +330,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
- } else if (CpuFeatures::IsSupported(ARMv7)) {
+ } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
// This is an movw / movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize)));
@@ -333,7 +338,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
return static_cast<Address>((movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
- } else {
+ } else if (IsMovImmed(Memory::int32_at(pc))) {
// This is an mov / orr immediate load. Return the immediate.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
@@ -347,6 +352,10 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
return ret;
+ } else {
+ Instruction* branch = Instruction::At(pc);
+ int32_t delta = branch->GetBranchOffset();
+ return pc + delta + Instruction::kPcLoadDelta;
}
}
@@ -364,7 +373,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
- } else if (CpuFeatures::IsSupported(ARMv7)) {
+ } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
// This is an movw / movt immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovW(Memory::int32_at(pc)));
@@ -378,7 +387,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 2 * kInstrSize);
}
- } else {
+ } else if (IsMovImmed(Memory::int32_at(pc))) {
// This is an mov / orr immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
@@ -386,7 +395,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
- uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ uint32_t immediate = static_cast<uint32_t>(target);
instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
@@ -398,6 +407,13 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 4 * kInstrSize);
}
+ } else {
+ intptr_t branch_offset = target - pc - Instruction::kPcLoadDelta;
+ Instruction* branch = Instruction::At(pc);
+ branch->SetBranchOffset(branch_offset);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(pc, kInstrSize);
+ }
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index da5e466345..576bcb30f6 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -43,6 +43,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -323,8 +324,8 @@ void CpuFeatures::PrintFeatures() {
// Implementation of RelocInfo
// static
-const int RelocInfo::kApplyMask = 0;
-
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
@@ -333,39 +334,31 @@ bool RelocInfo::IsCodedSpecially() {
return false;
}
-
bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-uint32_t RelocInfo::embedded_size() const {
- return reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_, constant_pool_));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(size), flush_mode);
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@@ -483,8 +476,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@@ -544,8 +537,9 @@ const Instr kLdrRegFpNegOffsetPattern =
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size),
pending_32_bit_constants_(),
pending_64_bit_constants_(),
scratch_register_list_(ip.bit()) {
@@ -553,7 +547,6 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
next_buffer_check_ = 0;
- code_target_sharing_blocked_nesting_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@@ -576,7 +569,6 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
- DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
@@ -620,20 +612,6 @@ Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
-
-bool Assembler::IsBranch(Instr instr) {
- return (instr & (B27 | B25)) == (B27 | B25);
-}
-
-
-int Assembler::GetBranchOffset(Instr instr) {
- DCHECK(IsBranch(instr));
- // Take the jump offset in the lower 24 bits, sign extend it and multiply it
- // with 4 to get the offset in bytes.
- return ((instr & kImm24Mask) << 8) >> 6;
-}
-
-
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}
@@ -851,7 +829,7 @@ int Assembler::target_at(int pos) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
- return pos + kPcLoadDelta + imm26;
+ return pos + Instruction::kPcLoadDelta + imm26;
}
@@ -890,7 +868,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
patcher.mov(dst, Operand(target24));
} else {
@@ -899,12 +877,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
} else {
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
@@ -916,12 +894,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
} else {
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 3);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
@@ -931,7 +909,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
return;
}
- int imm26 = target_pos - (pos + kPcLoadDelta);
+ int imm26 = target_pos - (pos + Instruction::kPcLoadDelta);
DCHECK_EQ(5 * B25, instr & 7 * B25); // b, bl, or blx imm24
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
@@ -1105,9 +1083,9 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (assembler != nullptr && assembler->predictable_code_size()) return true;
- return assembler->serializer_enabled();
+ if (RelocInfo::IsOnlyForSerializer(rmode)) {
+ if (assembler->predictable_code_size()) return true;
+ return assembler->options().record_reloc_info_for_serialization;
} else if (RelocInfo::IsNone(rmode)) {
return false;
}
@@ -1167,6 +1145,7 @@ int Operand::InstructionsRequired(const Assembler* assembler,
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
Condition cond) {
if (UseMovImmediateLoad(x, this)) {
+ CpuFeatureScope scope(this, ARMv7);
// UseMovImmediateLoad should return false when we need to output
// relocation info, since we prefer the constant pool for values that
// can be patched.
@@ -1174,12 +1153,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
Register target = rd != pc ? rd : temps.Acquire();
- if (CpuFeatures::IsSupported(ARMv7)) {
- uint32_t imm32 = static_cast<uint32_t>(x.immediate());
- CpuFeatureScope scope(this, ARMv7);
- movw(target, imm32 & 0xFFFF, cond);
- movt(target, imm32 >> 16, cond);
- }
+ uint32_t imm32 = static_cast<uint32_t>(x.immediate());
+ movw(target, imm32 & 0xFFFF, cond);
+ movt(target, imm32 >> 16, cond);
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
@@ -1436,15 +1412,17 @@ int Assembler::branch_offset(Label* L) {
// be emitted at the pc offset recorded by the label.
if (!is_const_pool_blocked()) BlockConstPoolFor(1);
- return target_pos - (pc_offset() + kPcLoadDelta);
+ return target_pos - (pc_offset() + Instruction::kPcLoadDelta);
}
// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
+void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
+ RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
- CHECK(is_int24(imm24));
+ const bool b_imm_check = is_int24(imm24);
+ CHECK(b_imm_check);
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
@@ -1453,11 +1431,12 @@ void Assembler::b(int branch_offset, Condition cond) {
}
}
-
-void Assembler::bl(int branch_offset, Condition cond) {
+void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
+ RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
- CHECK(is_int24(imm24));
+ const bool bl_imm_check = is_int24(imm24);
+ CHECK(bl_imm_check);
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@@ -1465,7 +1444,8 @@ void Assembler::blx(int branch_offset) {
DCHECK_EQ(branch_offset & 1, 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
- CHECK(is_int24(imm24));
+ const bool blx_imm_check = is_int24(imm24);
+ CHECK(blx_imm_check);
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
@@ -5145,10 +5125,11 @@ void Assembler::dq(uint64_t value) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (options().disable_reloc_info_for_patching) return;
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
- (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
- !emit_debug_code())) {
+ (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code())) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
@@ -5159,16 +5140,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
- bool sharing_ok = RelocInfo::IsNone(rmode) ||
- (rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
+ bool sharing_ok =
+ RelocInfo::IsNone(rmode) || RelocInfo::IsShareableRelocMode(rmode);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
- ConstantPoolEntry entry(position, value,
- sharing_ok || (rmode == RelocInfo::CODE_TARGET &&
- IsCodeTargetSharingAllowed()),
- rmode);
+ ConstantPoolEntry entry(
+ position, value, sharing_ok || (rmode == RelocInfo::CODE_TARGET), rmode);
bool shared = false;
if (sharing_ok) {
@@ -5187,8 +5166,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
// Share entries if allowed and possible.
// Null-values are placeholders and must be ignored.
- if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed() &&
- value != 0) {
+ if (rmode == RelocInfo::CODE_TARGET && value != 0) {
// Sharing entries here relies on canonicalized handles - without them, we
// will miss the optimisation opportunity.
Address handle_address = static_cast<Address>(value);
@@ -5384,7 +5362,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK((IsVldrDPcImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
- int delta = pc_offset() - entry.position() - kPcLoadDelta;
+ int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
DCHECK(is_uint10(delta));
if (entry.is_merged()) {
@@ -5415,7 +5393,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK(IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0);
- int delta = pc_offset() - entry.position() - kPcLoadDelta;
+ int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
DCHECK(is_uint12(delta));
// 0 is the smallest delta:
// ldr rd, [pc, #0]
@@ -5460,9 +5438,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
-PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
- int instructions)
- : Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
+PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
+ byte* address, int instructions)
+ : Assembler(options, address, instructions * kInstrSize + kGap) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 4a424ccea2..2e71ce59e6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -397,26 +397,26 @@ enum Coprocessor {
class Operand BASE_EMBEDDED {
public:
// immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
- INLINE(static Operand Zero());
- INLINE(explicit Operand(const ExternalReference& f));
+ V8_INLINE explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
+ V8_INLINE static Operand Zero();
+ V8_INLINE explicit Operand(const ExternalReference& f);
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value));
+ V8_INLINE explicit Operand(Smi* value);
// rm
- INLINE(explicit Operand(Register rm));
+ V8_INLINE explicit Operand(Register rm);
// rm <shift_op> shift_imm
explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
- INLINE(static Operand SmiUntag(Register rm)) {
+ V8_INLINE static Operand SmiUntag(Register rm) {
return Operand(rm, ASR, kSmiTagSize);
}
- INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
+ V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
}
- INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
+ V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
}
@@ -519,9 +519,9 @@ class MemOperand BASE_EMBEDDED {
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
- INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
- Register key,
- AddrMode am = Offset)) {
+ V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
+ Register key,
+ AddrMode am = Offset) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
}
@@ -628,9 +628,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -662,27 +660,27 @@ class Assembler : public AssemblerBase {
// Returns true if the given pc address is the start of a constant pool load
// instruction sequence.
- INLINE(static bool is_constant_pool_load(Address pc));
+ V8_INLINE static bool is_constant_pool_load(Address pc);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address constant_pool_entry_address(Address pc,
- Address constant_pool));
+ V8_INLINE static Address constant_pool_entry_address(Address pc,
+ Address constant_pool);
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
- INLINE(static Address target_address_at(Address pc, Address constant_pool));
- INLINE(static void set_target_address_at(
+ V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
+ V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
- INLINE(static Address target_address_from_return_address(Address pc));
+ V8_INLINE static Address target_address_from_return_address(Address pc);
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return from.
- INLINE(static Address return_address_from_call_start(Address pc));
+ V8_INLINE static Address return_address_from_call_start(Address pc);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
@@ -705,9 +703,6 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
- // Difference between address of current opcode and value read from pc
- // register.
- static constexpr int kPcLoadDelta = 8;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
VfpRegList* GetScratchVfpRegisterList() {
return &scratch_vfp_register_list_;
@@ -727,8 +722,10 @@ class Assembler : public AssemblerBase {
void CodeTargetAlign();
// Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
+ void b(int branch_offset, Condition cond = al,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
+ void bl(int branch_offset, Condition cond = al,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
@@ -1427,36 +1424,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Class for blocking sharing of code targets in constant pool.
- class BlockCodeTargetSharingScope {
- public:
- explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
- Open(assem);
- }
- // This constructor does not initialize the scope. The user needs to
- // explicitly call Open() before using it.
- BlockCodeTargetSharingScope() : assem_(nullptr) {}
- ~BlockCodeTargetSharingScope() {
- Close();
- }
- void Open(Assembler* assem) {
- DCHECK_NULL(assem_);
- DCHECK_NOT_NULL(assem);
- assem_ = assem;
- assem_->StartBlockCodeTargetSharing();
- }
-
- private:
- void Close() {
- if (assem_ != nullptr) {
- assem_->EndBlockCodeTargetSharing();
- }
- }
- Assembler* assem_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
- };
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1504,8 +1471,6 @@ class Assembler : public AssemblerBase {
*reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);
- static bool IsBranch(Instr instr);
- static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
@@ -1579,6 +1544,13 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ // Move a 32-bit immediate into a register, potentially via the constant pool.
+ void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
+
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1588,20 +1560,6 @@ class Assembler : public AssemblerBase {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
- // Prevent sharing of code target constant pool entries until
- // EndBlockCodeTargetSharing is called. Calls to this function can be nested
- // but must be followed by an equal number of call to
- // EndBlockCodeTargetSharing.
- void StartBlockCodeTargetSharing() {
- ++code_target_sharing_blocked_nesting_;
- }
-
- // Resume sharing of constant pool code target entries. Needs to be called
- // as many times as StartBlockCodeTargetSharing to have an effect.
- void EndBlockCodeTargetSharing() {
- --code_target_sharing_blocked_nesting_;
- }
-
// Prevent contant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of call to EndBlockConstpool.
@@ -1709,12 +1667,6 @@ class Assembler : public AssemblerBase {
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Sharing of code target entries may be blocked in some code sequences.
- int code_target_sharing_blocked_nesting_;
- bool IsCodeTargetSharingAllowed() const {
- return code_target_sharing_blocked_nesting_ == 0;
- }
-
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
@@ -1730,9 +1682,6 @@ class Assembler : public AssemblerBase {
inline void CheckBuffer();
void GrowBuffer();
- // 32-bit immediate values
- void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
-
// Instruction generation
void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
// Attempt to encode operand |x| for instruction |instr| and return true on
@@ -1757,35 +1706,23 @@ class Assembler : public AssemblerBase {
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value);
void ConstantPoolAddEntry(int position, Double value);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
friend class RelocInfo;
friend class BlockConstPoolScope;
- friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
friend class UseScratchRegisterScope;
-
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- void RequestHeapObject(HeapObjectRequest request);
- void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
-
- std::forward_list<HeapObjectRequest> heap_object_requests_;
};
class EnsureSpace BASE_EMBEDDED {
public:
- INLINE(explicit EnsureSpace(Assembler* assembler));
+ V8_INLINE explicit EnsureSpace(Assembler* assembler);
};
class PatchingAssembler : public Assembler {
public:
- PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
+ PatchingAssembler(const AssemblerOptions& options, byte* address,
+ int instructions);
~PatchingAssembler();
void Emit(Address addr);
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 8267da4703..26131ea305 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -30,20 +30,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ lsl(r5, r0, Operand(kPointerSizeLog2));
- __ str(r1, MemOperand(sp, r5));
- __ Push(r1);
- __ Push(r2);
- __ add(r0, r0, Operand(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
// r0: code entry
// r1: function
@@ -216,6 +202,18 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
+ if (FLAG_embedded_builtins) {
+ if (masm->root_array_available() &&
+ isolate()->ShouldLoadConstantsFromRootList()) {
+ // This is basically an inlined version of Call(Handle<Code>) that loads
+ // the code object into lr instead of ip.
+ __ Move(ip, target);
+ __ IndirectLoadConstant(lr, GetCode());
+ __ add(lr, lr, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ blx(lr);
+ return;
+ }
+ }
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(ip, target);
@@ -315,280 +313,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kSavedRegs | pc.bit());
}
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // r0 - number of arguments
- // r1 - constructor?
- // sp[0] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ tst(r3, Operand(1));
- __ b(ne, &normal_sequence);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ add(r3, r3, Operand(1));
-
- if (FLAG_debug_code) {
- __ ldr(r5, FieldMemOperand(r2, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store r3
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ldr(r4, FieldMemOperand(
- r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ str(r4, FieldMemOperand(
- r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argc (only if argument_count() == ANY)
- // -- r1 : constructor
- // -- r2 : AllocationSite or undefined
- // -- r3 : new target
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ tst(r4, Operand(kSmiTagMask));
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r4, r4, r5, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in r2 or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(r2, r4);
- }
-
- // Enter the context of the Array function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- Label subclassing;
- __ cmp(r3, r1);
- __ b(ne, &subclassing);
-
- Label no_info;
- // Get the elements kind and case on that.
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &no_info);
-
- __ ldr(r3, FieldMemOperand(
- r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(r3);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- __ bind(&subclassing);
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ add(r0, r0, Operand(3));
- __ Push(r3, r2);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
- __ cmp(r0, Operand(1));
-
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0, lo);
-
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN, hi);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ ldr(r3, MemOperand(sp, 0));
- __ cmp(r3, Operand::Zero());
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey, ne);
- }
-
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r3);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmp(r3, Operand(PACKED_ELEMENTS));
- __ b(eq, &done);
- __ cmp(r3, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ cmp(r3, Operand(PACKED_ELEMENTS));
- __ b(eq, &fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -617,13 +341,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label profiler_disabled;
Label end_profiler_check;
- __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ Move(r9, ExternalReference::is_profiling_address(isolate));
__ ldrb(r9, MemOperand(r9, 0));
__ cmp(r9, Operand(0));
__ b(eq, &profiler_disabled);
// Additional parameter is the address of the actual callback.
- __ mov(r3, Operand(thunk_ref));
+ __ Move(r3, thunk_ref);
__ jmp(&end_profiler_check);
__ bind(&profiler_disabled);
@@ -631,7 +355,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
- __ mov(r9, Operand(next_address));
+ __ Move(r9, next_address);
__ ldr(r4, MemOperand(r9, kNextOffset));
__ ldr(r5, MemOperand(r9, kLimitOffset));
__ ldr(r6, MemOperand(r9, kLevelOffset));
@@ -642,7 +366,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1);
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_enter_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -657,7 +381,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1);
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_leave_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -696,7 +420,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- __ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
@@ -712,7 +436,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ str(r5, MemOperand(r9, kLimitOffset));
__ mov(r4, r0);
__ PrepareCallCFunction(1);
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
__ mov(r0, r4);
__ jmp(&leave_exit_frame);
@@ -759,8 +483,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch0);
// isolate
- __ mov(scratch1,
- Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(scratch1, ExternalReference::isolate_address(masm->isolate()));
__ push(scratch1);
// holder
__ push(holder);
@@ -829,7 +552,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ push(scratch);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Push(scratch, scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
__ Push(Smi::kZero); // should_throw_on_error -> false
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 02d1c6b1dd..1d041e75be 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -11,6 +11,7 @@
#include "src/base/macros.h"
#include "src/boxed-float.h"
#include "src/globals.h"
+#include "src/utils.h"
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
@@ -51,6 +52,12 @@ const int kNoRegister = -1;
const int kLdrMaxReachBits = 12;
const int kVldrMaxReachBits = 10;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values. Loads allow a uint12
+// value with a separate sign bit (range [-4095, +4095]), so the first root
+// is still addressable with a single load instruction.
+constexpr int kRootRegisterBias = 4095;
+
// -----------------------------------------------------------------------------
// Conditions.
@@ -462,15 +469,19 @@ class Instruction {
kPCReadOffset = 8
};
- // Helper macro to define static accessors.
- // We use the cast to char* trick to bypass the strict anti-aliasing rules.
- #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
- static inline return_type Name(Instr instr) { \
- char* temp = reinterpret_cast<char*>(&instr); \
- return reinterpret_cast<Instruction*>(temp)->Name(); \
- }
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static constexpr int kPcLoadDelta = 8;
- #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+// Helper macro to define static accessors.
+// We use the cast to char* trick to bypass the strict anti-aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
// Get the raw instruction bits.
inline Instr InstructionBits() const {
@@ -624,7 +635,25 @@ class Instruction {
// Fields used in Branch instructions
inline int LinkValue() const { return Bit(24); }
- inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
+ inline int SImmed24Value() const {
+ return signed_bitextract_32(23, 0, InstructionBits());
+ }
+
+ bool IsBranch() { return Bit(27) == 1 && Bit(25) == 1; }
+
+ int GetBranchOffset() {
+ DCHECK(IsBranch());
+ return SImmed24Value() * kInstrSize;
+ }
+
+ void SetBranchOffset(int32_t branch_offset) {
+ DCHECK(IsBranch());
+ DCHECK_EQ(branch_offset % kInstrSize, 0);
+ int32_t new_imm24 = branch_offset / kInstrSize;
+ CHECK(is_int24(new_imm24));
+ SetInstructionBits((InstructionBits() & ~(kImm24Mask)) |
+ (new_imm24 & kImm24Mask));
+ }
// Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcValue() const {
@@ -729,6 +758,8 @@ class VFPRegisters {
static const char* names_[kNumVFPRegisters];
};
+// Relative jumps on ARM can address ±32 MB.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 32;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 73131d7d18..032f610edc 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -86,7 +86,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ mov(r1, Operand(type())); // bailout type,
+ __ mov(r1, Operand(static_cast<int>(deopt_kind())));
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/arm/frame-constants-arm.h
index 3d69f6dfd1..73e171009d 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/arm/frame-constants-arm.h
@@ -5,6 +5,9 @@
#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
#define V8_ARM_FRAME_CONSTANTS_ARM_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -30,6 +33,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 4;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index a40b323d83..b96826264a 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/interface-descriptors-arm.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@@ -59,13 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
-const Register MathPowTaggedDescriptor::exponent() { return r2; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
@@ -179,24 +170,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- // r3 : the new target
- Register registers[] = {r1, r3, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0, r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -204,41 +178,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r0 -- number of arguments
- // r1 -- function
- // r2 -- allocation site with elements kind
- Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r0 -- number of arguments
- // r1 -- function
- // r2 -- allocation site with elements kind
- Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -247,7 +187,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -256,32 +195,24 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
Register registers[] = {
r1, // JSFunction
r3, // the new target
r0, // actual number of arguments
r2, // expected number of arguments
};
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
Register registers[] = {
JavaScriptFrame::context_register(), // callee context
r4, // call_data
r2, // holder
r1, // api_function_address
};
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
@@ -314,7 +245,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (argc)
@@ -324,6 +257,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm/interface-descriptors-arm.h b/deps/v8/src/arm/interface-descriptors-arm.h
deleted file mode 100644
index a64927924e..0000000000
--- a/deps/v8/src/arm/interface-descriptors-arm.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
-#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
-
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-class PlatformInterfaceDescriptor {
- public:
- explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) {}
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 5a013da141..d02766791b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -11,7 +11,6 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -24,16 +23,17 @@
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -45,15 +45,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -135,33 +126,19 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
- // The ldr call below could end up clobbering the destination register when
- // the offset does not fit into 12 bits (and thus needs to be loaded from the
- // constant pool). In that case, we need to be extra-careful and temporarily
- // use another register as the target.
+ // The ldr call below could end up clobbering ip when the offset does not fit
+ // into 12 bits (and thus needs to be loaded from the constant pool). In that
+ // case, we need to be extra-careful and temporarily use another register as
+ // the target.
const uint32_t offset =
- FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
- const bool could_clobber_ip = !is_uint12(offset) && destination == ip;
+ FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
+ const bool could_clobber_ip = !is_uint12(offset);
Register reg = destination;
if (could_clobber_ip) {
@@ -178,30 +155,18 @@ void TurboAssembler::LookupConstant(Register destination,
}
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ ldr(destination, MemOperand(kRootRegister, offset));
+}
- ldr(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ add(destination, kRootRegister, Operand(offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
@@ -220,16 +185,36 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- LookupConstant(scratch, code);
- add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(scratch, cond);
- return;
+ if (FLAG_embedded_builtins) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ b(code_target_index * Instruction::kInstrSize, cond,
+ RelocInfo::RELATIVE_CODE_TARGET);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ IndirectLoadConstant(scratch, code);
+ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(scratch, cond);
+ return;
+ } else if (target_is_isolate_independent_builtin &&
+ options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(ip, cond);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
// 'code' is always generated ARM code, never THUMB code
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
@@ -312,16 +297,37 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- LookupConstant(ip, code);
- add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(ip, cond);
- return;
+ if (FLAG_embedded_builtins) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ bl(code_target_index * Instruction::kInstrSize, cond,
+ RelocInfo::RELATIVE_CODE_TARGET);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ IndirectLoadConstant(ip, code);
+ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(ip, cond);
+ return;
+ } else if (target_is_isolate_independent_builtin &&
+ options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(ip, cond);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
// 'code' is always generated ARM code, never THUMB code
Call(code.address(), rmode, cond, mode);
}
@@ -362,29 +368,22 @@ void TurboAssembler::Push(Smi* smi) {
void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Heap::RootListIndex root_index;
- if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
- LookupConstant(dst, value);
- } else {
- LoadRoot(dst, root_index);
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
}
- return;
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
- reference.address() !=
- ExternalReference::roots_array_start(isolate()).address()) {
- LookupExternalReference(dst, reference);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, reference);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
@@ -564,7 +563,7 @@ void MacroAssembler::Store(Register src,
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
- ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
+ ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
}
@@ -1224,10 +1223,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
- if (type == StackFrame::INTERNAL) {
- Move(scratch, CodeObject());
- push(scratch);
- }
}
int TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -1599,9 +1594,9 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ldr(expected_reg,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ ldrh(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag);
@@ -1774,7 +1769,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DwVfpRegister double_input) {
+ DwVfpRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -1784,7 +1780,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
ldr(result, MemOperand(sp, 0));
add(sp, sp, Operand(kDoubleSize));
@@ -1793,8 +1793,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -1802,9 +1802,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
mov(r0, Operand(f->nargs));
Move(r1, ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, r0, r1));
+ add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1905,18 +1905,17 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
Move(r1, Smi::FromInt(static_cast<int>(reason)));
@@ -1953,6 +1952,7 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
+ add(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}
void MacroAssembler::SmiTag(Register reg, SBit s) {
@@ -1979,6 +1979,16 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
b(eq, smi_label);
}
+void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+ cmp(x, Operand(y));
+ b(eq, dest);
+}
+
+void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+ cmp(x, Operand(y));
+ b(lt, dest);
+}
+
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
@@ -2010,18 +2020,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray);
- push(object);
- CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
- pop(object);
- Check(eq, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2094,7 +2092,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
CompareRoot(object, Heap::kUndefinedValueRootIndex);
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -2453,7 +2451,7 @@ bool AreAliased(Register reg1,
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// We can use the register pc - 8 for the address of the current instruction.
- sub(dst, pc, Operand(pc_offset() + TurboAssembler::kPcLoadDelta));
+ sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 51ef552a92..87a8ff2834 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -25,9 +26,13 @@ constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
+
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
+constexpr Register kJavaScriptCallExtraArg1Register = r2;
+
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@@ -85,20 +90,13 @@ enum TargetAddressStorageMode {
NEVER_INLINE_TARGET_ADDRESS
};
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -321,11 +319,10 @@ class TurboAssembler : public Assembler {
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
@@ -338,9 +335,10 @@ class TurboAssembler : public Assembler {
int CallStubSize();
void CallStubDelayed(CodeStub* stub);
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Call(Register target, Condition cond = al);
@@ -355,7 +353,9 @@ class TurboAssembler : public Assembler {
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
Call(target, rmode);
CheckConstPool(false, false);
}
@@ -511,12 +511,18 @@ class TurboAssembler : public Assembler {
}
// Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ LoadRoot(destination, index, al);
+ }
void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond = al);
+ Condition cond);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
+ void JumpIfEqual(Register x, int32_t y, Label* dest);
+ void JumpIfLessThan(Register x, int32_t y, Label* dest);
+
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
@@ -530,7 +536,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DwVfpRegister double_input);
+ DwVfpRegister double_input, StubCallMode stub_mode);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@@ -549,18 +555,7 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
- protected:
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* const isolate_;
-
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
@@ -602,7 +597,11 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
@@ -849,9 +848,6 @@ class MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e8eb474090..e0f57396c1 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -285,7 +285,7 @@ void ArmDebugger::Debug() {
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -514,7 +514,7 @@ void ArmDebugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the ArmDebugger.\n");
+ PrintF(" stop and give control to the ArmDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 0c43fbe0e1..72674b87a3 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -386,18 +386,16 @@ unsigned Operand::shift_amount() const {
Operand Operand::UntagSmi(Register smi) {
- STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
- kSmiValueSize));
DCHECK(smi.Is64Bits());
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
return Operand(smi, ASR, kSmiShift);
}
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
- STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
- kSmiValueSize));
DCHECK(smi.Is64Bits());
DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (scale > kSmiShift) {
return Operand(smi, LSL, scale - kSmiShift);
} else if (scale < kSmiShift) {
@@ -551,11 +549,8 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_GE(instr->ImmPCOffset(), 0);
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2,
- code_targets_.size());
- return code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2];
+ return GetCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2);
}
}
@@ -565,7 +560,7 @@ Address Assembler::runtime_entry_at(Address pc) {
return Assembler::target_address_at(pc, 0 /* unused */);
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return instr->ImmPCOffset() + isolate_data().code_range_start_;
+ return instr->ImmPCOffset() + options().code_range_start;
}
}
@@ -708,7 +703,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -716,9 +711,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -746,13 +740,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return origin->runtime_entry_at(pc_);
@@ -788,7 +775,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 121c15aac9..af3f59bd48 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -157,9 +157,10 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -179,33 +180,39 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ Instruction* movz_instr = reinterpret_cast<Instruction*>(pc_)->preceding();
+ DCHECK(movz_instr->IsMovz());
+ uint64_t imm = static_cast<uint64_t>(movz_instr->ImmMoveWide())
+ << (16 * movz_instr->ShiftMoveWide());
+ DCHECK_LE(imm, INT_MAX);
-uint32_t RelocInfo::embedded_size() const {
- return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
- // No icache flushing needed, see comment in set_target_address_at.
+ return static_cast<int>(imm);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ if (instr->IsLdrLiteralX()) {
+ return static_cast<uint32_t>(
+ Memory::Address_at(Assembler::target_pointer_address_at(pc_)));
+ } else {
+ DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
+ return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
+ }
}
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
@@ -304,8 +311,8 @@ void Immediate::InitializeHandle(Handle<HeapObject> handle) {
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- return assembler->serializer_enabled();
+ if (RelocInfo::IsOnlyForSerializer(rmode)) {
+ return assembler->options().record_reloc_info_for_serialization;
}
return !RelocInfo::IsNone(rmode);
@@ -342,8 +349,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
if (CanBeShared(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
- } else if (mode == RelocInfo::CODE_TARGET &&
- assm_->IsCodeTargetSharingAllowed() && raw_data != 0) {
+ } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
} else {
@@ -471,8 +477,7 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- return RelocInfo::IsNone(mode) ||
- (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
+ return RelocInfo::IsNone(mode) || RelocInfo::IsShareableRelocMode(mode);
}
@@ -537,7 +542,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
}
assm_->dc64(entry.first);
@@ -547,13 +552,13 @@ void ConstPool::EmitEntries() {
// Assembler
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size),
constpool_(this),
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
- code_target_sharing_blocked_nesting_ = 0;
Reset();
}
@@ -562,7 +567,6 @@ Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
- DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
@@ -571,12 +575,11 @@ void Assembler::Reset() {
DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
- DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
pc_ = buffer_;
- code_targets_.reserve(64);
+ ReserveCodeTargetSpace(64);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
constpool_.Clear();
next_constant_pool_check_ = 0;
@@ -589,8 +592,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), IMMUTABLE, TENURED);
+ Handle<HeapObject> object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
set_target_address_at(pc, 0 /* unused */, object.address());
break;
}
@@ -598,12 +601,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_GE(instr->ImmPCOffset(), 0);
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2,
- code_targets_.size());
- code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2] =
- request.code_stub()->GetCode();
+ UpdateCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2,
+ request.code_stub()->GetCode());
break;
}
}
@@ -697,22 +697,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
+ prev_link->SetImmPCOffsetTarget(options(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
+ prev_link->SetImmPCOffsetTarget(options(), next_link);
} else if (label_veneer != nullptr) {
// Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
+ prev_link->SetImmPCOffsetTarget(options(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(isolate_data(), label_veneer);
+ link->SetImmPCOffsetTarget(options(), label_veneer);
link = next_link;
}
} else {
@@ -783,11 +783,10 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- PatchingAssembler patcher(isolate_data(), reinterpret_cast<byte*>(link),
- 2);
+ PatchingAssembler patcher(options(), reinterpret_cast<byte*>(link), 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
- link->SetImmPCOffsetTarget(isolate_data(),
+ link->SetImmPCOffsetTarget(options(),
reinterpret_cast<Instruction*>(pc_));
}
@@ -4082,9 +4081,7 @@ void Assembler::EmitStringData(const char* string) {
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
- // Don't generate simulator specific code if we are building a snapshot, which
- // might be run on real hardware.
- if (!serializer_enabled()) {
+ if (options().enable_simulator_code) {
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit pools.
BlockPoolsScope scope(this);
@@ -4757,6 +4754,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
ConstantPoolMode constant_pool_mode) {
// Non-relocatable constants should not end up in the literal pool.
DCHECK(!RelocInfo::IsNone(rmode));
+ if (options().disable_reloc_info_for_patching) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
@@ -4783,10 +4781,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
- if (!RelocInfo::IsNone(rmode) && write_reloc_info) {
+ if (write_reloc_info) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
@@ -4794,18 +4792,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
-int Assembler::GetCodeTargetIndex(Handle<Code> target) {
- int current = static_cast<int>(code_targets_.size());
- if (current > 0 && !target.is_null() &&
- code_targets_.back().address() == target.address()) {
- // Optimization if we keep jumping to the same code target.
- return (current - 1);
- } else {
- code_targets_.push_back(target);
- return current;
- }
-}
-
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
@@ -4818,7 +4804,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
void Assembler::near_call(HeapObjectRequest request) {
RequestHeapObject(request);
- int index = GetCodeTargetIndex(Handle<Code>());
+ int index = AddCodeTarget(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
bl(index);
}
@@ -4945,7 +4931,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(isolate_data(), veneer);
+ branch->SetImmPCOffsetTarget(options(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 431b520811..e2945d5999 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -898,9 +898,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler();
@@ -972,10 +970,6 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Add 'target' to the code_targets_ vector, if necessary, and return the
- // offset at which it is stored.
- int GetCodeTargetIndex(Handle<Code> target);
-
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<Code> code_target_object_handle_at(Address pc);
@@ -984,7 +978,7 @@ class Assembler : public AssemblerBase {
// at 'pc'.
// Runtime entries can be temporarily encoded as the offset between the
// runtime function entrypoint and the code range start (stored in the
- // code_range_start_ field), in order to be encodable as we generate the code,
+ // code_range_start field), in order to be encodable as we generate the code,
// before it is moved into the code space.
inline Address runtime_entry_at(Address pc);
@@ -2887,6 +2881,10 @@ class Assembler : public AssemblerBase {
return reinterpret_cast<byte*>(instr) - buffer_;
}
+ static const char* GetSpecialRegisterName(int code) {
+ return (code == kSPRegInternalCode) ? "sp" : "UNKNOWN";
+ }
+
// Register encoding.
static Instr Rd(CPURegister rd) {
DCHECK_NE(rd.code(), kSPRegInternalCode);
@@ -3229,34 +3227,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
- // Class for blocking sharing of code targets in constant pool.
- class BlockCodeTargetSharingScope {
- public:
- explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
- Open(assem);
- }
- // This constructor does not initialize the scope. The user needs to
- // explicitly call Open() before using it.
- BlockCodeTargetSharingScope() : assem_(nullptr) {}
- ~BlockCodeTargetSharingScope() { Close(); }
- void Open(Assembler* assem) {
- DCHECK_NULL(assem_);
- DCHECK_NOT_NULL(assem);
- assem_ = assem;
- assem_->StartBlockCodeTargetSharing();
- }
-
- private:
- void Close() {
- if (assem_ != nullptr) {
- assem_->EndBlockCodeTargetSharing();
- }
- }
- Assembler* assem_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
- };
-
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
@@ -3341,16 +3311,6 @@ class Assembler : public AssemblerBase {
void RemoveBranchFromLabelLinkChain(Instruction* branch, Label* label,
Instruction* label_veneer = nullptr);
- // Prevent sharing of code target constant pool entries until
- // EndBlockCodeTargetSharing is called. Calls to this function can be nested
- // but must be followed by an equal number of call to
- // EndBlockCodeTargetSharing.
- void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; }
-
- // Resume sharing of constant pool code target entries. Needs to be called
- // as many times as StartBlockCodeTargetSharing to have an effect.
- void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; }
-
private:
static uint32_t FPToImm8(double imm);
@@ -3530,12 +3490,6 @@ class Assembler : public AssemblerBase {
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
- // Sharing of code target entries may be blocked in some code sequences.
- int code_target_sharing_blocked_nesting_;
- bool IsCodeTargetSharingAllowed() const {
- return code_target_sharing_blocked_nesting_ == 0;
- }
-
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -3546,14 +3500,6 @@ class Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
- // Before we copy code into the code space, we cannot encode calls to code
- // targets as we normally would, as the difference between the instruction's
- // location in the temporary buffer and the call target is not guaranteed to
- // fit in the offset field. We keep track of the code handles we encounter
- // in calls in this vector, and encode the index of the code handle in the
- // vector instead.
- std::vector<Handle<Code>> code_targets_;
-
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
@@ -3649,20 +3595,8 @@ class Assembler : public AssemblerBase {
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
- std::forward_list<HeapObjectRequest> heap_object_requests_;
-
- private:
friend class EnsureSpace;
friend class ConstPool;
};
@@ -3678,8 +3612,9 @@ class PatchingAssembler : public Assembler {
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
// Note that the instruction cache will not be flushed.
- PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
- : Assembler(isolate_data, start, count * kInstructionSize + kGap) {
+ PatchingAssembler(const AssemblerOptions& options, byte* start,
+ unsigned count)
+ : Assembler(options, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index cca4c30269..7a5f06c492 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -29,21 +29,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Poke(x1, Operand(x5));
- __ Push(x1, x2);
- __ Add(x0, x0, Operand(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- // It is important that the following stubs are generated in this order
- // because pregenerated stubs can only call other pregenerated stubs.
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
// This is the entry point from C++. 5 arguments are provided in x0-x4.
// See use of the JSEntryFunction for example in src/execution.cc.
// Input:
@@ -324,322 +309,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Call(GetCode(), RelocInfo::CODE_TARGET);
}
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- ASM_LOCATION("CreateArrayDispatch");
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
-
- } else if (mode == DONT_OVERRIDE) {
- Register kind = x3;
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
- // TODO(jbramley): Is this the best way to handle this? Can we make the
- // tail calls conditional, rather than hopping over each one?
- __ CompareAndBranch(kind, candidate_kind, ne, &next);
- T stub(masm->isolate(), candidate_kind);
- __ TailCallStub(&stub);
- __ Bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
-
- } else {
- UNREACHABLE();
- }
-}
-
-
-// TODO(jbramley): If this needs to be a special case, make it a proper template
-// specialization, and not a separate function.
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- ASM_LOCATION("CreateArrayDispatchOneArgument");
- // x0 - argc
- // x1 - constructor?
- // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // sp[0] - last argument
-
- Register allocation_site = x2;
- Register kind = x3;
-
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // Is the low bit set? If so, the array is holey.
- Label normal_sequence;
- __ Tbnz(kind, 0, &normal_sequence);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ Orr(kind, kind, 1);
-
- if (FLAG_debug_code) {
- __ Ldr(x10, FieldMemOperand(allocation_site, 0));
- __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
- &normal_sequence);
- __ Assert(eq, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store 'kind'
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field; upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ldr(x11,
- FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
- __ Str(x11,
- FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset));
-
- __ Bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
- __ CompareAndBranch(kind, candidate_kind, ne, &next);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
- __ TailCallStub(&stub);
- __ Bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- Register argc = x0;
- Label zero_case, n_case;
- __ Cbz(argc, &zero_case);
- __ Cmp(argc, 1);
- __ B(ne, &n_case);
-
- // One argument.
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ Bind(&zero_case);
- // No arguments.
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ Bind(&n_case);
- // N arguments.
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("ArrayConstructorStub::Generate");
- // ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
- // -- x1 : constructor
- // -- x2 : AllocationSite or undefined
- // -- x3 : new target
- // -- sp[0] : last argument
- // -----------------------------------
- Register constructor = x1;
- Register allocation_site = x2;
- Register new_target = x3;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- Label unexpected_map, map_ok;
- // Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ JumpIfSmi(x10, &unexpected_map);
- __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
- __ Bind(&unexpected_map);
- __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ Bind(&map_ok);
-
- // We should either have undefined in the allocation_site register or a
- // valid AllocationSite.
- __ AssertUndefinedOrAllocationSite(allocation_site);
- }
-
- // Enter the context of the Array function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- Label subclassing;
- __ Cmp(new_target, constructor);
- __ B(ne, &subclassing);
-
- Register kind = x3;
- Label no_info;
- // Get the elements kind and case on that.
- __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
-
- __ Ldrsw(kind, UntagSmiFieldMemOperand(
- allocation_site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ Bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing support.
- __ Bind(&subclassing);
- __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x0, x0, Operand(3));
- __ Push(new_target, allocation_site);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
- Label zero_case, n_case;
- Register argc = x0;
-
- __ Cbz(argc, &zero_case);
- __ CompareAndBranch(argc, 1, ne, &n_case);
-
- // One argument.
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-
- // We might need to create a holey array; look at the first argument.
- __ Peek(x10, 0);
- __ Cbz(x10, &packed_case);
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
-
- __ Bind(&packed_case);
- }
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-
- __ Bind(&zero_case);
- // No arguments.
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0);
-
- __ Bind(&n_case);
- // N arguments.
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- x1 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- Register constructor = x1;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- Label unexpected_map, map_ok;
- // Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ JumpIfSmi(x10, &unexpected_map);
- __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
- __ Bind(&unexpected_map);
- __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ Bind(&map_ok);
- }
-
- Register kind = w3;
- // Figure out the right elements kind
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Retrieve elements_kind from map.
- __ LoadElementsKindFromMap(kind, x10);
-
- if (FLAG_debug_code) {
- Label done;
- __ Cmp(x3, PACKED_ELEMENTS);
- __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- }
-
- Label fast_elements_case;
- __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ Bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
// The number of register that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers need to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
@@ -894,8 +563,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
- __ Mov(isolate_address,
- Operand(ExternalReference::isolate_address(isolate())));
+ __ Mov(isolate_address, ExternalReference::isolate_address(isolate()));
__ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 406b139a50..1b87ce572c 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -26,6 +26,7 @@ STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
namespace v8 {
namespace internal {
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
@@ -140,6 +141,11 @@ const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;
const unsigned kFloat16ExponentBias = 15;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
typedef uint16_t float16;
#define INSTRUCTION_FIELDS_LIST(V_) \
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 397f4cb36d..b2f534ac45 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -155,7 +155,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
- __ Mov(x1, type());
+ __ Mov(x1, static_cast<int>(deopt_kind()));
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
@@ -275,50 +275,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Br(continuation);
}
-// Size of an entry of the second level deopt table.
-// This is the code size generated by GeneratePrologue for one entry.
-const int Deoptimizer::table_entry_size_ = kInstructionSize;
+// Size of an entry of the second level deopt table. Since we do not generate
+// a table for ARM64, the size is zero.
+const int Deoptimizer::table_entry_size_ = 0 * kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
- // The address at which the deopt table is entered should be in x16, the first
- // temp register allocated. We can't assert that the address is in there, but
- // we can check that it's the first allocated temp. Later, we'll also check
- // the computed entry_id is in the expected range.
- Register entry_addr = temps.AcquireX();
+ // The MacroAssembler will have put the deoptimization id in x16, the first
+ // temp register allocated. We can't assert that the id is in there, but we
+ // can check that x16 the first allocated temp and that the value it contains
+ // is in the expected range.
Register entry_id = temps.AcquireX();
- DCHECK(entry_addr.Is(x16));
- DCHECK(entry_id.Is(x17));
-
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- {
- InstructionAccurateScope scope(masm());
-
- Label start_of_table, end_of_table;
- __ bind(&start_of_table);
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ b(&end_of_table);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&end_of_table);
-
- // Get the address of the start of the table.
- DCHECK(is_int21(table_entry_size_ * count()));
- __ adr(entry_id, &start_of_table);
-
- // Compute the gap in bytes between the entry address, which should have
- // been left in entry_addr (x16) by CallForDeoptimization, and the start of
- // the table.
- __ sub(entry_id, entry_addr, entry_id);
-
- // Shift down to obtain the entry_id.
- DCHECK_EQ(table_entry_size_, kInstructionSize);
- __ lsr(entry_id, entry_id, kInstructionSizeLog2);
- }
-
+ DCHECK(entry_id.Is(x16));
__ Push(padreg, entry_id);
if (__ emit_debug_code()) {
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 662f426033..96fc72f126 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -5,6 +5,10 @@
#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
@@ -46,6 +50,20 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kFixedFrameSizeFromFp =
+ // Header is padded to 16 byte (see {MacroAssembler::EnterFrame}).
+ RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 272948a819..4a10594590 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -227,21 +227,21 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
-void Instruction::SetImmPCOffsetTarget(Assembler::IsolateData isolate_data,
+void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
Instruction* target) {
if (IsPCRelAddressing()) {
- SetPCRelImmTarget(isolate_data, target);
+ SetPCRelImmTarget(options, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
- SetUnresolvedInternalReferenceImmTarget(isolate_data, target);
+ SetUnresolvedInternalReferenceImmTarget(options, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
}
}
-void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
+void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@@ -252,7 +252,7 @@ void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
- PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this),
+ PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@@ -293,7 +293,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
void Instruction::SetUnresolvedInternalReferenceImmTarget(
- Assembler::IsolateData isolate_data, Instruction* target) {
+ const AssemblerOptions& options, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@@ -302,7 +302,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
- PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this), 2);
+ PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
patcher.brk(high16);
patcher.brk(low16);
}
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index b1c488eb65..bb1791becb 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -402,9 +402,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
- void SetImmPCOffsetTarget(AssemblerBase::IsolateData isolate_data,
+ void SetImmPCOffsetTarget(const AssemblerOptions& options,
Instruction* target);
- void SetUnresolvedInternalReferenceImmTarget(AssemblerBase::IsolateData,
+ void SetUnresolvedInternalReferenceImmTarget(const AssemblerOptions& options,
Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@@ -441,8 +441,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
- void SetPCRelImmTarget(AssemblerBase::IsolateData isolate_data,
- Instruction* target);
+ void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index f9550782c1..10d8ee4bc7 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -69,6 +69,7 @@ static const CounterDescriptor kCounterList[] = {
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
+ {"NEON", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 89c7b98f51..357161d57f 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/interface-descriptors-arm64.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@@ -59,12 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
-const Register MathPowTaggedDescriptor::exponent() { return x11; }
-
-
-const Register MathPowIntegerDescriptor::exponent() { return x12; }
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@@ -179,26 +171,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: new target
- // x1: target
- // x0: number of arguments
- Register registers[] = {x1, x3, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0: value (js_array)
- // x1: to_map
- Register registers[] = {x0, x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -206,41 +179,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // x1: function
- // x2: allocation site with elements kind
- // x0: number of arguments to the constructor function
- Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // x0: number of arguments
- // x1: function
- // x2: allocation site with elements kind
- Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -251,7 +190,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@@ -262,32 +200,24 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
Register registers[] = {
x1, // JSFunction
x3, // the new target
x0, // actual number of arguments
x2, // expected number of arguments
};
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
Register registers[] = {
JavaScriptFrame::context_register(), // callee context
x4, // call_data
x2, // holder
x1, // api_function_address
};
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
@@ -320,7 +250,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (argc)
@@ -330,6 +262,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.h b/deps/v8/src/arm64/interface-descriptors-arm64.h
deleted file mode 100644
index 20ab8cb612..0000000000
--- a/deps/v8/src/arm64/interface-descriptors-arm64.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
-#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
-
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-class PlatformInterfaceDescriptor {
- public:
- explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) {}
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 20533362bc..b583d7ba14 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -24,17 +24,6 @@ MemOperand FieldMemOperand(Register object, int offset) {
}
-MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
- return UntagSmiMemOperand(object, offset - kHeapObjectTag);
-}
-
-
-MemOperand UntagSmiMemOperand(Register object, int offset) {
- // Assumes that Smis are shifted by 32 bits and little endianness.
- STATIC_ASSERT(kSmiShift == 32);
- return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
-}
-
void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
@@ -297,6 +286,7 @@ void TurboAssembler::Asr(const Register& rd, const Register& rn,
}
void TurboAssembler::B(Label* label) {
+ DCHECK(allow_macro_instructions());
b(label);
CheckVeneerPool(false, false);
}
@@ -1040,46 +1030,50 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Mov(kRootRegister, Operand(roots_array_start));
+ Add(kRootRegister, kRootRegister, kRootRegisterBias);
}
void MacroAssembler::SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kXRegSizeInBits ==
- static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
-
void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void TurboAssembler::SmiUntag(Register dst, Register src) {
- STATIC_ASSERT(kXRegSizeInBits ==
- static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Asr(dst, src, kSmiShift);
}
-void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ DCHECK(dst.Is64Bits());
+ if (SmiValuesAre32Bits()) {
+ if (src.IsImmediateOffset() && src.shift_amount() == 0) {
+ // Load value directly from the upper half-word.
+ // Assumes that Smis are shifted by 32 bits and little endianness.
+ DCHECK_EQ(kSmiShift, 32);
+ Ldrsw(dst,
+ MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
+ src.addrmode()));
-void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
- DCHECK(dst.Is64Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts) {
- AssertSmi(src);
+ } else {
+ Ldr(dst, src);
+ SmiUntag(dst);
+ }
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Ldr(dst, src);
+ SmiUntag(dst);
}
- Scvtf(dst, src, kSmiShift);
}
-void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
- DCHECK(dst.Is32Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts) {
- AssertSmi(src);
- }
- Scvtf(dst, src, kSmiShift);
-}
+void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
@@ -1096,6 +1090,15 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
}
+void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+ Cmp(x, y);
+ B(eq, dest);
+}
+
+void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+ Cmp(x, y);
+ B(lt, dest);
+}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
JumpIfSmi(value, nullptr, not_smi_label);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 784ffbb275..74583523af 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -20,7 +19,7 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
@@ -28,10 +27,10 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
- unsigned buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, buffer_size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -49,22 +48,6 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
-#if DEBUG
- allow_macro_instructions_(true),
-#endif
- tmp_list_(DefaultTmpList()),
- fptmp_list_(DefaultFPTmpList()),
- use_real_aborts_(true) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
@@ -365,12 +348,12 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(rd, reference);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(rd, reference);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Mov(rd, Operand(reference));
}
@@ -1445,7 +1428,8 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
void TurboAssembler::AssertSpAligned() {
- if (emit_debug_code() && use_real_aborts()) {
+ if (emit_debug_code()) {
+ TrapOnAbortScope trap_on_abort_scope(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
@@ -1569,11 +1553,10 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
Fsub(dst, src, fp_zero);
}
-void TurboAssembler::LoadRoot(CPURegister destination,
- Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
- Ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2));
+ Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
@@ -1588,14 +1571,14 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-void TurboAssembler::Move(Register dst, Handle<HeapObject> x) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(dst, x);
- return;
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
- Mov(dst, x);
+ Mov(dst, value);
}
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
@@ -1644,18 +1627,6 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFixedArray);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
-
- CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
- Check(eq, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -1726,7 +1697,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
AssertNotSmi(object);
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
}
@@ -1765,8 +1736,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -1774,9 +1745,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
Mov(x0, f->nargs);
Mov(x1, ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, x0, x1));
+ Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1883,54 +1854,28 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
}
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
-
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- Ldr(destination, FieldMemOperand(destination, FixedArray::kHeaderSize +
- index * kPointerSize));
+ Ldr(destination,
+ FieldMemOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ldr(destination, MemOperand(kRootRegister, offset));
+}
- Ldr(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Add(destination, kRootRegister, Operand(offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) {
if (cond == nv) return;
@@ -1969,7 +1914,7 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
int64_t offset = static_cast<int64_t>(target);
// The target of WebAssembly calls is still an index instead of an actual
// address at this point, and needs to be encoded as-is.
- if (rmode != RelocInfo::WASM_CALL) {
+ if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
@@ -1986,18 +1931,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- LookupConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(scratch, cond);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code &&
+ !Builtins::IsIsolateIndependentBuiltin(*code)) {
+ // Calls to embedded targets are initially generated as standard
+ // pc-relative calls below. When creating the embedded blob, call offsets
+ // are patched up to point directly to the off-heap instruction start.
+ // Note: It is safe to dereference {code} above since code generation
+ // for builtins and code stubs happens on the main thread.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ IndirectLoadConstant(scratch, code);
+ Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(scratch, cond);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(scratch, cond);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
if (CanUseNearCallOrJump(rmode)) {
- JumpHelper(static_cast<int64_t>(GetCodeTargetIndex(code)), rmode, cond);
+ JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
} else {
Jump(code.address(), rmode, cond);
}
@@ -2045,18 +2010,38 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- LookupConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(scratch);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code &&
+ !Builtins::IsIsolateIndependentBuiltin(*code)) {
+ // Calls to embedded targets are initially generated as standard
+ // pc-relative calls below. When creating the embedded blob, call offsets
+ // are patched up to point directly to the off-heap instruction start.
+ // Note: It is safe to dereference {code} above since code generation
+ // for builtins and code stubs happens on the main thread.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ IndirectLoadConstant(scratch, code);
+ Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(scratch);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
if (CanUseNearCallOrJump(rmode)) {
- near_call(GetCodeTargetIndex(code), rmode);
+ near_call(AddCodeTarget(code), rmode);
} else {
IndirectCall(code.address(), rmode);
}
@@ -2087,7 +2072,7 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target,
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
@@ -2096,22 +2081,20 @@ void TurboAssembler::CallForDeoptimization(Address target,
Label start_call;
Bind(&start_call);
#endif
+ // The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
- // Deoptimisation table entries require the call address to be in x16, in
- // order to compute the entry id.
- // TODO(all): Put the entry id back in the table now that we are using
- // a direct branch for the call and do not need to set up x16.
DCHECK(temp.Is(x16));
- Mov(temp, Immediate(target, rmode));
-
+ // Make sure that the deopt id can be encoded in 16 bits, so can be encoded
+ // in a single movz instruction with a zero shift.
+ DCHECK(is_uint16(deopt_id));
+ movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
- static_cast<int64_t>(isolate_data().code_range_start_);
+ static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
DCHECK(IsNearCallOffset(offset));
- near_call(static_cast<int>(offset), rmode);
+ near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
@@ -2385,9 +2368,9 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
// extension to correctly handle it.
Ldr(expected_reg, FieldMemOperand(function,
JSFunction::kSharedFunctionInfoOffset));
- Ldrsw(expected_reg,
- FieldMemOperand(expected_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ Ldrh(expected_reg,
+ FieldMemOperand(expected_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(function, new_target, expected, actual, flag);
@@ -2436,7 +2419,8 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2447,7 +2431,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Push(lr, double_input);
// DoubleToI preserves any registers it needs to clobber.
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
Ldr(result, MemOperand(sp, 0));
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
@@ -2467,17 +2455,19 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
if (type == StackFrame::INTERNAL) {
- Register code_reg = temps.AcquireX();
- Move(code_reg, CodeObject());
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
- Push(lr, fp, type_reg, code_reg);
- Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
- // sp[4] : lr
- // sp[3] : fp
+ // type_reg pushed twice for alignment.
+ Push(lr, fp, type_reg, type_reg);
+ const int kFrameSize =
+ TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize;
+ Add(fp, sp, kFrameSize);
+ // sp[3] : lr
+ // sp[2] : fp
// sp[1] : type
- // sp[0] : [code object]
- } else if (type == StackFrame::WASM_COMPILED) {
+ // sp[0] : for alignment
+ } else if (type == StackFrame::WASM_COMPILED ||
+ type == StackFrame::WASM_COMPILE_LAZY) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
@@ -2507,15 +2497,10 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- if (type == StackFrame::WASM_COMPILED) {
- Mov(sp, fp);
- Pop(fp, lr);
- } else {
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- Mov(sp, fp);
- Pop(fp, lr);
- }
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(sp, fp);
+ Pop(fp, lr);
}
@@ -3046,12 +3031,13 @@ void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
RecordComment(GetAbortReason(reason));
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
Brk(0);
return;
}
-#endif
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 16aa006b2f..a73fc2f47b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -11,6 +11,7 @@
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"
+#include "src/turbo-assembler.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -41,24 +42,28 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-#define kReturnRegister0 x0
-#define kReturnRegister1 x1
-#define kReturnRegister2 x2
-#define kJSFunctionRegister x1
-#define kContextRegister cp
-#define kAllocateSizeRegister x1
-#define kSpeculationPoisonRegister x18
-#define kInterpreterAccumulatorRegister x0
-#define kInterpreterBytecodeOffsetRegister x19
-#define kInterpreterBytecodeArrayRegister x20
-#define kInterpreterDispatchTableRegister x21
-#define kJavaScriptCallArgCountRegister x0
-#define kJavaScriptCallCodeStartRegister x2
-#define kJavaScriptCallNewTargetRegister x3
-#define kOffHeapTrampolineRegister ip0
-#define kRuntimeCallFunctionRegister x1
-#define kRuntimeCallArgCountRegister x0
-#define kWasmInstanceRegister x7
+constexpr Register kReturnRegister0 = x0;
+constexpr Register kReturnRegister1 = x1;
+constexpr Register kReturnRegister2 = x2;
+constexpr Register kJSFunctionRegister = x1;
+constexpr Register kContextRegister = cp;
+constexpr Register kAllocateSizeRegister = x1;
+constexpr Register kSpeculationPoisonRegister = x18;
+constexpr Register kInterpreterAccumulatorRegister = x0;
+constexpr Register kInterpreterBytecodeOffsetRegister = x19;
+constexpr Register kInterpreterBytecodeArrayRegister = x20;
+constexpr Register kInterpreterDispatchTableRegister = x21;
+
+constexpr Register kJavaScriptCallArgCountRegister = x0;
+constexpr Register kJavaScriptCallCodeStartRegister = x2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = x3;
+constexpr Register kJavaScriptCallExtraArg1Register = x2;
+
+constexpr Register kOffHeapTrampolineRegister = ip0;
+constexpr Register kRuntimeCallFunctionRegister = x1;
+constexpr Register kRuntimeCallArgCountRegister = x0;
+constexpr Register kWasmInstanceRegister = x7;
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
@@ -97,11 +102,6 @@ namespace internal {
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
-inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
-
-// Generate a MemOperand for loading a SMI from memory.
-inline MemOperand UntagSmiMemOperand(Register object, int offset);
-
// ----------------------------------------------------------------------------
// MacroAssembler
@@ -177,10 +177,13 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntry. If use_real_aborts is false, Abort will
@@ -203,16 +206,6 @@ class TurboAssembler : public Assembler {
TurboAssembler* tasm_;
};
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
#if DEBUG
void set_allow_macro_instructions(bool value) {
allow_macro_instructions_ = value;
@@ -264,7 +257,7 @@ class TurboAssembler : public Assembler {
// This is required for compatibility with architecture independent code.
// Remove if not needed.
void Move(Register dst, Register src);
- void Move(Register dst, Handle<HeapObject> x);
+ void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
@@ -573,9 +566,10 @@ class TurboAssembler : public Assembler {
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
@@ -587,6 +581,7 @@ class TurboAssembler : public Assembler {
Register scratch1);
inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
@@ -862,6 +857,9 @@ class TurboAssembler : public Assembler {
inline void JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label = nullptr);
+ inline void JumpIfEqual(Register x, int32_t y, Label* dest);
+ inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
+
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
// Provide explicit double and float interfaces for FP immediate moves, rather
@@ -882,11 +880,10 @@ class TurboAssembler : public Assembler {
int shift_amount = 0);
void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
@@ -900,7 +897,8 @@ class TurboAssembler : public Assembler {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode);
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode);
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
@@ -924,7 +922,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input);
+ DoubleRegister double_input, StubCallMode stub_mode);
inline void Mul(const Register& rd, const Register& rn, const Register& rm);
@@ -1167,7 +1165,7 @@ class TurboAssembler : public Assembler {
#undef DECLARE_FUNCTION
// Load an object from the root table.
- void LoadRoot(CPURegister destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index) override;
inline void Ret(const Register& xn = lr);
@@ -1231,9 +1229,6 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1266,26 +1261,20 @@ class TurboAssembler : public Assembler {
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* const isolate_;
#if DEBUG
// Tell whether any of the macro instruction can be used. When false the
// MacroAssembler will assert if a method which can emit a variable number
// of instructions is called.
- bool allow_macro_instructions_;
+ bool allow_macro_instructions_ = true;
#endif
// Scratch registers available for use by the MacroAssembler.
- CPURegList tmp_list_;
- CPURegList fptmp_list_;
+ CPURegList tmp_list_ = DefaultTmpList();
+ CPURegList fptmp_list_ = DefaultFPTmpList();
- bool use_real_aborts_;
+ bool use_real_aborts_ = true;
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
@@ -1314,8 +1303,12 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
- CodeObjectRequired create_code_object);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// Instruction set functions ------------------------------------------------
// Logical macros.
@@ -1716,8 +1709,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
- inline void SmiUntagToDouble(VRegister dst, Register src);
- inline void SmiUntagToFloat(VRegister dst, Register src);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
@@ -1740,9 +1731,6 @@ class MacroAssembler : public TurboAssembler {
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 839c4edda6..09c447fdb5 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -3184,7 +3184,7 @@ void Simulator::Debug() {
(strcmp(cmd, "po") == 0)) {
if (argc == 2) {
int64_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -3246,7 +3246,7 @@ void Simulator::Debug() {
current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
- STATIC_ASSERT(kSmiValueSize == 32);
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
PrintF("smi %" PRId32, untagged);
} else {
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 3d835fb2cb..604207bc0d 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -338,10 +338,10 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
Handle<HeapNumber> uses_bitset(
- HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)));
+ HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)), isolate);
Handle<WasmModuleObject> module(
- WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
- Handle<Script> script(Script::cast(shared->script()));
+ WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)), isolate);
+ Handle<Script> script(Script::cast(shared->script()), isolate);
// TODO(mstarzinger): The position currently points to the module definition
// but should instead point to the instantiation site (more intuitive).
int position = shared->StartPosition();
@@ -405,7 +405,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<Name> single_function_name(
isolate->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName));
MaybeHandle<Object> single_function =
- Object::GetProperty(module_object, single_function_name);
+ Object::GetProperty(isolate, module_object, single_function_name);
if (!single_function.is_null() &&
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
@@ -413,7 +413,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<String> exports_name =
isolate->factory()->InternalizeUtf8String("exports");
- return Object::GetProperty(module_object, exports_name);
+ return Object::GetProperty(isolate, module_object, exports_name);
}
} // namespace internal
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 1fca56b0fc..fee309d9fb 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -553,7 +553,7 @@ void AsmJsParser::ValidateModuleVarImport(VarInfo* info,
} else {
info->kind = VarKind::kImportedFunction;
info->import = new (zone()->New(sizeof(FunctionImportInfo)))
- FunctionImportInfo({name, WasmModuleBuilder::SignatureMap(zone())});
+ FunctionImportInfo(name, zone());
info->mutable_variable = false;
}
}
@@ -2210,14 +2210,14 @@ AsmType* AsmJsParser::ValidateCall() {
DCHECK_NOT_NULL(function_info->import);
// TODO(bradnelson): Factor out.
uint32_t index;
- auto it = function_info->import->cache.find(sig);
+ auto it = function_info->import->cache.find(*sig);
if (it != function_info->import->cache.end()) {
index = it->second;
DCHECK(function_info->function_defined);
} else {
index =
module_builder_->AddImport(function_info->import->function_name, sig);
- function_info->import->cache[sig] = index;
+ function_info->import->cache[*sig] = index;
function_info->function_defined = true;
}
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index bddb8c62e9..ac8a05a028 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -76,9 +76,16 @@ class AsmJsParser {
};
// clang-format on
+ // A single import in asm.js can require multiple imports in wasm, if the
+ // function is used with different signatures. {cache} keeps the wasm
+ // imports for the single asm.js import of name {function_name}.
struct FunctionImportInfo {
Vector<const char> function_name;
- WasmModuleBuilder::SignatureMap cache;
+ ZoneUnorderedMap<FunctionSig, uint32_t> cache;
+
+ // Constructor.
+ FunctionImportInfo(Vector<const char> name, Zone* zone)
+ : function_name(name), cache(zone) {}
};
struct VarInfo {
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index af41208ead..c7144e3be6 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -4,6 +4,7 @@
#include "src/asmjs/asm-scanner.h"
+#include "src/char-predicates-inl.h"
#include "src/conversions.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
@@ -273,17 +274,22 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
std::string number;
number = ch;
bool has_dot = ch == '.';
+ bool has_prefix = false;
for (;;) {
ch = stream_->Advance();
if ((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') ||
(ch >= 'A' && ch <= 'F') || ch == '.' || ch == 'b' || ch == 'o' ||
ch == 'x' ||
- ((ch == '-' || ch == '+') && (number[number.size() - 1] == 'e' ||
- number[number.size() - 1] == 'E'))) {
+ ((ch == '-' || ch == '+') && !has_prefix &&
+ (number[number.size() - 1] == 'e' ||
+ number[number.size() - 1] == 'E'))) {
// TODO(bradnelson): Test weird cases ending in -.
if (ch == '.') {
has_dot = true;
}
+ if (ch == 'b' || ch == 'o' || ch == 'x') {
+ has_prefix = true;
+ }
number.push_back(ch);
} else {
break;
@@ -413,16 +419,13 @@ void AsmJsScanner::ConsumeCompareOrShift(uc32 ch) {
}
bool AsmJsScanner::IsIdentifierStart(uc32 ch) {
- return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_' ||
- ch == '$';
+ return IsInRange(AsciiAlphaToLower(ch), 'a', 'z') || ch == '_' || ch == '$';
}
-bool AsmJsScanner::IsIdentifierPart(uc32 ch) {
- return IsIdentifierStart(ch) || (ch >= '0' && ch <= '9');
-}
+bool AsmJsScanner::IsIdentifierPart(uc32 ch) { return IsAsciiIdentifier(ch); }
bool AsmJsScanner::IsNumberStart(uc32 ch) {
- return ch == '.' || (ch >= '0' && ch <= '9');
+ return ch == '.' || IsDecimalDigit(ch);
}
} // namespace internal
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 799f08a4d8..a431c7442d 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -43,28 +43,40 @@
#include "src/ostreams.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
-// -----------------------------------------------------------------------------
-// Implementation of AssemblerBase
-
-AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
- : serializer_enabled_(isolate->serializer_enabled())
+AssemblerOptions AssemblerOptions::Default(
+ Isolate* isolate, bool explicitly_support_serialization) {
+ AssemblerOptions options;
+ bool serializer =
+ isolate->serializer_enabled() || explicitly_support_serialization;
+ options.record_reloc_info_for_serialization = serializer;
+ options.enable_root_array_delta_access = !serializer;
+#ifdef USE_SIMULATOR
+ // Don't generate simulator specific code if we are building a snapshot, which
+ // might be run on real hardware.
+ options.enable_simulator_code = !serializer;
+#endif
+ options.isolate_independent_code = isolate->ShouldLoadConstantsFromRootList();
+ options.inline_offheap_trampolines = !serializer;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- ,
- code_range_start_(
- isolate->heap()->memory_allocator()->code_range()->start())
+ options.code_range_start =
+ isolate->heap()->memory_allocator()->code_range()->start();
#endif
-{
+ return options;
}
-AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
+// -----------------------------------------------------------------------------
+// Implementation of AssemblerBase
+
+AssemblerBase::AssemblerBase(const AssemblerOptions& options, void* buffer,
int buffer_size)
- : isolate_data_(isolate_data),
+ : options_(options),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
@@ -95,7 +107,7 @@ void AssemblerBase::FlushICache(void* start, size_t size) {
}
void AssemblerBase::Print(Isolate* isolate) {
- OFStream os(stdout);
+ StdoutStream os;
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_);
}
@@ -164,8 +176,7 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
//
// 01: code_target: [6-bit pc delta] 01
//
-// 10: short_data_record: [6-bit pc delta] 10 followed by
-// [8-bit data delta]
+// 10: wasm_stub_call: [6-bit pc delta] 10
//
// 11: long_record [6 bit reloc mode] 11
// followed by pc delta
@@ -189,7 +200,7 @@ const int kLongTagBits = 6;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
-const int kLocatableTag = 2;
+const int kWasmStubCallTag = 2;
const int kDefaultTag = 3;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
@@ -202,54 +213,6 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-// static
-bool RelocInfo::OffHeapTargetIsCodedSpecially() {
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
- defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
- return false;
-#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
- return true;
-#endif
-}
-
-void RelocInfo::set_global_handle(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
- set_embedded_address(address, icache_flush_mode);
-}
-
-Address RelocInfo::wasm_call_address() const {
- DCHECK_EQ(rmode_, WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-void RelocInfo::set_wasm_call_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::global_handle() const {
- DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
- return embedded_address();
-}
-
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
- IsCodeTarget(rmode_)) {
- Code* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target_code);
- }
-}
-
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
@@ -324,14 +287,15 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
- } else if (rmode == RelocInfo::DEOPT_REASON) {
- DCHECK(rinfo->data() < (1 << kBitsPerByte));
- WriteShortTaggedPC(pc_delta, kLocatableTag);
- WriteShortData(rinfo->data());
+ } else if (rmode == RelocInfo::WASM_STUB_CALL) {
+ WriteShortTaggedPC(pc_delta, kWasmStubCallTag);
} else {
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
+ } else if (RelocInfo::IsDeoptReason(rmode)) {
+ DCHECK_LT(rinfo->data(), 1 << kBitsPerByte);
+ WriteShortData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode)) {
@@ -412,13 +376,9 @@ void RelocIterator::next() {
} else if (tag == kCodeTargetTag) {
ReadShortTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
- } else if (tag == kLocatableTag) {
+ } else if (tag == kWasmStubCallTag) {
ReadShortTaggedPC();
- Advance();
- if (SetMode(RelocInfo::DEOPT_REASON)) {
- ReadShortData();
- return;
- }
+ if (SetMode(RelocInfo::WASM_STUB_CALL)) return;
} else {
DCHECK_EQ(tag, kDefaultTag);
RelocInfo::Mode rmode = GetMode();
@@ -432,6 +392,12 @@ void RelocIterator::next() {
return;
}
Advance(kIntptrSize);
+ } else if (RelocInfo::IsDeoptReason(rmode)) {
+ Advance();
+ if (SetMode(rmode)) {
+ ReadShortData();
+ return;
+ }
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
@@ -461,6 +427,14 @@ RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
code_reference.relocation_end(),
code_reference.relocation_start(), mode_mask) {}
+RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
+ int mode_mask)
+ : RelocIterator(
+ code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
+ code->constant_pool(),
+ code->relocation_start() + code->relocation_size(),
+ code->relocation_start(), mode_mask) {}
+
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
desc.buffer + desc.buffer_size,
@@ -472,9 +446,7 @@ RelocIterator::RelocIterator(Vector<byte> instructions,
int mode_mask)
: RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
const_pool, reloc_info.start() + reloc_info.size(),
- reloc_info.start(), mode_mask) {
- rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
-}
+ reloc_info.start(), mode_mask) {}
RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask)
@@ -491,12 +463,63 @@ RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
+// static
+bool RelocInfo::OffHeapTargetIsCodedSpecially() {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+ return false;
+#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
+ return true;
+#endif
+}
+
+Address RelocInfo::wasm_call_address() const {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_wasm_call_address(Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
+}
+
+Address RelocInfo::wasm_stub_call_address() const {
+ DCHECK_EQ(rmode_, WASM_STUB_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_wasm_stub_call_address(Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, WASM_STUB_CALL);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
+}
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
+ IsCodeTargetMode(rmode_)) {
+ Code* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ target_code);
+ }
+}
+
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
- int mode_mask = RelocInfo::kCodeTargetMask |
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::kApplyMask;
RelocIterator it(desc, mode_mask);
@@ -513,6 +536,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case CODE_TARGET:
return "code target";
+ case RELATIVE_CODE_TARGET:
+ return "relative code target";
case RUNTIME_ENTRY:
return "runtime entry";
case COMMENT:
@@ -537,12 +562,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
- case WASM_GLOBAL_HANDLE:
- return "global handle";
case WASM_CALL:
return "internal wasm call";
- case WASM_CODE_TABLE_ENTRY:
- return "wasm code table entry";
+ case WASM_STUB_CALL:
+ return "wasm stub call";
case JS_TO_WASM_CALL:
return "js to wasm call";
case NUMBER_OF_MODES:
@@ -564,33 +587,32 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder(isolate);
- os << " ("
- << ref_encoder.NameOfAddress(isolate, target_external_reference())
- << ") (" << reinterpret_cast<const void*>(target_external_reference())
+ if (isolate) {
+ ExternalReferenceEncoder ref_encoder(isolate);
+ os << " ("
+ << ref_encoder.NameOfAddress(isolate, target_external_reference())
+ << ") ";
+ }
+ os << " (" << reinterpret_cast<const void*>(target_external_reference())
<< ")";
- } else if (IsCodeTarget(rmode_)) {
+ } else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
- if (flags_ & kInNativeWasmCode) {
- os << " (wasm trampoline) ";
- } else {
- Code* code = Code::GetCodeFromTargetAddress(code_target);
- DCHECK(code->IsCode());
- os << " (" << Code::Kind2String(code->kind());
- if (Builtins::IsBuiltin(code)) {
- os << " " << Builtins::name(code->builtin_index());
- } else if (code->kind() == Code::STUB) {
- os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
- }
- os << ") ";
+ Code* code = Code::GetCodeFromTargetAddress(code_target);
+ DCHECK(code->IsCode());
+ os << " (" << Code::Kind2String(code->kind());
+ if (Builtins::IsBuiltin(code)) {
+ os << " " << Builtins::name(code->builtin_index());
+ } else if (code->kind() == Code::STUB) {
+ os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
}
- os << " (" << reinterpret_cast<const void*>(target_address()) << ")";
+ os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
- // Depotimization bailouts are stored as runtime entries.
- int id = Deoptimizer::GetDeoptimizationId(
- isolate, target_address(), Deoptimizer::EAGER);
- if (id != Deoptimizer::kNotDeoptimizationEntry) {
- os << " (deoptimization bailout " << id << ")";
+ // Deoptimization bailouts are stored as runtime entries.
+ DeoptimizeKind type;
+ if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
+ int id = GetDeoptimizationId(isolate, type);
+ os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
+ << id << ")";
}
} else if (IsConstPool(rmode_)) {
os << " (size " << static_cast<int>(data_) << ")";
@@ -604,9 +626,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
case EMBEDDED_OBJECT:
- Object::VerifyPointer(target_object());
+ Object::VerifyPointer(isolate, target_object());
break;
- case CODE_TARGET: {
+ case CODE_TARGET:
+ case RELATIVE_CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
@@ -641,10 +664,9 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
- case WASM_GLOBAL_HANDLE:
case WASM_CALL:
+ case WASM_STUB_CALL:
case JS_TO_WASM_CALL:
- case WASM_CODE_TABLE_ENTRY:
case NONE:
break;
case NUMBER_OF_MODES:
@@ -888,10 +910,35 @@ void Assembler::DataAlign(int m) {
}
}
-void Assembler::RequestHeapObject(HeapObjectRequest request) {
+void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
+int AssemblerBase::AddCodeTarget(Handle<Code> target) {
+ int current = static_cast<int>(code_targets_.size());
+ if (current > 0 && !target.is_null() &&
+ code_targets_.back().address() == target.address()) {
+ // Optimization if we keep jumping to the same code target.
+ return current - 1;
+ } else {
+ code_targets_.push_back(target);
+ return current;
+ }
+}
+
+Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+ DCHECK_LE(0, code_target_index);
+ DCHECK_LT(code_target_index, code_targets_.size());
+ return code_targets_[code_target_index];
+}
+
+void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
+ Handle<Code> code) {
+ DCHECK_LE(0, code_target_index);
+ DCHECK_LT(code_target_index, code_targets_.size());
+ code_targets_[code_target_index] = code;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 35f6147053..28ec2a68c6 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -60,6 +60,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
@@ -78,12 +79,57 @@ class JumpOptimizationInfo {
bool is_optimizable() const { return optimizable_; }
void set_optimizable() { optimizable_ = true; }
+ // Used to verify the instruction sequence is always the same in two stages.
+ size_t hash_code() const { return hash_code_; }
+ void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }
+
std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }
private:
enum { kCollection, kOptimization } stage_ = kCollection;
bool optimizable_ = false;
std::vector<uint32_t> farjmp_bitmap_;
+ size_t hash_code_ = 0u;
+};
+
+class HeapObjectRequest {
+ public:
+ explicit HeapObjectRequest(double heap_number, int offset = -1);
+ explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
+
+ enum Kind { kHeapNumber, kCodeStub };
+ Kind kind() const { return kind_; }
+
+ double heap_number() const {
+ DCHECK_EQ(kind(), kHeapNumber);
+ return value_.heap_number;
+ }
+
+ CodeStub* code_stub() const {
+ DCHECK_EQ(kind(), kCodeStub);
+ return value_.code_stub;
+ }
+
+ // The code buffer offset at the time of the request.
+ int offset() const {
+ DCHECK_GE(offset_, 0);
+ return offset_;
+ }
+ void set_offset(int offset) {
+ DCHECK_LT(offset_, 0);
+ offset_ = offset;
+ DCHECK_GE(offset_, 0);
+ }
+
+ private:
+ Kind kind_;
+
+ union {
+ double heap_number;
+ CodeStub* code_stub;
+ } value_;
+
+ int offset_;
};
// -----------------------------------------------------------------------------
@@ -91,26 +137,46 @@ class JumpOptimizationInfo {
enum class CodeObjectRequired { kNo, kYes };
+struct V8_EXPORT_PRIVATE AssemblerOptions {
+ // Recording reloc info for external references and off-heap targets is
+ // needed whenever code is serialized, e.g. into the snapshot or as a WASM
+ // module. This flag allows this reloc info to be disabled for code that
+ // will not survive process destruction.
+ bool record_reloc_info_for_serialization = true;
+ // Recording reloc info can be disabled wholesale. This is needed when the
+ // assembler is used on existing code directly (e.g. JumpTableAssembler)
+ // without any buffer to hold reloc information.
+ bool disable_reloc_info_for_patching = false;
+ // Enables access to exrefs by computing a delta from the root array.
+ // Only valid if code will not survive the process.
+ bool enable_root_array_delta_access = false;
+ // Enables specific assembler sequences only used for the simulator.
+ bool enable_simulator_code = false;
+ // Enables use of isolate-independent constants, indirected through the
+ // root array.
+ // (macro assembler feature).
+ bool isolate_independent_code = false;
+ // Enables the use of isolate-independent builtins through an off-heap
+ // trampoline. (macro assembler feature).
+ bool inline_offheap_trampolines = false;
+ // On some platforms, all code is within a given range in the process,
+ // and the start of this range is configured here.
+ Address code_range_start = 0;
+ // Enable pc-relative calls/jumps on platforms that support it. When setting
+ // this flag, the code range must be small enough to fit all offsets into
+ // the instruction immediates.
+ bool use_pc_relative_calls_and_jumps = false;
+
+ static AssemblerOptions Default(
+ Isolate* isolate, bool explicitly_support_serialization = false);
+};
-class AssemblerBase: public Malloced {
+class AssemblerBase : public Malloced {
public:
- struct IsolateData {
- explicit IsolateData(Isolate* isolate);
- IsolateData(const IsolateData&) = default;
-
- bool serializer_enabled_;
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- Address code_range_start_;
-#endif
- };
-
- AssemblerBase(IsolateData isolate_data, void* buffer, int buffer_size);
+ AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~AssemblerBase();
- IsolateData isolate_data() const { return isolate_data_; }
-
- bool serializer_enabled() const { return isolate_data_.serializer_enabled_; }
- void enable_serializer() { isolate_data_.serializer_enabled_ = true; }
+ const AssemblerOptions& options() const { return options_; }
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
@@ -167,12 +233,30 @@ class AssemblerBase: public Malloced {
return FlushICache(reinterpret_cast<void*>(start), size);
}
+ // Used to print the name of some special registers.
+ static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
+
protected:
+ // Add 'target' to the {code_targets_} vector, if necessary, and return the
+ // offset at which it is stored.
+ int AddCodeTarget(Handle<Code> target);
+ Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
+ // Update to the code target at {code_target_index} to {target}.
+ void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
+ // Reserves space in the code target vector.
+ void ReserveCodeTargetSpace(size_t num_of_code_targets) {
+ code_targets_.reserve(num_of_code_targets);
+ }
+
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
byte* buffer_;
int buffer_size_;
bool own_buffer_;
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+ // The program counter, which points into the buffer above and moves forward.
+ // TODO(jkummerow): This should probably have type {Address}.
+ byte* pc_;
void set_constant_pool_available(bool available) {
if (FLAG_enable_embedded_constant_pool) {
@@ -183,12 +267,23 @@ class AssemblerBase: public Malloced {
}
}
- // The program counter, which points into the buffer above and moves forward.
- // TODO(jkummerow): This should probably have type {Address}.
- byte* pc_;
+ // {RequestHeapObject} records the need for a future heap number allocation or
+ // code stub generation. After code assembly, each platform's
+ // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request).
+ void RequestHeapObject(HeapObjectRequest request);
private:
- IsolateData isolate_data_;
+ // Before we copy code into the code space, we sometimes cannot encode
+ // call/jump code targets as we normally would, as the difference between the
+ // instruction's location in the temporary buffer and the call target is not
+ // guaranteed to fit in the instruction's offset field. We keep track of the
+ // code handles we encounter in calls in this vector, and encode the index of
+ // the code handle in the vector instead.
+ std::vector<Handle<Code>> code_targets_;
+
+ const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
bool predictable_code_size_;
@@ -340,12 +435,6 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
- enum Flag : uint8_t {
- kNoFlags = 0,
- kInNativeWasmCode = 1u << 0, // Reloc info belongs to native wasm code.
- };
- typedef base::Flags<Flag> Flags;
-
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@@ -363,12 +452,16 @@ class RelocInfo {
static const int kMaxSmallPCDelta;
enum Mode : int8_t {
- // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
+ // Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
+ // and IsShareableRelocMode predicates below).
+
CODE_TARGET,
- EMBEDDED_OBJECT,
- WASM_GLOBAL_HANDLE,
- WASM_CALL,
+ RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
+ EMBEDDED_OBJECT, // LAST_GCED_ENUM
+
JS_TO_WASM_CALL,
+ WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
+ WASM_STUB_CALL,
RUNTIME_ENTRY,
COMMENT,
@@ -396,32 +489,43 @@ class RelocInfo {
// cannot be encoded as part of another record.
PC_JUMP,
- // Points to a wasm code table entry.
- WASM_CODE_TABLE_ENTRY,
-
// Pseudo-types
NUMBER_OF_MODES,
NONE, // never recorded value
+ LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
- LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT,
- FIRST_SHAREABLE_RELOC_MODE = RUNTIME_ENTRY,
+ FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
RelocInfo() = default;
- RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host)
- : pc_(pc), rmode_(rmode), data_(data), host_(host) {}
+ RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
+ Address constant_pool = kNullAddress)
+ : pc_(pc),
+ rmode_(rmode),
+ data_(data),
+ host_(host),
+ constant_pool_(constant_pool) {}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
}
- static inline bool IsCodeTarget(Mode mode) {
- return mode <= LAST_CODE_ENUM;
+ // Is the relocation mode affected by GC?
+ static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; }
+ static inline bool IsShareableRelocMode(Mode mode) {
+ return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
+ }
+ static inline bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
+ static inline bool IsCodeTargetMode(Mode mode) {
+ return mode <= LAST_CODE_TARGET_MODE;
+ }
+ static inline bool IsRelativeCodeTarget(Mode mode) {
+ return mode == RELATIVE_CODE_TARGET;
}
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
@@ -430,9 +534,8 @@ class RelocInfo {
return mode == RUNTIME_ENTRY;
}
static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
- // Is the relocation mode affected by GC?
- static inline bool IsGCRelocMode(Mode mode) {
- return mode <= LAST_GCED_ENUM;
+ static inline bool IsWasmStubCall(Mode mode) {
+ return mode == WASM_STUB_CALL;
}
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
@@ -469,8 +572,11 @@ class RelocInfo {
return IsWasmPtrReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_GLOBAL_HANDLE || mode == WASM_CALL ||
- mode == JS_TO_WASM_CALL;
+ return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
+ }
+
+ static inline bool IsOnlyForSerializer(Mode mode) {
+ return mode == EXTERNAL_REFERENCE || mode == OFF_HEAP_TARGET;
}
static constexpr int ModeMask(Mode mode) { return 1 << mode; }
@@ -481,15 +587,12 @@ class RelocInfo {
intptr_t data() const { return data_; }
Code* host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
- void set_constant_pool(Address constant_pool) {
- constant_pool_ = constant_pool;
- }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
// inside the code (internal references).
// Do not forget to flush the icache afterwards!
- INLINE(void apply(intptr_t delta));
+ V8_INLINE void apply(intptr_t delta);
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (e.g. relative or patched into a series of
@@ -504,50 +607,55 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- Address global_handle() const;
- Address js_to_wasm_address() const;
+ // Returns the deoptimization id for the entry associated with the reloc info
+ // where {kind} is the deoptimization kind.
+ // This is only used for printing RUNTIME_ENTRY relocation info.
+ int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
+
Address wasm_call_address() const;
+ Address wasm_stub_call_address() const;
+ Address js_to_wasm_address() const;
- void set_target_address(
- Address target,
- WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ uint32_t wasm_call_tag() const;
- void set_global_handle(Address address, ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_wasm_stub_call_address(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_target_address(
+ Address target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- INLINE(Address target_address());
- INLINE(HeapObject* target_object());
- INLINE(Handle<HeapObject> target_object_handle(Assembler* origin));
- INLINE(void set_target_object(
- HeapObject* target,
+ V8_INLINE Address target_address();
+ V8_INLINE HeapObject* target_object();
+ V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
+ V8_INLINE void set_target_object(
+ Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(Address target_runtime_entry(Assembler* origin));
- INLINE(void set_target_runtime_entry(
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE Address target_runtime_entry(Assembler* origin);
+ V8_INLINE void set_target_runtime_entry(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(Address target_off_heap_target());
- INLINE(Cell* target_cell());
- INLINE(Handle<Cell> target_cell_handle());
- INLINE(void set_target_cell(
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE Address target_off_heap_target();
+ V8_INLINE Cell* target_cell();
+ V8_INLINE Handle<Cell> target_cell_handle();
+ V8_INLINE void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(void set_wasm_code_table_entry(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(void set_target_external_reference(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE void set_target_external_reference(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
- INLINE(Address constant_pool_entry_address());
+ V8_INLINE Address constant_pool_entry_address();
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@@ -555,7 +663,7 @@ class RelocInfo {
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
- INLINE(Address target_address_address());
+ V8_INLINE Address target_address_address();
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
@@ -566,23 +674,23 @@ class RelocInfo {
// should return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target.
- INLINE(int target_address_size());
+ V8_INLINE int target_address_size();
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
- INLINE(Address target_external_reference());
+ V8_INLINE Address target_external_reference();
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
- INLINE(Address target_internal_reference());
+ V8_INLINE Address target_internal_reference();
// Return the reference address this relocation applies to;
// can only be called if rmode_ is INTERNAL_REFERENCE.
- INLINE(Address target_internal_reference_address());
+ V8_INLINE Address target_internal_reference_address();
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
- INLINE(void WipeOut());
+ V8_INLINE void WipeOut();
template <typename ObjectVisitor>
inline void Visit(ObjectVisitor* v);
@@ -602,16 +710,9 @@ class RelocInfo {
void Verify(Isolate* isolate);
#endif
- static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
- void set_embedded_address(Address address, ICacheFlushMode flush_mode);
- void set_embedded_size(uint32_t size, ICacheFlushMode flush_mode);
-
- uint32_t embedded_size() const;
- Address embedded_address() const;
-
// On ARM/ARM64, note that pc_ is the address of the instruction referencing
// the constant pool and not the address of the constant pool entry.
Address pc_;
@@ -619,7 +720,6 @@ class RelocInfo {
intptr_t data_ = 0;
Code* host_;
Address constant_pool_ = kNullAddress;
- Flags flags_;
friend class RelocIterator;
};
@@ -679,6 +779,8 @@ class RelocIterator: public Malloced {
// Relocation information with mode k is included in the
// iteration iff bit k of mode_mask is set.
explicit RelocIterator(Code* code, int mode_mask = -1);
+ explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
+ int mode_mask);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
int mode_mask = -1);
@@ -877,46 +979,6 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
-class HeapObjectRequest {
- public:
- explicit HeapObjectRequest(double heap_number, int offset = -1);
- explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
-
- enum Kind { kHeapNumber, kCodeStub };
- Kind kind() const { return kind_; }
-
- double heap_number() const {
- DCHECK_EQ(kind(), kHeapNumber);
- return value_.heap_number;
- }
-
- CodeStub* code_stub() const {
- DCHECK_EQ(kind(), kCodeStub);
- return value_.code_stub;
- }
-
- // The code buffer offset at the time of the request.
- int offset() const {
- DCHECK_GE(offset_, 0);
- return offset_;
- }
- void set_offset(int offset) {
- DCHECK_LT(offset_, 0);
- offset_ = offset;
- DCHECK_GE(offset_, 0);
- }
-
- private:
- Kind kind_;
-
- union {
- double heap_number;
- CodeStub* code_stub;
- } value_;
-
- int offset_;
-};
-
// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 35432fa647..640de541b5 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -41,7 +41,7 @@ class AstTraversalVisitor : public AstVisitor<Subclass> {
// Iteration left-to-right.
void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZoneList<Statement*>* statements);
+ void VisitStatements(ZonePtrList<Statement>* statements);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -112,7 +112,7 @@ void AstTraversalVisitor<Subclass>::VisitDeclarations(
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitStatements(
- ZoneList<Statement*>* stmts) {
+ ZonePtrList<Statement>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
RECURSE(Visit(stmt));
@@ -198,14 +198,14 @@ void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
PROCESS_NODE(stmt);
RECURSE(Visit(stmt->tag()));
- ZoneList<CaseClause*>* clauses = stmt->cases();
+ ZonePtrList<CaseClause>* clauses = stmt->cases();
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
if (!clause->is_default()) {
Expression* label = clause->label();
RECURSE(Visit(label));
}
- ZoneList<Statement*>* stmts = clause->statements();
+ ZonePtrList<Statement>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
}
}
@@ -330,7 +330,7 @@ void AstTraversalVisitor<Subclass>::VisitRegExpLiteral(RegExpLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
PROCESS_EXPRESSION(expr);
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ ZonePtrList<ObjectLiteralProperty>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
RECURSE_EXPRESSION(Visit(prop->key()));
@@ -341,7 +341,7 @@ void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitArrayLiteral(ArrayLiteral* expr) {
PROCESS_EXPRESSION(expr);
- ZoneList<Expression*>* values = expr->values();
+ ZonePtrList<Expression>* values = expr->values();
for (int i = 0; i < values->length(); ++i) {
Expression* value = values->at(i);
RECURSE_EXPRESSION(Visit(value));
@@ -404,7 +404,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
+ ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -415,7 +415,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
+ ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -425,7 +425,7 @@ void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallRuntime(CallRuntime* expr) {
PROCESS_EXPRESSION(expr);
- ZoneList<Expression*>* args = expr->arguments();
+ ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -487,7 +487,7 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
if (expr->instance_fields_initializer_function() != nullptr) {
RECURSE_EXPRESSION(Visit(expr->instance_fields_initializer_function()));
}
- ZoneList<ClassLiteralProperty*>* props = expr->properties();
+ ZonePtrList<ClassLiteral::Property>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
@@ -501,7 +501,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitInitializeClassFieldsStatement(
InitializeClassFieldsStatement* stmt) {
PROCESS_NODE(stmt);
- ZoneList<ClassLiteralProperty*>* props = stmt->fields();
+ ZonePtrList<ClassLiteral::Property>* props = stmt->fields();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 5efecc5375..fc8be819f6 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -182,7 +182,7 @@ std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
return result;
}
-AstStringConstants::AstStringConstants(Isolate* isolate, uint32_t hash_seed)
+AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index a453455dd0..e85b0675bf 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -240,14 +240,14 @@ class AstBigInt {
class AstStringConstants final {
public:
- AstStringConstants(Isolate* isolate, uint32_t hash_seed);
+ AstStringConstants(Isolate* isolate, uint64_t hash_seed);
#define F(name, str) \
const AstRawString* name##_string() const { return name##_string_; }
AST_STRING_CONSTANTS(F)
#undef F
- uint32_t hash_seed() const { return hash_seed_; }
+ uint64_t hash_seed() const { return hash_seed_; }
const base::CustomMatcherHashMap* string_table() const {
return &string_table_;
}
@@ -255,7 +255,7 @@ class AstStringConstants final {
private:
Zone zone_;
base::CustomMatcherHashMap string_table_;
- uint32_t hash_seed_;
+ uint64_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
AST_STRING_CONSTANTS(F)
@@ -267,7 +267,7 @@ class AstStringConstants final {
class AstValueFactory {
public:
AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
- uint32_t hash_seed)
+ uint64_t hash_seed)
: string_table_(string_constants->string_table()),
strings_(nullptr),
strings_end_(&strings_),
@@ -354,7 +354,7 @@ class AstValueFactory {
Zone* zone_;
- uint32_t hash_seed_;
+ uint64_t hash_seed_;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 15b8bff61b..5a4add6039 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -7,7 +7,6 @@
#include <cmath> // For isfinite.
#include <vector>
-#include "src/ast/compile-time-value.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
@@ -19,6 +18,7 @@
#include "src/double.h"
#include "src/elements.h"
#include "src/objects-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/map.h"
#include "src/property-details.h"
@@ -114,6 +114,13 @@ bool Expression::IsTheHoleLiteral() const {
return IsLiteral() && AsLiteral()->type() == Literal::kTheHole;
}
+bool Expression::IsCompileTimeValue() {
+ if (IsLiteral()) return true;
+ MaterializedLiteral* literal = AsMaterializedLiteral();
+ if (literal == nullptr) return false;
+ return literal->IsSimple();
+}
+
bool Expression::IsUndefinedLiteral() const {
if (IsLiteral() && AsLiteral()->type() == Literal::kUndefined) return true;
@@ -334,8 +341,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
- (kind_ == MATERIALIZED_LITERAL &&
- CompileTimeValue::IsCompileTimeValue(value_));
+ (kind_ == MATERIALIZED_LITERAL && value_->IsCompileTimeValue());
}
@@ -360,19 +366,37 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
Literal* literal = property->key()->AsLiteral();
DCHECK(!literal->IsNullLiteral());
- // If there is an existing entry do not emit a store unless the previous
- // entry was also an accessor.
uint32_t hash = literal->Hash();
ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash, allocator);
- if (entry->value != nullptr) {
- auto previous_kind =
+ if (entry->value == nullptr) {
+ entry->value = property;
+ } else {
+ // We already have a later definition of this property, so we don't need
+ // to emit a store for the current one.
+ //
+ // There are two subtleties here.
+ //
+ // (1) Emitting a store might actually be incorrect. For example, in {get
+ // foo() {}, foo: 42}, the getter store would override the data property
+ // (which, being a non-computed compile-time valued property, is already
+ // part of the initial literal object).
+ //
+ // (2) If the later definition is an accessor (say, a getter), and the
+ // current definition is a complementary accessor (here, a setter), then
+ // we still must emit a store for the current definition.
+
+ auto later_kind =
static_cast<ObjectLiteral::Property*>(entry->value)->kind();
- if (!((property->kind() == GETTER && previous_kind == SETTER) ||
- (property->kind() == SETTER && previous_kind == GETTER))) {
+ bool complementary_accessors =
+ (property->kind() == GETTER && later_kind == SETTER) ||
+ (property->kind() == SETTER && later_kind == GETTER);
+ if (!complementary_accessors) {
property->set_emit_store(false);
+ if (later_kind == GETTER || later_kind == SETTER) {
+ entry->value = property;
+ }
}
}
- entry->value = property;
}
}
@@ -427,7 +451,7 @@ int ObjectLiteral::InitDepthAndFlags() {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
+ bool is_compile_time_value = value->IsCompileTimeValue();
is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
@@ -454,8 +478,8 @@ int ObjectLiteral::InitDepthAndFlags() {
return depth_acc;
}
-void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
- if (!constant_properties_.is_null()) return;
+void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
+ if (!boilerplate_description_.is_null()) return;
int index_keys = 0;
bool has_seen_proto = false;
@@ -476,17 +500,17 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
}
}
- Handle<BoilerplateDescription> constant_properties =
- isolate->factory()->NewBoilerplateDescription(boilerplate_properties_,
- properties()->length(),
- index_keys, has_seen_proto);
+ Handle<ObjectBoilerplateDescription> boilerplate_description =
+ isolate->factory()->NewObjectBoilerplateDescription(
+ boilerplate_properties_, properties()->length(), index_keys,
+ has_seen_proto);
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
if (property->IsPrototype()) continue;
- if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
+ if (static_cast<uint32_t>(position) == boilerplate_properties_) {
DCHECK(property->is_computed_name());
break;
}
@@ -510,11 +534,12 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
// Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
+ boilerplate_description->set_key_value(position++, *key, *value);
}
- constant_properties_ = constant_properties;
+ boilerplate_description->set_flags(EncodeLiteralType());
+
+ boilerplate_description_ = boilerplate_description;
}
bool ObjectLiteral::IsFastCloningSupported() const {
@@ -528,8 +553,8 @@ bool ObjectLiteral::IsFastCloningSupported() const {
bool ArrayLiteral::is_empty() const {
DCHECK(is_initialized());
- return values()->is_empty() &&
- (constant_elements().is_null() || constant_elements()->is_empty());
+ return values()->is_empty() && (boilerplate_description().is_null() ||
+ boilerplate_description()->is_empty());
}
int ArrayLiteral::InitDepthAndFlags() {
@@ -550,7 +575,7 @@ int ArrayLiteral::InitDepthAndFlags() {
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
- if (!CompileTimeValue::IsCompileTimeValue(element)) {
+ if (!element->IsCompileTimeValue()) {
is_simple = false;
}
}
@@ -563,8 +588,8 @@ int ArrayLiteral::InitDepthAndFlags() {
return depth_acc;
}
-void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
- if (!constant_elements_.is_null()) return;
+void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
+ if (!boilerplate_description_.is_null()) return;
int constants_length =
first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
@@ -606,7 +631,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// elements array to a copy-on-write array.
if (is_simple() && depth() == 1 && array_index > 0 &&
IsSmiOrObjectElementsKind(kind)) {
- fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
+ fixed_array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
}
Handle<FixedArrayBase> elements = fixed_array;
@@ -615,14 +640,12 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
elements = isolate->factory()->NewFixedDoubleArray(constants_length);
// We are copying from non-fast-double to fast-double.
ElementsKind from_kind = TERMINAL_FAST_ELEMENTS_KIND;
- accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
+ accessor->CopyElements(isolate, fixed_array, from_kind, elements,
+ constants_length);
}
- // Remember both the literal's constant values as well as the ElementsKind.
- Handle<ConstantElementsPair> literals =
- isolate->factory()->NewConstantElementsPair(kind, elements);
-
- constant_elements_ = literals;
+ boilerplate_description_ =
+ isolate->factory()->NewArrayBoilerplateDescription(kind, elements);
}
bool ArrayLiteral::IsFastCloningSupported() const {
@@ -643,8 +666,17 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
if (expression->IsLiteral()) {
return expression->AsLiteral()->BuildValue(isolate);
}
- if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(isolate, expression);
+ if (expression->IsCompileTimeValue()) {
+ if (expression->IsObjectLiteral()) {
+ ObjectLiteral* object_literal = expression->AsObjectLiteral();
+ DCHECK(object_literal->is_simple());
+ return object_literal->boilerplate_description();
+ } else {
+ DCHECK(expression->IsArrayLiteral());
+ ArrayLiteral* array_literal = expression->AsArrayLiteral();
+ DCHECK(array_literal->is_simple());
+ return array_literal->boilerplate_description();
+ }
}
return isolate->factory()->uninitialized_value();
}
@@ -669,10 +701,12 @@ bool MaterializedLiteral::NeedsInitialAllocationSite() {
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsArrayLiteral()) {
- return AsArrayLiteral()->BuildConstantElements(isolate);
+ AsArrayLiteral()->BuildBoilerplateDescription(isolate);
+ return;
}
if (IsObjectLiteral()) {
- return AsObjectLiteral()->BuildConstantProperties(isolate);
+ AsObjectLiteral()->BuildBoilerplateDescription(isolate);
+ return;
}
DCHECK(IsRegExpLiteral());
}
@@ -698,7 +732,7 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
if (this->cooked_strings()->at(i) != nullptr) {
cooked_strings->set(i, *this->cooked_strings()->at(i)->string());
} else {
- cooked_strings->set(i, isolate->heap()->undefined_value());
+ cooked_strings->set(i, ReadOnlyRoots(isolate).undefined_value());
}
}
}
@@ -806,9 +840,10 @@ Call::CallType Call::GetCallType() const {
if (proxy->var()->IsUnallocated()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
- // Calls going through 'with' always use DYNAMIC rather than DYNAMIC_LOCAL
- // or DYNAMIC_GLOBAL.
- return proxy->var()->mode() == DYNAMIC ? WITH_CALL : OTHER_CALL;
+ // Calls going through 'with' always use VariableMode::kDynamic rather
+ // than VariableMode::kDynamicLocal or VariableMode::kDynamicGlobal.
+ return proxy->var()->mode() == VariableMode::kDynamic ? WITH_CALL
+ : OTHER_CALL;
}
}
@@ -831,7 +866,7 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
-CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
+CaseClause::CaseClause(Expression* label, ZonePtrList<Statement>* statements)
: label_(label), statements_(statements) {}
bool Literal::IsPropertyName() const {
@@ -954,7 +989,7 @@ const char* CallRuntime::debug_name() {
case k##NodeType: \
return static_cast<const NodeType*>(this)->labels();
-ZoneList<const AstRawString*>* BreakableStatement::labels() const {
+ZonePtrList<const AstRawString>* BreakableStatement::labels() const {
switch (node_type()) {
BREAKABLE_NODE_LIST(RETURN_LABELS)
ITERATION_NODE_LIST(RETURN_LABELS)
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 35dede266b..5a2346ad9f 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -241,6 +241,8 @@ class Expression : public AstNode {
// that this also checks for loads of the global "undefined" variable.
bool IsUndefinedLiteral() const;
+ bool IsCompileTimeValue();
+
protected:
Expression(int pos, NodeType type) : AstNode(pos, type) {}
@@ -255,7 +257,7 @@ class BreakableStatement : public Statement {
TARGET_FOR_NAMED_ONLY
};
- ZoneList<const AstRawString*>* labels() const;
+ ZonePtrList<const AstRawString>* labels() const;
// Testers.
bool is_target_for_anonymous() const {
@@ -277,12 +279,12 @@ class BreakableStatement : public Statement {
class Block : public BreakableStatement {
public:
- ZoneList<Statement*>* statements() { return &statements_; }
+ ZonePtrList<Statement>* statements() { return &statements_; }
bool ignore_completion_value() const {
return IgnoreCompletionField::decode(bit_field_);
}
- inline ZoneList<const AstRawString*>* labels() const;
+ inline ZonePtrList<const AstRawString>* labels() const;
bool IsJump() const {
return !statements_.is_empty() && statements_.last()->IsJump() &&
@@ -295,7 +297,7 @@ class Block : public BreakableStatement {
private:
friend class AstNodeFactory;
- ZoneList<Statement*> statements_;
+ ZonePtrList<Statement> statements_;
Scope* scope_;
class IgnoreCompletionField
@@ -304,7 +306,7 @@ class Block : public BreakableStatement {
: public BitField<bool, IgnoreCompletionField::kNext, 1> {};
protected:
- Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
+ Block(Zone* zone, ZonePtrList<const AstRawString>* labels, int capacity,
bool ignore_completion_value)
: BreakableStatement(TARGET_FOR_NAMED_ONLY, kNoSourcePosition, kBlock),
statements_(capacity, zone),
@@ -319,18 +321,18 @@ class LabeledBlock final : public Block {
friend class AstNodeFactory;
friend class Block;
- LabeledBlock(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
- bool ignore_completion_value)
+ LabeledBlock(Zone* zone, ZonePtrList<const AstRawString>* labels,
+ int capacity, bool ignore_completion_value)
: Block(zone, labels, capacity, ignore_completion_value),
labels_(labels) {
DCHECK_NOT_NULL(labels);
DCHECK_GT(labels->length(), 0);
}
- ZoneList<const AstRawString*>* labels_;
+ ZonePtrList<const AstRawString>* labels_;
};
-inline ZoneList<const AstRawString*>* Block::labels() const {
+inline ZonePtrList<const AstRawString>* Block::labels() const {
if (IsLabeledField::decode(bit_field_)) {
return static_cast<const LabeledBlock*>(this)->labels_;
}
@@ -437,10 +439,10 @@ class IterationStatement : public BreakableStatement {
Statement* body() const { return body_; }
void set_body(Statement* s) { body_ = s; }
- ZoneList<const AstRawString*>* labels() const { return labels_; }
+ ZonePtrList<const AstRawString>* labels() const { return labels_; }
protected:
- IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
+ IterationStatement(ZonePtrList<const AstRawString>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
@@ -451,7 +453,7 @@ class IterationStatement : public BreakableStatement {
BreakableStatement::kNextBitFieldIndex;
private:
- ZoneList<const AstRawString*>* labels_;
+ ZonePtrList<const AstRawString>* labels_;
Statement* body_;
};
@@ -468,7 +470,7 @@ class DoWhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- DoWhileStatement(ZoneList<const AstRawString*>* labels, int pos)
+ DoWhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kDoWhileStatement), cond_(nullptr) {}
Expression* cond_;
@@ -487,7 +489,7 @@ class WhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- WhileStatement(ZoneList<const AstRawString*>* labels, int pos)
+ WhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kWhileStatement), cond_(nullptr) {}
Expression* cond_;
@@ -511,7 +513,7 @@ class ForStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- ForStatement(ZoneList<const AstRawString*>* labels, int pos)
+ ForStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kForStatement),
init_(nullptr),
cond_(nullptr),
@@ -537,7 +539,7 @@ class ForEachStatement : public IterationStatement {
}
protected:
- ForEachStatement(ZoneList<const AstRawString*>* labels, int pos,
+ ForEachStatement(ZonePtrList<const AstRawString>* labels, int pos,
NodeType type)
: IterationStatement(labels, pos, type) {}
};
@@ -564,7 +566,7 @@ class ForInStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForInStatement(ZoneList<const AstRawString*>* labels, int pos)
+ ForInStatement(ZonePtrList<const AstRawString>* labels, int pos)
: ForEachStatement(labels, pos, kForInStatement),
each_(nullptr),
subject_(nullptr) {
@@ -630,7 +632,7 @@ class ForOfStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForOfStatement(ZoneList<const AstRawString*>* labels, int pos)
+ ForOfStatement(ZonePtrList<const AstRawString>* labels, int pos)
: ForEachStatement(labels, pos, kForOfStatement),
iterator_(nullptr),
assign_iterator_(nullptr),
@@ -757,40 +759,40 @@ class CaseClause final : public ZoneObject {
DCHECK(!is_default());
return label_;
}
- ZoneList<Statement*>* statements() const { return statements_; }
+ ZonePtrList<Statement>* statements() const { return statements_; }
private:
friend class AstNodeFactory;
- CaseClause(Expression* label, ZoneList<Statement*>* statements);
+ CaseClause(Expression* label, ZonePtrList<Statement>* statements);
Expression* label_;
- ZoneList<Statement*>* statements_;
+ ZonePtrList<Statement>* statements_;
};
class SwitchStatement final : public BreakableStatement {
public:
- ZoneList<const AstRawString*>* labels() const { return labels_; }
+ ZonePtrList<const AstRawString>* labels() const { return labels_; }
Expression* tag() const { return tag_; }
void set_tag(Expression* t) { tag_ = t; }
- ZoneList<CaseClause*>* cases() { return &cases_; }
+ ZonePtrList<CaseClause>* cases() { return &cases_; }
private:
friend class AstNodeFactory;
- SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
+ SwitchStatement(Zone* zone, ZonePtrList<const AstRawString>* labels,
Expression* tag, int pos)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, kSwitchStatement),
labels_(labels),
tag_(tag),
cases_(4, zone) {}
- ZoneList<const AstRawString*>* labels_;
+ ZonePtrList<const AstRawString>* labels_;
Expression* tag_;
- ZoneList<CaseClause*> cases_;
+ ZonePtrList<CaseClause> cases_;
};
@@ -1120,8 +1122,8 @@ class MaterializedLiteral : public Expression {
void BuildConstants(Isolate* isolate);
// If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is simple return a
- // compile time value as encoded by CompileTimeValue::GetValue().
+ // if the expression is a materialized literal and is_simple
+ // then return an Array or Object Boilerplate Description
// Otherwise, return undefined literal as the placeholder
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
@@ -1275,12 +1277,12 @@ class ObjectLiteral final : public AggregateLiteral {
public:
typedef ObjectLiteralProperty Property;
- Handle<BoilerplateDescription> constant_properties() const {
- DCHECK(!constant_properties_.is_null());
- return constant_properties_;
+ Handle<ObjectBoilerplateDescription> boilerplate_description() const {
+ DCHECK(!boilerplate_description_.is_null());
+ return boilerplate_description_;
}
int properties_count() const { return boilerplate_properties_; }
- ZoneList<Property*>* properties() const { return properties_; }
+ ZonePtrList<Property>* properties() const { return properties_; }
bool has_elements() const { return HasElementsField::decode(bit_field_); }
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
@@ -1303,17 +1305,17 @@ class ObjectLiteral final : public AggregateLiteral {
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
- // Get the constant properties fixed array, populating it if necessary.
- Handle<BoilerplateDescription> GetOrBuildConstantProperties(
+ // Get the boilerplate description, populating it if necessary.
+ Handle<ObjectBoilerplateDescription> GetOrBuildBoilerplateDescription(
Isolate* isolate) {
- if (constant_properties_.is_null()) {
- BuildConstantProperties(isolate);
+ if (boilerplate_description_.is_null()) {
+ BuildBoilerplateDescription(isolate);
}
- return constant_properties();
+ return boilerplate_description();
}
- // Populate the constant properties fixed array.
- void BuildConstantProperties(Isolate* isolate);
+ // Populate the boilerplate description.
+ void BuildBoilerplateDescription(Isolate* isolate);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1355,7 +1357,7 @@ class ObjectLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
- ObjectLiteral(ZoneList<Property*>* properties,
+ ObjectLiteral(ZonePtrList<Property>* properties,
uint32_t boilerplate_properties, int pos,
bool has_rest_property)
: AggregateLiteral(pos, kObjectLiteral),
@@ -1380,7 +1382,7 @@ class ObjectLiteral final : public AggregateLiteral {
}
uint32_t boilerplate_properties_;
- Handle<BoilerplateDescription> constant_properties_;
+ Handle<ObjectBoilerplateDescription> boilerplate_description_;
ZoneList<Property*>* properties_;
class HasElementsField
@@ -1423,11 +1425,11 @@ class AccessorTable
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public AggregateLiteral {
public:
- Handle<ConstantElementsPair> constant_elements() const {
- return constant_elements_;
+ Handle<ArrayBoilerplateDescription> boilerplate_description() const {
+ return boilerplate_description_;
}
- ZoneList<Expression*>* values() const { return values_; }
+ ZonePtrList<Expression>* values() const { return values_; }
int first_spread_index() const { return first_spread_index_; }
@@ -1436,16 +1438,17 @@ class ArrayLiteral final : public AggregateLiteral {
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
- // Get the constant elements fixed array, populating it if necessary.
- Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
- if (constant_elements_.is_null()) {
- BuildConstantElements(isolate);
+ // Get the boilerplate description, populating it if necessary.
+ Handle<ArrayBoilerplateDescription> GetOrBuildBoilerplateDescription(
+ Isolate* isolate) {
+ if (boilerplate_description_.is_null()) {
+ BuildBoilerplateDescription(isolate);
}
- return constant_elements();
+ return boilerplate_description();
}
- // Populate the constant elements fixed array.
- void BuildConstantElements(Isolate* isolate);
+ // Populate the boilerplate description.
+ void BuildBoilerplateDescription(Isolate* isolate);
// Determines whether the {CreateShallowArrayLiteral} builtin can be used.
bool IsFastCloningSupported() const;
@@ -1458,15 +1461,14 @@ class ArrayLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
- ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
+ ArrayLiteral(ZonePtrList<Expression>* values, int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
- values_(values) {
- }
+ values_(values) {}
int first_spread_index_;
- Handle<ConstantElementsPair> constant_elements_;
- ZoneList<Expression*>* values_;
+ Handle<ArrayBoilerplateDescription> boilerplate_description_;
+ ZonePtrList<Expression>* values_;
};
enum class HoleCheckMode { kRequired, kElided };
@@ -1633,7 +1635,7 @@ class ResolvedProperty final : public Expression {
class Call final : public Expression {
public:
Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
+ ZonePtrList<Expression>* arguments() const { return arguments_; }
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
@@ -1672,17 +1674,15 @@ class Call final : public Expression {
private:
friend class AstNodeFactory;
- Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
+ Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
PossiblyEval possibly_eval)
- : Expression(pos, kCall),
- expression_(expression),
- arguments_(arguments) {
+ : Expression(pos, kCall), expression_(expression), arguments_(arguments) {
bit_field_ |=
IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
IsTaggedTemplateField::encode(false);
}
- Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
+ Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
TaggedTemplateTag tag)
: Expression(pos, kCall), expression_(expression), arguments_(arguments) {
bit_field_ |= IsPossiblyEvalField::encode(false) |
@@ -1695,14 +1695,14 @@ class Call final : public Expression {
: public BitField<bool, IsPossiblyEvalField::kNext, 1> {};
Expression* expression_;
- ZoneList<Expression*>* arguments_;
+ ZonePtrList<Expression>* arguments_;
};
class CallNew final : public Expression {
public:
Expression* expression() const { return expression_; }
- ZoneList<Expression*>* arguments() const { return arguments_; }
+ ZonePtrList<Expression>* arguments() const { return arguments_; }
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
@@ -1711,14 +1711,13 @@ class CallNew final : public Expression {
private:
friend class AstNodeFactory;
- CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ CallNew(Expression* expression, ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallNew),
expression_(expression),
- arguments_(arguments) {
- }
+ arguments_(arguments) {}
Expression* expression_;
- ZoneList<Expression*>* arguments_;
+ ZonePtrList<Expression>* arguments_;
};
// The CallRuntime class does not represent any official JavaScript
@@ -1727,7 +1726,7 @@ class CallNew final : public Expression {
// implemented in JavaScript.
class CallRuntime final : public Expression {
public:
- ZoneList<Expression*>* arguments() const { return arguments_; }
+ ZonePtrList<Expression>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == nullptr; }
int context_index() const {
@@ -1745,11 +1744,11 @@ class CallRuntime final : public Expression {
friend class AstNodeFactory;
CallRuntime(const Runtime::Function* function,
- ZoneList<Expression*>* arguments, int pos)
+ ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallRuntime),
function_(function),
arguments_(arguments) {}
- CallRuntime(int context_index, ZoneList<Expression*>* arguments, int pos)
+ CallRuntime(int context_index, ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallRuntime),
context_index_(context_index),
function_(nullptr),
@@ -1757,7 +1756,7 @@ class CallRuntime final : public Expression {
int context_index_;
const Runtime::Function* function_;
- ZoneList<Expression*>* arguments_;
+ ZonePtrList<Expression>* arguments_;
};
@@ -2190,7 +2189,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
DeclarationScope* scope() const { return scope_; }
- ZoneList<Statement*>* body() const { return body_; }
+ ZonePtrList<Statement>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const;
@@ -2310,7 +2309,7 @@ class FunctionLiteral final : public Expression {
FunctionLiteral(
Zone* zone, const AstRawString* name, AstValueFactory* ast_value_factory,
- DeclarationScope* scope, ZoneList<Statement*>* body,
+ DeclarationScope* scope, ZonePtrList<Statement>* body,
int expected_property_count, int parameter_count, int function_length,
FunctionType function_type, ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position, bool has_braces,
@@ -2359,7 +2358,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name_;
DeclarationScope* scope_;
- ZoneList<Statement*>* body_;
+ ZonePtrList<Statement>* body_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
@@ -2407,15 +2406,16 @@ class ClassLiteralProperty final : public LiteralProperty {
class InitializeClassFieldsStatement final : public Statement {
public:
typedef ClassLiteralProperty Property;
- ZoneList<Property*>* fields() const { return fields_; }
+
+ ZonePtrList<Property>* fields() const { return fields_; }
private:
friend class AstNodeFactory;
- InitializeClassFieldsStatement(ZoneList<Property*>* fields, int pos)
+ InitializeClassFieldsStatement(ZonePtrList<Property>* fields, int pos)
: Statement(pos, kInitializeClassFieldsStatement), fields_(fields) {}
- ZoneList<Property*>* fields_;
+ ZonePtrList<Property>* fields_;
};
class ClassLiteral final : public Expression {
@@ -2426,7 +2426,7 @@ class ClassLiteral final : public Expression {
Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
FunctionLiteral* constructor() const { return constructor_; }
- ZoneList<Property*>* properties() const { return properties_; }
+ ZonePtrList<Property>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
bool has_name_static_property() const {
@@ -2455,7 +2455,7 @@ class ClassLiteral final : public Expression {
friend class AstNodeFactory;
ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
- FunctionLiteral* constructor, ZoneList<Property*>* properties,
+ FunctionLiteral* constructor, ZonePtrList<Property>* properties,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_fields_initializer_function,
int start_position, int end_position,
@@ -2481,7 +2481,7 @@ class ClassLiteral final : public Expression {
Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
- ZoneList<Property*>* properties_;
+ ZonePtrList<Property>* properties_;
FunctionLiteral* static_fields_initializer_;
FunctionLiteral* instance_fields_initializer_function_;
class HasNameStaticProperty
@@ -2636,10 +2636,10 @@ class GetIterator final : public Expression {
// (defined at https://tc39.github.io/ecma262/#sec-gettemplateobject).
class GetTemplateObject final : public Expression {
public:
- const ZoneList<const AstRawString*>* cooked_strings() const {
+ const ZonePtrList<const AstRawString>* cooked_strings() const {
return cooked_strings_;
}
- const ZoneList<const AstRawString*>* raw_strings() const {
+ const ZonePtrList<const AstRawString>* raw_strings() const {
return raw_strings_;
}
@@ -2648,34 +2648,35 @@ class GetTemplateObject final : public Expression {
private:
friend class AstNodeFactory;
- GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int pos)
+ GetTemplateObject(const ZonePtrList<const AstRawString>* cooked_strings,
+ const ZonePtrList<const AstRawString>* raw_strings, int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
raw_strings_(raw_strings) {}
- const ZoneList<const AstRawString*>* cooked_strings_;
- const ZoneList<const AstRawString*>* raw_strings_;
+ const ZonePtrList<const AstRawString>* cooked_strings_;
+ const ZonePtrList<const AstRawString>* raw_strings_;
};
class TemplateLiteral final : public Expression {
public:
- using StringList = ZoneList<const AstRawString*>;
- using ExpressionList = ZoneList<Expression*>;
-
- const StringList* string_parts() const { return string_parts_; }
- const ExpressionList* substitutions() const { return substitutions_; }
+ const ZonePtrList<const AstRawString>* string_parts() const {
+ return string_parts_;
+ }
+ const ZonePtrList<Expression>* substitutions() const {
+ return substitutions_;
+ }
private:
friend class AstNodeFactory;
- TemplateLiteral(const StringList* parts, const ExpressionList* substitutions,
- int pos)
+ TemplateLiteral(const ZonePtrList<const AstRawString>* parts,
+ const ZonePtrList<Expression>* substitutions, int pos)
: Expression(pos, kTemplateLiteral),
string_parts_(parts),
substitutions_(substitutions) {}
- const StringList* string_parts_;
- const ExpressionList* substitutions_;
+ const ZonePtrList<const AstRawString>* string_parts_;
+ const ZonePtrList<Expression>* substitutions_;
};
// ----------------------------------------------------------------------------
@@ -2692,7 +2693,7 @@ class AstVisitor BASE_EMBEDDED {
for (Declaration* decl : *declarations) Visit(decl);
}
- void VisitStatements(ZoneList<Statement*>* statements) {
+ void VisitStatements(ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Statement* stmt = statements->at(i);
Visit(stmt);
@@ -2700,7 +2701,7 @@ class AstVisitor BASE_EMBEDDED {
}
}
- void VisitExpressions(ZoneList<Expression*>* expressions) {
+ void VisitExpressions(ZonePtrList<Expression>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
// The variable statement visiting code may pass null expressions
// to this code. Maybe this should be handled by introducing an
@@ -2794,7 +2795,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
Block* NewBlock(int capacity, bool ignore_completion_value,
- ZoneList<const AstRawString*>* labels = nullptr) {
+ ZonePtrList<const AstRawString>* labels = nullptr) {
return labels != nullptr
? new (zone_) LabeledBlock(zone_, labels, capacity,
ignore_completion_value)
@@ -2802,22 +2803,22 @@ class AstNodeFactory final BASE_EMBEDDED {
Block(zone_, labels, capacity, ignore_completion_value);
}
-#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
- return new (zone_) NodeType(labels, pos); \
+#define STATEMENT_WITH_LABELS(NodeType) \
+ NodeType* New##NodeType(ZonePtrList<const AstRawString>* labels, int pos) { \
+ return new (zone_) NodeType(labels, pos); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
#undef STATEMENT_WITH_LABELS
- SwitchStatement* NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+ SwitchStatement* NewSwitchStatement(ZonePtrList<const AstRawString>* labels,
Expression* tag, int pos) {
return new (zone_) SwitchStatement(zone_, labels, tag, pos);
}
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
- ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels,
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
@@ -2830,7 +2831,7 @@ class AstNodeFactory final BASE_EMBEDDED {
UNREACHABLE();
}
- ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
+ ForOfStatement* NewForOfStatement(ZonePtrList<const AstRawString>* labels,
int pos) {
return new (zone_) ForOfStatement(labels, pos);
}
@@ -2921,7 +2922,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
CaseClause* NewCaseClause(Expression* label,
- ZoneList<Statement*>* statements) {
+ ZonePtrList<Statement>* statements) {
return new (zone_) CaseClause(label, statements);
}
@@ -2961,7 +2962,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
ObjectLiteral* NewObjectLiteral(
- ZoneList<ObjectLiteral::Property*>* properties,
+ ZonePtrList<ObjectLiteral::Property>* properties,
uint32_t boilerplate_properties, int pos, bool has_rest_property) {
return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
has_rest_property);
@@ -2986,12 +2987,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) RegExpLiteral(pattern, flags, pos);
}
- ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
- int pos) {
+ ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values, int pos) {
return new (zone_) ArrayLiteral(values, -1, pos);
}
- ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
+ ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values,
int first_spread_index, int pos) {
return new (zone_) ArrayLiteral(values, first_spread_index, pos);
}
@@ -3027,35 +3027,34 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) ResolvedProperty(obj, property, pos);
}
- Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
+ Call* NewCall(Expression* expression, ZonePtrList<Expression>* arguments,
int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
return new (zone_) Call(expression, arguments, pos, possibly_eval);
}
Call* NewTaggedTemplate(Expression* expression,
- ZoneList<Expression*>* arguments, int pos) {
+ ZonePtrList<Expression>* arguments, int pos) {
return new (zone_)
Call(expression, arguments, pos, Call::TaggedTemplateTag::kTrue);
}
CallNew* NewCallNew(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
+ ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallNew(expression, arguments, pos);
}
CallRuntime* NewCallRuntime(Runtime::FunctionId id,
- ZoneList<Expression*>* arguments, int pos) {
+ ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(Runtime::FunctionForId(id), arguments, pos);
}
CallRuntime* NewCallRuntime(const Runtime::Function* function,
- ZoneList<Expression*>* arguments, int pos) {
+ ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(function, arguments, pos);
}
CallRuntime* NewCallRuntime(int context_index,
- ZoneList<Expression*>* arguments, int pos) {
+ ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(context_index, arguments, pos);
}
@@ -3158,7 +3157,7 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
- ZoneList<Statement*>* body, int expected_property_count,
+ ZonePtrList<Statement>* body, int expected_property_count,
int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
@@ -3176,7 +3175,7 @@ class AstNodeFactory final BASE_EMBEDDED {
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
- ZoneList<Statement*>* body,
+ ZonePtrList<Statement>* body,
int expected_property_count,
int parameter_count) {
return new (zone_) FunctionLiteral(
@@ -3184,7 +3183,7 @@ class AstNodeFactory final BASE_EMBEDDED {
body, expected_property_count, parameter_count, parameter_count,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kShouldLazyCompile, 0, true,
+ FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false,
FunctionLiteral::kIdTypeTopLevel);
}
@@ -3198,7 +3197,7 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral* NewClassLiteral(
Scope* scope, Variable* variable, Expression* extends,
FunctionLiteral* constructor,
- ZoneList<ClassLiteral::Property*>* properties,
+ ZonePtrList<ClassLiteral::Property>* properties,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_fields_initializer_function, int start_position,
int end_position, bool has_name_static_property,
@@ -3255,14 +3254,14 @@ class AstNodeFactory final BASE_EMBEDDED {
}
GetTemplateObject* NewGetTemplateObject(
- const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int pos) {
+ const ZonePtrList<const AstRawString>* cooked_strings,
+ const ZonePtrList<const AstRawString>* raw_strings, int pos) {
return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
TemplateLiteral* NewTemplateLiteral(
- const ZoneList<const AstRawString*>* string_parts,
- const ZoneList<Expression*>* substitutions, int pos) {
+ const ZonePtrList<const AstRawString>* string_parts,
+ const ZonePtrList<Expression>* substitutions, int pos) {
return new (zone_) TemplateLiteral(string_parts, substitutions, pos);
}
@@ -3271,7 +3270,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
InitializeClassFieldsStatement* NewInitializeClassFieldsStatement(
- ZoneList<ClassLiteralProperty*>* args, int pos) {
+ ZonePtrList<ClassLiteral::Property>* args, int pos) {
return new (zone_) InitializeClassFieldsStatement(args, pos);
}
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
deleted file mode 100644
index f21759ab7d..0000000000
--- a/deps/v8/src/ast/compile-time-value.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/compile-time-value.h"
-
-#include "src/ast/ast.h"
-#include "src/handles-inl.h"
-#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
- if (expression->IsLiteral()) return true;
- MaterializedLiteral* literal = expression->AsMaterializedLiteral();
- if (literal == nullptr) return false;
- return literal->IsSimple();
-}
-
-Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
- Expression* expression) {
- Factory* factory = isolate->factory();
- DCHECK(IsCompileTimeValue(expression));
- Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
- if (expression->IsObjectLiteral()) {
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- DCHECK(object_literal->is_simple());
- int literalTypeFlag = object_literal->EncodeLiteralType();
- DCHECK_NE(kArrayLiteralFlag, literalTypeFlag);
- result->set(kLiteralTypeSlot, Smi::FromInt(literalTypeFlag));
- result->set(kElementsSlot, *object_literal->constant_properties());
- } else {
- ArrayLiteral* array_literal = expression->AsArrayLiteral();
- DCHECK(array_literal->is_simple());
- result->set(kLiteralTypeSlot, Smi::FromInt(kArrayLiteralFlag));
- result->set(kElementsSlot, *array_literal->constant_elements());
- }
- return result;
-}
-
-int CompileTimeValue::GetLiteralTypeFlags(Handle<FixedArray> value) {
- return Smi::ToInt(value->get(kLiteralTypeSlot));
-}
-
-Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
- return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
deleted file mode 100644
index 874bc1b32f..0000000000
--- a/deps/v8/src/ast/compile-time-value.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_COMPILE_TIME_VALUE_H_
-#define V8_AST_COMPILE_TIME_VALUE_H_
-
-#include "src/allocation.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class Expression;
-
-// Support for handling complex values (array and object literals) that
-// can be fully handled at compile time.
-class CompileTimeValue : public AllStatic {
- public:
- // This is a special marker used to encode array literals. The value has to be
- // different from any value possibly returned by
- // ObjectLiteral::EncodeLiteralType.
- static const int kArrayLiteralFlag = -1;
-
- static bool IsCompileTimeValue(Expression* expression);
-
- // Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
-
- // Get the encoded literal type. This can either be kArrayLiteralFlag or
- // encoded properties of an ObjectLiteral returned by
- // ObjectLiteral::EncodeLiteralType.
- static int GetLiteralTypeFlags(Handle<FixedArray> value);
-
- // Get the elements of a compile time value returned by GetValue().
- static Handle<HeapObject> GetElements(Handle<FixedArray> value);
-
- private:
- static const int kLiteralTypeSlot = 0;
- static const int kElementsSlot = 1;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_COMPILE_TIME_VALUE_H_
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index b583f35793..0f66ac91ec 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -6,12 +6,27 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/module-inl.h"
#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
+bool ModuleDescriptor::AstRawStringComparer::operator()(
+ const AstRawString* lhs, const AstRawString* rhs) const {
+ // Fast path for equal pointers: a pointer is not strictly less than itself.
+ if (lhs == rhs) return false;
+
+ // Order by contents (ordering by hash is unstable across runs).
+ if (lhs->is_one_byte() != rhs->is_one_byte()) {
+ return lhs->is_one_byte();
+ }
+ if (lhs->byte_length() != rhs->byte_length()) {
+ return lhs->byte_length() < rhs->byte_length();
+ }
+ return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
+}
+
void ModuleDescriptor::AddImport(const AstRawString* import_name,
const AstRawString* local_name,
const AstRawString* module_request,
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 465eca447f..44e86dce42 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -124,10 +124,21 @@ class ModuleDescriptor : public ZoneObject {
ModuleRequest(int index, int position) : index(index), position(position) {}
};
+ // Custom content-based comparer for the below maps, to keep them stable
+ // across parses.
+ struct AstRawStringComparer {
+ bool operator()(const AstRawString* lhs, const AstRawString* rhs) const;
+ };
+
+ typedef ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>
+ ModuleRequestMap;
+ typedef ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>
+ RegularExportMap;
+ typedef ZoneMap<const AstRawString*, Entry*, AstRawStringComparer>
+ RegularImportMap;
+
// Module requests.
- const ZoneMap<const AstRawString*, ModuleRequest>& module_requests() const {
- return module_requests_;
- }
+ const ModuleRequestMap& module_requests() const { return module_requests_; }
// Namespace imports.
const ZoneVector<const Entry*>& namespace_imports() const {
@@ -135,9 +146,7 @@ class ModuleDescriptor : public ZoneObject {
}
// All the remaining imports, indexed by local name.
- const ZoneMap<const AstRawString*, Entry*>& regular_imports() const {
- return regular_imports_;
- }
+ const RegularImportMap& regular_imports() const { return regular_imports_; }
// Star exports and explicitly indirect exports.
const ZoneVector<const Entry*>& special_exports() const {
@@ -146,9 +155,7 @@ class ModuleDescriptor : public ZoneObject {
// All the remaining exports, indexed by local name.
// After canonicalization (see Validate), these are exactly the local exports.
- const ZoneMultimap<const AstRawString*, Entry*>& regular_exports() const {
- return regular_exports_;
- }
+ const RegularExportMap& regular_exports() const { return regular_exports_; }
void AddRegularExport(Entry* entry) {
DCHECK_NOT_NULL(entry->export_name);
@@ -188,11 +195,11 @@ class ModuleDescriptor : public ZoneObject {
Handle<ModuleInfo> module_info);
private:
- ZoneMap<const AstRawString*, ModuleRequest> module_requests_;
+ ModuleRequestMap module_requests_;
ZoneVector<const Entry*> special_exports_;
ZoneVector<const Entry*> namespace_imports_;
- ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
- ZoneMap<const AstRawString*, Entry*> regular_imports_;
+ RegularExportMap regular_exports_;
+ RegularImportMap regular_imports_;
// If there are multiple export entries with the same export name, return the
// last of them (in source order). Otherwise return nullptr.
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 4f9029810a..ef086bcefc 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -498,16 +498,14 @@ void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
Find(node->expression());
}
-
-void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
+void CallPrinter::FindStatements(ZonePtrList<Statement>* statements) {
if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Find(statements->at(i));
}
}
-
-void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
+void CallPrinter::FindArguments(ZonePtrList<Expression>* arguments) {
if (found_) return;
for (int i = 0; i < arguments->length(); i++) {
Find(arguments->at(i));
@@ -589,7 +587,7 @@ void AstPrinter::Print(const char* format, ...) {
}
}
-void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
+void AstPrinter::PrintLabels(ZonePtrList<const AstRawString>* labels) {
if (labels != nullptr) {
for (int i = 0; i < labels->length(); i++) {
PrintLiteral(labels->at(i), false);
@@ -748,8 +746,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
}
}
-
-void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
+void AstPrinter::PrintLabelsIndented(ZonePtrList<const AstRawString>* labels) {
if (labels == nullptr || labels->length() == 0) return;
PrintIndented("LABELS ");
PrintLabels(labels);
@@ -809,15 +806,13 @@ void AstPrinter::PrintParameters(DeclarationScope* scope) {
}
}
-
-void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+void AstPrinter::PrintStatements(ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
}
}
-
-void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
+void AstPrinter::PrintArguments(ZonePtrList<Expression>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
@@ -1040,7 +1035,7 @@ void AstPrinter::VisitInitializeClassFieldsStatement(
}
void AstPrinter::PrintClassProperties(
- ZoneList<ClassLiteral::Property*>* properties) {
+ ZonePtrList<ClassLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ClassLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
@@ -1119,7 +1114,7 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstPrinter::PrintObjectProperties(
- ZoneList<ObjectLiteral::Property*>* properties) {
+ ZonePtrList<ObjectLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ObjectLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index d93137b7cf..cc29052c2d 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -56,8 +56,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
protected:
void PrintLiteral(Handle<Object> value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
- void FindStatements(ZoneList<Statement*>* statements);
- void FindArguments(ZoneList<Expression*>* arguments);
+ void FindStatements(ZonePtrList<Statement>* statements);
+ void FindArguments(ZonePtrList<Expression>* arguments);
};
@@ -88,17 +88,17 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void Init();
- void PrintLabels(ZoneList<const AstRawString*>* labels);
+ void PrintLabels(ZonePtrList<const AstRawString>* labels);
void PrintLiteral(const AstRawString* value, bool quote);
void PrintLiteral(const AstConsString* value, bool quote);
void PrintLiteral(Literal* literal, bool quote);
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
- void PrintStatements(ZoneList<Statement*>* statements);
+ void PrintStatements(ZonePtrList<Statement>* statements);
void PrintDeclarations(Declaration::List* declarations);
void PrintParameters(DeclarationScope* scope);
- void PrintArguments(ZoneList<Expression*>* arguments);
+ void PrintArguments(ZonePtrList<Expression>* arguments);
void PrintCaseClause(CaseClause* clause);
void PrintLiteralIndented(const char* info, Literal* literal, bool quote);
void PrintLiteralIndented(const char* info, const AstRawString* value,
@@ -107,9 +107,9 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
bool quote);
void PrintLiteralWithModeIndented(const char* info, Variable* var,
const AstRawString* value);
- void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
- void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
- void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
+ void PrintLabelsIndented(ZonePtrList<const AstRawString>* labels);
+ void PrintObjectProperties(ZonePtrList<ObjectLiteral::Property>* properties);
+ void PrintClassProperties(ZonePtrList<ClassLiteral::Property>* properties);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 42affeea2c..18db88f950 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -13,10 +13,11 @@
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/module-inl.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/preparsed-scope-data.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -75,8 +76,8 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- p->value =
- mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
+ p->value = mode == VariableMode::kVar ? kDummyPreParserVariable
+ : kDummyPreParserLexicalVariable;
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -189,6 +190,13 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
SetDefaults();
}
+bool DeclarationScope::IsDeclaredParameter(const AstRawString* name) {
+ // If IsSimpleParameterList is false, duplicate parameters are not allowed,
+ // however `arguments` may be allowed if function is not strict code. Thus,
+ // the assumptions explained above do not hold.
+ return params_.Contains(variables_.Lookup(name));
+}
+
ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* ast_value_factory)
: DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
@@ -199,11 +207,10 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
DeclareThis(ast_value_factory);
}
-ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
+ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
Zone* zone = avfactory->zone();
- Isolate* isolate = scope_info->GetIsolate();
Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
set_language_mode(LanguageMode::kStrict);
@@ -289,8 +296,9 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
- Variable* variable = Declare(zone, catch_variable_name, VAR, NORMAL_VARIABLE,
- kCreatedInitialized, maybe_assigned);
+ Variable* variable =
+ Declare(zone, catch_variable_name, VariableMode::kVar, NORMAL_VARIABLE,
+ kCreatedInitialized, maybe_assigned);
AllocateHeapSlot(variable);
}
@@ -389,7 +397,8 @@ bool Scope::ContainsAsmModule() const {
return false;
}
-Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
+Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode) {
@@ -400,7 +409,8 @@ Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
while (scope_info) {
if (scope_info->scope_type() == WITH_SCOPE) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
- outer_scope = new (zone) Scope(zone, WITH_SCOPE, handle(scope_info));
+ outer_scope =
+ new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate));
// TODO(yangguo): Remove once debug-evaluate properly keeps track of the
// function scope in which we are evaluating.
@@ -412,40 +422,40 @@ Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
// scope info of this script context onto the existing script scope to
// avoid nesting script scopes.
if (deserialization_mode == DeserializationMode::kIncludingVariables) {
- script_scope->SetScriptScopeInfo(handle(scope_info));
+ script_scope->SetScriptScopeInfo(handle(scope_info, isolate));
}
DCHECK(!scope_info->HasOuterScopeInfo());
break;
} else if (scope_info->scope_type() == FUNCTION_SCOPE) {
- outer_scope =
- new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
+ outer_scope = new (zone)
+ DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
if (scope_info->IsAsmModule())
outer_scope->AsDeclarationScope()->set_asm_module();
} else if (scope_info->scope_type() == EVAL_SCOPE) {
- outer_scope =
- new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
+ outer_scope = new (zone)
+ DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate));
} else if (scope_info->scope_type() == BLOCK_SCOPE) {
if (scope_info->is_declaration_scope()) {
- outer_scope =
- new (zone) DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info));
+ outer_scope = new (zone)
+ DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
} else {
- outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
+ outer_scope =
+ new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
}
} else if (scope_info->scope_type() == MODULE_SCOPE) {
- outer_scope =
- new (zone) ModuleScope(handle(scope_info), ast_value_factory);
+ outer_scope = new (zone)
+ ModuleScope(isolate, handle(scope_info, isolate), ast_value_factory);
} else {
DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
- DCHECK_EQ(scope_info->LocalCount(), 1);
DCHECK_EQ(scope_info->ContextLocalCount(), 1);
- DCHECK_EQ(scope_info->ContextLocalMode(0), VAR);
+ DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar);
DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
String* name = scope_info->ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info->ContextLocalMaybeAssignedFlag(0);
- outer_scope =
- new (zone) Scope(zone, ast_value_factory->GetString(handle(name)),
- maybe_assigned, handle(scope_info));
+ outer_scope = new (zone)
+ Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
+ maybe_assigned, handle(scope_info, isolate));
}
if (deserialization_mode == DeserializationMode::kScopesOnly) {
outer_scope->scope_info_ = Handle<ScopeInfo>::null();
@@ -605,12 +615,13 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
- DeclareVariable(declaration, VAR,
- Variable::DefaultInitializationFlag(VAR), nullptr, &ok);
+ DeclareVariable(declaration, VariableMode::kVar,
+ Variable::DefaultInitializationFlag(VariableMode::kVar),
+ nullptr, &ok);
DCHECK(ok);
} else {
DCHECK(is_being_lazily_parsed_);
- Variable* var = DeclareVariableName(name, VAR);
+ Variable* var = DeclareVariableName(name, VariableMode::kVar);
if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
DCHECK(FLAG_preparser_scope_analysis);
@@ -633,7 +644,7 @@ void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
DeclarationScope(info->zone(), info->ast_value_factory());
info->set_script_scope(script_scope);
ReplaceOuterScope(Scope::DeserializeScopeChain(
- info->zone(), *outer_scope_info, script_scope,
+ isolate, info->zone(), *outer_scope_info, script_scope,
info->ast_value_factory(),
Scope::DeserializationMode::kIncludingVariables));
} else {
@@ -703,7 +714,8 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
bool derived_constructor = IsDerivedConstructor(function_kind_);
Variable* var =
Declare(zone(), ast_value_factory->this_string(),
- derived_constructor ? CONST : VAR, THIS_VARIABLE,
+ derived_constructor ? VariableMode::kConst : VariableMode::kVar,
+ THIS_VARIABLE,
derived_constructor ? kNeedsInitialization : kCreatedInitialized);
receiver_ = var;
}
@@ -717,7 +729,8 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Declare 'arguments' variable which exists in all non arrow functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
+ arguments_ = Declare(zone(), ast_value_factory->arguments_string(),
+ VariableMode::kVar);
} else if (IsLexical(arguments_)) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
@@ -731,12 +744,14 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
DCHECK(!is_arrow_scope());
DeclareThis(ast_value_factory);
- new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);
+ new_target_ = Declare(zone(), ast_value_factory->new_target_string(),
+ VariableMode::kConst);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
EnsureRareData()->this_function =
- Declare(zone(), ast_value_factory->this_function_string(), CONST);
+ Declare(zone(), ast_value_factory->this_function_string(),
+ VariableMode::kConst);
}
}
@@ -746,10 +761,10 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
DCHECK_NULL(variables_.Lookup(name));
VariableKind kind = is_sloppy(language_mode()) ? SLOPPY_FUNCTION_NAME_VARIABLE
: NORMAL_VARIABLE;
- function_ =
- new (zone()) Variable(this, name, CONST, kind, kCreatedInitialized);
+ function_ = new (zone())
+ Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized);
if (calls_sloppy_eval()) {
- NonLocal(name, DYNAMIC);
+ NonLocal(name, VariableMode::kDynamic);
} else {
variables_.Add(zone(), function_);
}
@@ -913,11 +928,12 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
for (Variable* local : new_parent->locals_) {
- DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
+ DCHECK(local->mode() == VariableMode::kTemporary ||
+ local->mode() == VariableMode::kVar);
DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
DCHECK_NE(local->scope(), new_parent);
local->set_scope(new_parent);
- if (local->mode() == VAR) {
+ if (local->mode() == VariableMode::kVar) {
outer_closure->variables_.Remove(local);
new_parent->variables_.Add(new_parent->zone(), local);
}
@@ -949,10 +965,6 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
// it's ok to get the Handle<String> here.
- // If we have a serialized scope info, we might find the variable there.
- // There should be no local slot with the given name.
- DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
-
bool found = false;
VariableLocation location;
@@ -979,7 +991,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
index = scope_info_->FunctionContextSlotIndex(*name_handle);
if (index < 0) return nullptr; // Nowhere found.
Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
- DCHECK_EQ(CONST, var->mode());
+ DCHECK_EQ(VariableMode::kConst, var->mode());
var->AllocateTo(VariableLocation::CONTEXT, index);
return variables_.Lookup(name);
}
@@ -1016,10 +1028,10 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK(!is_being_lazily_parsed_);
DCHECK(!was_lazily_parsed_);
Variable* var;
- if (mode == TEMPORARY) {
+ if (mode == VariableMode::kTemporary) {
var = NewTemporary(name);
} else {
- DCHECK_EQ(mode, VAR);
+ DCHECK_EQ(mode, VariableMode::kVar);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
if (is_duplicate != nullptr) {
@@ -1049,17 +1061,17 @@ Variable* DeclarationScope::DeclareParameterName(
if (FLAG_preparser_scope_analysis) {
Variable* var;
if (declare_as_local) {
- var = Declare(zone(), name, VAR);
+ var = Declare(zone(), name, VariableMode::kVar);
} else {
- var = new (zone())
- Variable(this, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
+ var = new (zone()) Variable(this, name, VariableMode::kTemporary,
+ NORMAL_VARIABLE, kCreatedInitialized);
}
if (add_parameter) {
params_.Add(var, zone());
}
return var;
}
- DeclareVariableName(name, VAR);
+ DeclareVariableName(name, VariableMode::kVar);
return nullptr;
}
@@ -1067,12 +1079,14 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved_);
- // This function handles VAR, LET, and CONST modes. DYNAMIC variables are
- // introduced during variable allocation, and TEMPORARY variables are
- // allocated via NewTemporary().
+ // This function handles VariableMode::kVar, VariableMode::kLet, and
+ // VariableMode::kConst modes. VariableMode::kDynamic variables are
+ // introduced during variable allocation, and VariableMode::kTemporary
+ // variables are allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(),
- mode == VAR || mode == LET || mode == CONST);
+ mode == VariableMode::kVar || mode == VariableMode::kLet ||
+ mode == VariableMode::kConst);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
@@ -1085,7 +1099,7 @@ Variable* Scope::DeclareVariable(
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
- if (mode == VAR && !is_declaration_scope()) {
+ if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
declaration, mode, init, sloppy_mode_block_scope_function_redefinition,
ok);
@@ -1108,11 +1122,12 @@ Variable* Scope::DeclareVariable(
// assigned because they might be accessed by a lazily parsed top-level
// function, which, for efficiency, we preparse without variable tracking.
if (is_script_scope() || is_module_scope()) {
- if (mode != CONST) proxy->set_is_assigned();
+ if (mode != VariableMode::kConst) proxy->set_is_assigned();
}
Variable* var = nullptr;
- if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
+ if (is_eval_scope() && is_sloppy(language_mode()) &&
+ mode == VariableMode::kVar) {
// In a var binding in a sloppy direct eval, pollute the enclosing scope
// with this new binding by doing the following:
// The proxy is bound to a lookup variable to force a dynamic declaration
@@ -1173,7 +1188,7 @@ Variable* Scope::DeclareVariable(
*ok = false;
return nullptr;
}
- } else if (mode == VAR) {
+ } else if (mode == VariableMode::kVar) {
var->set_maybe_assigned();
}
}
@@ -1199,7 +1214,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
- if (mode == VAR && !is_declaration_scope()) {
+ if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariableName(name, mode);
}
DCHECK(!is_with_scope());
@@ -1220,7 +1235,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
// a function declaration, it's an error. This is an error PreParser
// hasn't previously detected. TODO(marja): Investigate whether we can now
// start returning this error.
- } else if (mode == VAR) {
+ } else if (mode == VariableMode::kVar) {
var->set_maybe_assigned();
}
var->set_is_used();
@@ -1237,9 +1252,9 @@ void Scope::DeclareCatchVariableName(const AstRawString* name) {
DCHECK(scope_info_.is_null());
if (FLAG_preparser_scope_analysis) {
- Declare(zone(), name, VAR);
+ Declare(zone(), name, VariableMode::kVar);
} else {
- variables_.DeclareName(zone(), name, VAR);
+ variables_.DeclareName(zone(), name, VariableMode::kVar);
}
}
@@ -1253,11 +1268,11 @@ void Scope::AddUnresolved(VariableProxy* proxy) {
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
- return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
+ return variables_.Declare(zone(), this, name, VariableMode::kDynamicGlobal,
+ kind);
// TODO(neis): Mark variable as maybe-assigned?
}
-
bool Scope::RemoveUnresolved(VariableProxy* var) {
if (unresolved_ == var) {
unresolved_ = var->next_unresolved();
@@ -1284,8 +1299,8 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
Variable* Scope::NewTemporary(const AstRawString* name,
MaybeAssignedFlag maybe_assigned) {
DeclarationScope* scope = GetClosureScope();
- Variable* var = new (zone())
- Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
+ Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary,
+ NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
return var;
@@ -1302,7 +1317,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
Scope* current = this;
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
- DCHECK_EQ(mode, VAR);
+ DCHECK_EQ(mode, VariableMode::kVar);
current = decl->AsVariableDeclaration()->AsNested()->scope();
} else if (IsLexicalVariableMode(mode)) {
if (!is_block_scope()) continue;
@@ -1327,7 +1342,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
Declaration* Scope::CheckLexDeclarationsConflictingWith(
- const ZoneList<const AstRawString*>& names) {
+ const ZonePtrList<const AstRawString>& names) {
DCHECK(is_block_scope());
for (int i = 0; i < names.length(); ++i) {
Variable* var = LookupLocal(names.at(i));
@@ -1467,11 +1482,11 @@ Scope* Scope::GetOuterScopeWithContext() {
}
Handle<StringSet> DeclarationScope::CollectNonLocals(
- ParseInfo* info, Handle<StringSet> non_locals) {
+ Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- non_locals = StringSet::Add(non_locals, proxy->name());
+ non_locals = StringSet::Add(isolate, non_locals, proxy->name());
}
return non_locals;
}
@@ -1759,7 +1774,7 @@ void Scope::Print(int n) {
{
bool printed_header = false;
for (Variable* local : locals_) {
- if (local->mode() != TEMPORARY) continue;
+ if (local->mode() != VariableMode::kTemporary) continue;
if (!printed_header) {
printed_header = true;
Indent(n1, "// temporary vars:\n");
@@ -1829,7 +1844,8 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
// variables.
// TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
// scopes in which it's evaluating.
- if (is_debug_evaluate_scope_) return NonLocal(proxy->raw_name(), DYNAMIC);
+ if (is_debug_evaluate_scope_)
+ return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
// Try to find the variable in this scope.
Variable* var = LookupLocal(proxy->raw_name());
@@ -1892,7 +1908,7 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
var->ForceContextAllocation();
if (proxy->is_assigned()) var->set_maybe_assigned();
}
- return NonLocal(proxy->raw_name(), DYNAMIC);
+ return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
}
if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
@@ -1904,13 +1920,13 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
// here (this excludes block and catch scopes), and variable lookups at
// script scope are always dynamic.
if (var->IsGlobalObjectProperty()) {
- return NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
+ return NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
}
if (var->is_dynamic()) return var;
Variable* invalidated = var;
- var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
+ var = NonLocal(proxy->raw_name(), VariableMode::kDynamicLocal);
var->set_local_if_not_shadowed(invalidated);
}
@@ -1937,11 +1953,11 @@ void SetNeedsHoleCheck(Variable* var, VariableProxy* proxy) {
}
void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
- if (var->mode() == DYNAMIC_LOCAL) {
+ if (var->mode() == VariableMode::kDynamicLocal) {
// Dynamically introduced variables never need a hole check (since they're
- // VAR bindings, either from var or function declarations), but the variable
- // they shadow might need a hole check, which we want to do if we decide
- // that no shadowing variable was dynamically introoduced.
+ // VariableMode::kVar bindings, either from var or function declarations),
+ // but the variable they shadow might need a hole check, which we want to do
+ // if we decide that no shadowing variable was dynamically introoduced.
DCHECK_EQ(kCreatedInitialized, var->initialization_flag());
return UpdateNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
}
@@ -1956,12 +1972,12 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
}
// Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding, both the Variable and the VariableProxy have the same
- // declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code), the VariableProxy is in
- // the source physically located after the initializer of the variable,
- // and that the initializer cannot be skipped due to a nonlinear scope.
+ // can be skipped in the following situation: we have a VariableMode::kLet or
+ // VariableMode::kConst binding, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the same
+ // function or in the same eval code), the VariableProxy is in the source
+ // physically located after the initializer of the variable, and that the
+ // initializer cannot be skipped due to a nonlinear scope.
//
// The condition on the closure scopes is a conservative check for
// nested functions that access a binding and are called before the
@@ -2136,7 +2152,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
//
// Temporary variables are always stack-allocated. Catch-bound variables are
// always context-allocated.
- if (var->mode() == TEMPORARY) return false;
+ if (var->mode() == VariableMode::kTemporary) return false;
if (is_catch_scope()) return true;
if ((is_script_scope() || is_eval_scope()) &&
IsLexicalVariableMode(var->mode())) {
@@ -2356,21 +2372,8 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
}
}
-void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
- MaybeHandle<ScopeInfo> outer_scope) {
- if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
- }
- MaybeHandle<ScopeInfo> outer = NeedsContext() ? scope_info_ : outer_scope;
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- if (scope->is_function_scope()) continue;
- scope->AllocateDebuggerScopeInfos(isolate, outer);
- }
-}
-
// static
-void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
- AnalyzeMode mode) {
+void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
DeclarationScope* scope = info->literal()->scope();
if (!scope->scope_info_.is_null()) return; // Allocated by outer function.
@@ -2380,9 +2383,6 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
}
scope->AllocateScopeInfosRecursively(isolate, outer_scope);
- if (mode == AnalyzeMode::kDebugger) {
- scope->AllocateDebuggerScopeInfos(isolate, outer_scope);
- }
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
@@ -2396,7 +2396,8 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
// Ensuring that the outer script scope has a scope info avoids having
// special case for native contexts vs other contexts.
if (info->script_scope() && info->script_scope()->scope_info_.is_null()) {
- info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
+ info->script_scope()->scope_info_ =
+ handle(ScopeInfo::Empty(isolate), isolate);
}
}
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index c95e3a380a..5618adee9e 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -78,8 +78,6 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
int count_;
};
-enum class AnalyzeMode { kRegular, kDebugger };
-
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@@ -134,7 +132,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
- static Scope* DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
+ static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
@@ -256,7 +255,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// which is an error even though the two 'e's are declared in different
// scopes.
Declaration* CheckLexDeclarationsConflictingWith(
- const ZoneList<const AstRawString*>& names);
+ const ZonePtrList<const AstRawString>& names);
// ---------------------------------------------------------------------------
// Scope-specific info.
@@ -366,7 +365,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Whether this needs to be represented by a runtime context.
bool NeedsContext() const {
// Catch scopes always have heap slots.
- DCHECK(!is_catch_scope() || num_heap_slots() > 0);
+ DCHECK_IMPLIES(is_catch_scope(), num_heap_slots() > 0);
+ DCHECK_IMPLIES(is_with_scope(), num_heap_slots() > 0);
return num_heap_slots() > 0;
}
@@ -646,12 +646,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Creates a script scope.
DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory);
- bool IsDeclaredParameter(const AstRawString* name) {
- // If IsSimpleParameterList is false, duplicate parameters are not allowed,
- // however `arguments` may be allowed if function is not strict code. Thus,
- // the assumptions explained above do not hold.
- return params_.Contains(variables_.Lookup(name));
- }
+ bool IsDeclaredParameter(const AstRawString* name);
FunctionKind function_kind() const { return function_kind_; }
@@ -812,7 +807,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// The local variable 'arguments' if we need to allocate it; nullptr
// otherwise.
Variable* arguments() const {
- DCHECK(!is_arrow_scope() || arguments_ == nullptr);
+ DCHECK_IMPLIES(is_arrow_scope(), arguments_ == nullptr);
return arguments_;
}
@@ -867,10 +862,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
- static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
- AnalyzeMode mode);
+ static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate);
- Handle<StringSet> CollectNonLocals(ParseInfo* info,
+ Handle<StringSet> CollectNonLocals(Isolate* isolate, ParseInfo* info,
Handle<StringSet> non_locals);
// Determine if we can use lazy compilation for this scope.
@@ -964,7 +958,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool has_inferred_function_name_ : 1;
// Parameter list in source order.
- ZoneList<Variable*> params_;
+ ZonePtrList<Variable> params_;
// Map of function names to lists of functions defined in sloppy blocks
SloppyBlockFunctionMap* sloppy_block_function_map_;
// Convenience variable.
@@ -1031,7 +1025,8 @@ class ModuleScope final : public DeclarationScope {
// The generated ModuleDescriptor does not preserve all information. In
// particular, its module_requests map will be empty because we no longer need
// the map after parsing.
- ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* ast_value_factory);
+ ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
+ AstValueFactory* ast_value_factory);
ModuleDescriptor* module() const {
DCHECK_NOT_NULL(module_descriptor_);
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index bce552c2c1..addcf8db2b 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -26,7 +26,7 @@ Variable::Variable(Variable* other)
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDynamicVariableMode(mode()) || mode() == VAR) &&
+ return (IsDynamicVariableMode(mode()) || mode() == VariableMode::kVar) &&
scope_ != nullptr && scope_->is_script_scope();
}
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 4d58c8fed9..10ac5c48a5 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -36,7 +36,8 @@ class Variable final : public ZoneObject {
LocationField::encode(VariableLocation::UNALLOCATED) |
VariableKindField::encode(kind)) {
// Var declared variables never need initialization.
- DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
+ DCHECK(!(mode == VariableMode::kVar &&
+ initialization_flag == kNeedsInitialization));
}
explicit Variable(Variable* other);
@@ -137,7 +138,8 @@ class Variable final : public ZoneObject {
}
Variable* local_if_not_shadowed() const {
- DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != nullptr);
+ DCHECK(mode() == VariableMode::kDynamicLocal &&
+ local_if_not_shadowed_ != nullptr);
return local_if_not_shadowed_;
}
@@ -175,7 +177,8 @@ class Variable final : public ZoneObject {
static InitializationFlag DefaultInitializationFlag(VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
- return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
+ return mode == VariableMode::kVar ? kCreatedInitialized
+ : kNeedsInitialization;
}
typedef ThreadedList<Variable> List;
diff --git a/deps/v8/src/async-hooks-wrapper.cc b/deps/v8/src/async-hooks-wrapper.cc
new file mode 100644
index 0000000000..cc080d9cfc
--- /dev/null
+++ b/deps/v8/src/async-hooks-wrapper.cc
@@ -0,0 +1,259 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/async-hooks-wrapper.h"
+#include "src/d8.h"
+
+namespace v8 {
+
+void AsyncHooksWrap::Enable() { enabled_ = true; }
+
+void AsyncHooksWrap::Disable() { enabled_ = false; }
+
+v8::Local<v8::Function> AsyncHooksWrap::init_function() const {
+ return init_function_.Get(isolate_);
+}
+void AsyncHooksWrap::set_init_function(v8::Local<v8::Function> value) {
+ init_function_.Reset(isolate_, value);
+}
+v8::Local<v8::Function> AsyncHooksWrap::before_function() const {
+ return before_function_.Get(isolate_);
+}
+void AsyncHooksWrap::set_before_function(v8::Local<v8::Function> value) {
+ before_function_.Reset(isolate_, value);
+}
+v8::Local<v8::Function> AsyncHooksWrap::after_function() const {
+ return after_function_.Get(isolate_);
+}
+void AsyncHooksWrap::set_after_function(v8::Local<v8::Function> value) {
+ after_function_.Reset(isolate_, value);
+}
+v8::Local<v8::Function> AsyncHooksWrap::promiseResolve_function() const {
+ return promiseResolve_function_.Get(isolate_);
+}
+void AsyncHooksWrap::set_promiseResolve_function(
+ v8::Local<v8::Function> value) {
+ promiseResolve_function_.Reset(isolate_, value);
+}
+
+static AsyncHooksWrap* UnwrapHook(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ Local<Object> hook = args.This();
+ Local<External> wrap = Local<External>::Cast(hook->GetInternalField(0));
+ void* ptr = wrap->Value();
+ return static_cast<AsyncHooksWrap*>(ptr);
+}
+
+static void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ AsyncHooksWrap* wrap = UnwrapHook(args);
+ wrap->Enable();
+}
+
+static void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ AsyncHooksWrap* wrap = UnwrapHook(args);
+ wrap->Disable();
+}
+
+async_id_t AsyncHooks::GetExecutionAsyncId() const {
+ return asyncContexts.top().execution_async_id;
+}
+
+async_id_t AsyncHooks::GetTriggerAsyncId() const {
+ return asyncContexts.top().trigger_async_id;
+}
+
+Local<Object> AsyncHooks::CreateHook(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ EscapableHandleScope handle_scope(isolate);
+
+ Local<Context> currentContext = isolate->GetCurrentContext();
+
+ if (args.Length() != 1 || !args[0]->IsObject()) {
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, "Invalid arguments passed to createHook",
+ NewStringType::kNormal)
+ .ToLocalChecked());
+ return Local<Object>();
+ }
+
+ AsyncHooksWrap* wrap = new AsyncHooksWrap(isolate);
+
+ Local<Object> fn_obj = args[0].As<Object>();
+
+#define SET_HOOK_FN(name) \
+ Local<Value> name##_v = \
+ fn_obj \
+ ->Get(currentContext, \
+ String::NewFromUtf8(isolate, #name, NewStringType::kNormal) \
+ .ToLocalChecked()) \
+ .ToLocalChecked(); \
+ if (name##_v->IsFunction()) { \
+ wrap->set_##name##_function(name##_v.As<Function>()); \
+ }
+
+ SET_HOOK_FN(init);
+ SET_HOOK_FN(before);
+ SET_HOOK_FN(after);
+ SET_HOOK_FN(promiseResolve);
+#undef SET_HOOK_FN
+
+ async_wraps_.push_back(wrap);
+
+ Local<Object> obj = async_hooks_templ.Get(isolate)
+ ->NewInstance(currentContext)
+ .ToLocalChecked();
+ obj->SetInternalField(0, External::New(isolate, wrap));
+
+ return handle_scope.Escape(obj);
+}
+
+void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent) {
+ AsyncHooks* hooks =
+ PerIsolateData::Get(promise->GetIsolate())->GetAsyncHooks();
+
+ HandleScope handle_scope(hooks->isolate_);
+
+ Local<Context> currentContext = hooks->isolate_->GetCurrentContext();
+
+ if (type == PromiseHookType::kInit) {
+ ++hooks->current_async_id;
+ Local<Integer> async_id =
+ Integer::New(hooks->isolate_, hooks->current_async_id);
+
+ promise->SetPrivate(currentContext,
+ hooks->async_id_smb.Get(hooks->isolate_), async_id);
+ if (parent->IsPromise()) {
+ Local<Promise> parent_promise = parent.As<Promise>();
+ Local<Value> parent_async_id =
+ parent_promise
+ ->GetPrivate(hooks->isolate_->GetCurrentContext(),
+ hooks->async_id_smb.Get(hooks->isolate_))
+ .ToLocalChecked();
+ promise->SetPrivate(currentContext,
+ hooks->trigger_id_smb.Get(hooks->isolate_),
+ parent_async_id);
+ } else {
+ CHECK(parent->IsUndefined());
+ Local<Integer> trigger_id = Integer::New(hooks->isolate_, 0);
+ promise->SetPrivate(currentContext,
+ hooks->trigger_id_smb.Get(hooks->isolate_),
+ trigger_id);
+ }
+ } else if (type == PromiseHookType::kBefore) {
+ AsyncContext ctx;
+ ctx.execution_async_id =
+ promise
+ ->GetPrivate(hooks->isolate_->GetCurrentContext(),
+ hooks->async_id_smb.Get(hooks->isolate_))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ ctx.trigger_async_id =
+ promise
+ ->GetPrivate(hooks->isolate_->GetCurrentContext(),
+ hooks->trigger_id_smb.Get(hooks->isolate_))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ hooks->asyncContexts.push(ctx);
+ } else if (type == PromiseHookType::kAfter) {
+ hooks->asyncContexts.pop();
+ }
+
+ for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
+ PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ }
+}
+
+void AsyncHooks::Initialize() {
+ HandleScope handle_scope(isolate_);
+
+ async_hook_ctor.Reset(isolate_, FunctionTemplate::New(isolate_));
+ async_hook_ctor.Get(isolate_)->SetClassName(
+ String::NewFromUtf8(isolate_, "AsyncHook", NewStringType::kNormal)
+ .ToLocalChecked());
+
+ async_hooks_templ.Reset(isolate_,
+ async_hook_ctor.Get(isolate_)->InstanceTemplate());
+ async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
+ async_hooks_templ.Get(isolate_)->Set(
+ String::NewFromUtf8(isolate_, "enable"),
+ FunctionTemplate::New(isolate_, EnableHook));
+ async_hooks_templ.Get(isolate_)->Set(
+ String::NewFromUtf8(isolate_, "disable"),
+ FunctionTemplate::New(isolate_, DisableHook));
+
+ async_id_smb.Reset(isolate_, Private::New(isolate_));
+ trigger_id_smb.Reset(isolate_, Private::New(isolate_));
+
+ isolate_->SetPromiseHook(ShellPromiseHook);
+}
+
+void AsyncHooks::Deinitialize() {
+ isolate_->SetPromiseHook(nullptr);
+ for (AsyncHooksWrap* wrap : async_wraps_) {
+ delete wrap;
+ }
+}
+
+void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
+ Local<Promise> promise,
+ Local<Value> parent, AsyncHooksWrap* wrap,
+ AsyncHooks* hooks) {
+ if (!wrap->IsEnabled()) {
+ return;
+ }
+
+ HandleScope handle_scope(hooks->isolate_);
+
+ TryCatch try_catch(hooks->isolate_);
+ try_catch.SetVerbose(true);
+
+ Local<Value> rcv = Undefined(hooks->isolate_);
+ Local<Value> async_id =
+ promise
+ ->GetPrivate(hooks->isolate_->GetCurrentContext(),
+ hooks->async_id_smb.Get(hooks->isolate_))
+ .ToLocalChecked();
+ Local<Value> args[1] = {async_id};
+
+ // Sacrifice the brevity for readability and debugfulness
+ if (type == PromiseHookType::kInit) {
+ if (!wrap->init_function().IsEmpty()) {
+ Local<Value> initArgs[4] = {
+ async_id,
+ String::NewFromUtf8(hooks->isolate_, "PROMISE",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ promise
+ ->GetPrivate(hooks->isolate_->GetCurrentContext(),
+ hooks->trigger_id_smb.Get(hooks->isolate_))
+ .ToLocalChecked(),
+ promise};
+ wrap->init_function()->Call(rcv, 4, initArgs);
+ }
+ } else if (type == PromiseHookType::kBefore) {
+ if (!wrap->before_function().IsEmpty()) {
+ wrap->before_function()->Call(rcv, 1, args);
+ }
+ } else if (type == PromiseHookType::kAfter) {
+ if (!wrap->after_function().IsEmpty()) {
+ wrap->after_function()->Call(rcv, 1, args);
+ }
+ } else if (type == PromiseHookType::kResolve) {
+ if (!wrap->promiseResolve_function().IsEmpty()) {
+ wrap->promiseResolve_function()->Call(rcv, 1, args);
+ }
+ }
+
+ if (try_catch.HasCaught()) {
+ Shell::ReportException(hooks->isolate_, &try_catch);
+ }
+}
+
+} // namespace v8
diff --git a/deps/v8/src/async-hooks-wrapper.h b/deps/v8/src/async-hooks-wrapper.h
new file mode 100644
index 0000000000..c0c72373e0
--- /dev/null
+++ b/deps/v8/src/async-hooks-wrapper.h
@@ -0,0 +1,95 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ASYNC_HOOKS_WRAPPER_H_
+#define V8_ASYNC_HOOKS_WRAPPER_H_
+
+#include <stack>
+
+#include "include/v8.h"
+#include "src/objects.h"
+
+namespace v8 {
+
+typedef double async_id_t;
+
+struct AsyncContext {
+ async_id_t execution_async_id;
+ async_id_t trigger_async_id;
+};
+
+class AsyncHooksWrap {
+ public:
+ explicit AsyncHooksWrap(Isolate* isolate) {
+ enabled_ = false;
+ isolate_ = isolate;
+ }
+ void Enable();
+ void Disable();
+ bool IsEnabled() const { return enabled_; }
+
+ inline v8::Local<v8::Function> init_function() const;
+ inline void set_init_function(v8::Local<v8::Function> value);
+ inline v8::Local<v8::Function> before_function() const;
+ inline void set_before_function(v8::Local<v8::Function> value);
+ inline v8::Local<v8::Function> after_function() const;
+ inline void set_after_function(v8::Local<v8::Function> value);
+ inline v8::Local<v8::Function> promiseResolve_function() const;
+ inline void set_promiseResolve_function(v8::Local<v8::Function> value);
+
+ private:
+ Isolate* isolate_;
+
+ Persistent<v8::Function> init_function_;
+ Persistent<v8::Function> before_function_;
+ Persistent<v8::Function> after_function_;
+ Persistent<v8::Function> promiseResolve_function_;
+
+ bool enabled_;
+};
+
+class AsyncHooks {
+ public:
+ explicit AsyncHooks(Isolate* isolate) {
+ isolate_ = isolate;
+
+ AsyncContext ctx;
+ ctx.execution_async_id = 1;
+ ctx.trigger_async_id = 0;
+ asyncContexts.push(ctx);
+ current_async_id = 1;
+
+ Initialize();
+ }
+ ~AsyncHooks() { Deinitialize(); }
+
+ async_id_t GetExecutionAsyncId() const;
+ async_id_t GetTriggerAsyncId() const;
+
+ Local<Object> CreateHook(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ std::vector<AsyncHooksWrap*> async_wraps_;
+ Isolate* isolate_;
+ Persistent<FunctionTemplate> async_hook_ctor;
+ Persistent<ObjectTemplate> async_hooks_templ;
+ Persistent<Private> async_id_smb;
+ Persistent<Private> trigger_id_smb;
+
+ void Initialize();
+ void Deinitialize();
+
+ static void ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent);
+ static void PromiseHookDispatch(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent, AsyncHooksWrap* wrap,
+ AsyncHooks* hooks);
+
+ std::stack<AsyncContext> asyncContexts;
+ async_id_t current_async_id;
+};
+
+} // namespace v8
+
+#endif // V8_ASYNC_HOOKS_WRAPPER_H_
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index d29693e3f8..d48d696022 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -31,7 +31,8 @@ namespace internal {
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
- V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
+ V(kInvalidParametersAndRegistersInGenerator, \
+ "invalid parameters and registers in generator") \
V(kInvalidSharedFunctionInfoData, "Invalid SharedFunctionInfo data") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 7787e4ff52..d81c537e57 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -15,46 +15,6 @@ namespace v8 {
namespace base {
// Deprecated. Use std::atomic<T> for new code.
-template <class T>
-class AtomicNumber {
- public:
- AtomicNumber() : value_(0) {}
- explicit AtomicNumber(T initial) : value_(initial) {}
-
- // Returns the value after incrementing.
- V8_INLINE T Increment(T increment) {
- return static_cast<T>(base::Barrier_AtomicIncrement(
- &value_, static_cast<base::AtomicWord>(increment)));
- }
-
- // Returns the value after decrementing.
- V8_INLINE T Decrement(T decrement) {
- return static_cast<T>(base::Barrier_AtomicIncrement(
- &value_, -static_cast<base::AtomicWord>(decrement)));
- }
-
- V8_INLINE T Value() const {
- return static_cast<T>(base::Acquire_Load(&value_));
- }
-
- V8_INLINE void SetValue(T new_value) {
- base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
- }
-
- V8_INLINE T operator=(T value) {
- SetValue(value);
- return value;
- }
-
- V8_INLINE T operator+=(T value) { return Increment(value); }
- V8_INLINE T operator-=(T value) { return Decrement(value); }
-
- private:
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
-
- base::AtomicWord value_;
-};
-
// Flag using T atomically. Also accepts void* as T.
template <typename T>
class AtomicValue {
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 3bec12cb88..c1637de81c 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -27,15 +27,15 @@ class Flags final {
typedef T flag_type;
typedef S mask_type;
- Flags() : mask_(0) {}
- Flags(flag_type flag) // NOLINT(runtime/explicit)
+ constexpr Flags() : mask_(0) {}
+ constexpr Flags(flag_type flag) // NOLINT(runtime/explicit)
: mask_(static_cast<S>(flag)) {}
- explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
+ constexpr explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
- bool operator==(flag_type flag) const {
+ constexpr bool operator==(flag_type flag) const {
return mask_ == static_cast<S>(flag);
}
- bool operator!=(flag_type flag) const {
+ constexpr bool operator!=(flag_type flag) const {
return mask_ != static_cast<S>(flag);
}
@@ -52,22 +52,34 @@ class Flags final {
return *this;
}
- Flags operator&(const Flags& flags) const { return Flags(*this) &= flags; }
- Flags operator|(const Flags& flags) const { return Flags(*this) |= flags; }
- Flags operator^(const Flags& flags) const { return Flags(*this) ^= flags; }
+ constexpr Flags operator&(const Flags& flags) const {
+ return Flags(*this) &= flags;
+ }
+ constexpr Flags operator|(const Flags& flags) const {
+ return Flags(*this) |= flags;
+ }
+ constexpr Flags operator^(const Flags& flags) const {
+ return Flags(*this) ^= flags;
+ }
Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
Flags& operator|=(flag_type flag) { return operator|=(Flags(flag)); }
Flags& operator^=(flag_type flag) { return operator^=(Flags(flag)); }
- Flags operator&(flag_type flag) const { return operator&(Flags(flag)); }
- Flags operator|(flag_type flag) const { return operator|(Flags(flag)); }
- Flags operator^(flag_type flag) const { return operator^(Flags(flag)); }
+ constexpr Flags operator&(flag_type flag) const {
+ return operator&(Flags(flag));
+ }
+ constexpr Flags operator|(flag_type flag) const {
+ return operator|(Flags(flag));
+ }
+ constexpr Flags operator^(flag_type flag) const {
+ return operator^(Flags(flag));
+ }
- Flags operator~() const { return Flags(~mask_); }
+ constexpr Flags operator~() const { return Flags(~mask_); }
- operator mask_type() const { return mask_; }
- bool operator!() const { return !mask_; }
+ constexpr operator mask_type() const { return mask_; }
+ constexpr bool operator!() const { return !mask_; }
friend size_t hash_value(const Flags& flags) { return flags.mask_; }
diff --git a/deps/v8/src/base/list.h b/deps/v8/src/base/list.h
new file mode 100644
index 0000000000..18e45318a2
--- /dev/null
+++ b/deps/v8/src/base/list.h
@@ -0,0 +1,136 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LIST_H_
+#define V8_BASE_LIST_H_
+
+#include <atomic>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+class List {
+ public:
+ List() : front_(nullptr), back_(nullptr) {}
+
+ void PushBack(T* element) {
+ DCHECK(!element->list_node().next());
+ DCHECK(!element->list_node().prev());
+ if (back_) {
+ DCHECK(front_);
+ InsertAfter(element, back_);
+ } else {
+ AddFirstElement(element);
+ }
+ }
+
+ void PushFront(T* element) {
+ DCHECK(!element->list_node().next());
+ DCHECK(!element->list_node().prev());
+ if (front_) {
+ DCHECK(back_);
+ InsertBefore(element, front_);
+ } else {
+ AddFirstElement(element);
+ }
+ }
+
+ void Remove(T* element) {
+ DCHECK(Contains(element));
+ if (back_ == element) {
+ back_ = element->list_node().prev();
+ }
+ if (front_ == element) {
+ front_ = element->list_node().next();
+ }
+ T* next = element->list_node().next();
+ T* prev = element->list_node().prev();
+ if (next) next->list_node().set_prev(prev);
+ if (prev) prev->list_node().set_next(next);
+ element->list_node().set_prev(nullptr);
+ element->list_node().set_next(nullptr);
+ }
+
+ bool Contains(T* element) {
+ T* it = front_;
+ while (it) {
+ if (it == element) return true;
+ it = it->list_node().next();
+ }
+ return false;
+ }
+
+ bool Empty() { return !front_ && !back_; }
+
+ T* front() { return front_; }
+ T* back() { return back_; }
+
+ private:
+ void AddFirstElement(T* element) {
+ DCHECK(!back_);
+ DCHECK(!front_);
+ DCHECK(!element->list_node().next());
+ DCHECK(!element->list_node().prev());
+ element->list_node().set_prev(nullptr);
+ element->list_node().set_next(nullptr);
+ front_ = element;
+ back_ = element;
+ }
+
+ void InsertAfter(T* element, T* other) {
+ T* other_next = other->list_node().next();
+ element->list_node().set_next(other_next);
+ element->list_node().set_prev(other);
+ other->list_node().set_next(element);
+ if (other_next)
+ other_next->list_node().set_prev(element);
+ else
+ back_ = element;
+ }
+
+ void InsertBefore(T* element, T* other) {
+ T* other_prev = other->list_node().prev();
+ element->list_node().set_next(other);
+ element->list_node().set_prev(other_prev);
+ other->list_node().set_prev(element);
+ if (other_prev) {
+ other_prev->list_node().set_next(element);
+ } else {
+ front_ = element;
+ }
+ }
+
+ T* front_;
+ T* back_;
+};
+
+template <class T>
+class ListNode {
+ public:
+ ListNode() { Initialize(); }
+
+ T* next() { return next_; }
+ T* prev() { return prev_; }
+
+ void Initialize() {
+ next_ = nullptr;
+ prev_ = nullptr;
+ }
+
+ private:
+ void set_next(T* next) { next_ = next; }
+ void set_prev(T* prev) { prev_ = prev; }
+
+ T* next_;
+ T* prev_;
+
+ friend class List<T>;
+};
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_LIST_H_
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 3437309bc7..5d10ae4ec0 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -145,10 +145,6 @@ V8_INLINE Dest bit_cast(Source const& source) {
void operator delete(void*, size_t) { base::OS::Abort(); } \
void operator delete[](void*, size_t) { base::OS::Abort(); }
-// Newly written code should use V8_INLINE and V8_NOINLINE directly.
-#define INLINE(declarator) V8_INLINE declarator
-#define NO_INLINE(declarator) V8_NOINLINE declarator
-
// Define V8_USE_ADDRESS_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index d21107d6f7..cf7f3ec9bb 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -156,6 +156,8 @@ int ReclaimInaccessibleMemory(void* address, size_t size) {
#else
int ret = madvise(address, size, MADV_FREE);
#endif
+ if (ret != 0 && errno == ENOSYS)
+ return 0; // madvise is not available on all systems.
if (ret != 0 && errno == EINVAL) {
// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 5d015eeeac..51b6014821 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -48,7 +48,7 @@ namespace base {
#define V8_FAST_TLS_SUPPORTED 1
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+V8_INLINE intptr_t InternalGetExistingThreadLocal(intptr_t index);
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
const intptr_t kTibInlineTlsOffset = 0xE10;
@@ -74,7 +74,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
extern V8_BASE_EXPORT intptr_t kMacTlsBaseOffset;
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+V8_INLINE intptr_t InternalGetExistingThreadLocal(intptr_t index);
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
intptr_t result;
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index cf34af646c..1ab56f42b5 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -86,6 +86,26 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) {
return 0;
#endif
}
+
+V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
+ // Limit duration of timer resolution measurement to 100 ms. If we cannot
+ // measure timer resolution within this time, we assume a low resolution
+ // timer.
+ int64_t end =
+ ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
+ int64_t start, delta;
+ do {
+ start = ClockNow(clk_id);
+ // Loop until we can detect that the clock has changed. Non-HighRes timers
+ // will increment in chunks, i.e. 15ms. By spinning until we see a clock
+ // change, we detect the minimum time between measurements.
+ do {
+ delta = ClockNow(clk_id) - start;
+ } while (delta == 0);
+ } while (delta > 1 && start < end);
+ return delta <= 1;
+}
+
#elif V8_OS_WIN
V8_INLINE bool IsQPCReliable() {
v8::base::CPU cpu;
@@ -735,7 +755,16 @@ TimeTicks TimeTicks::Now() {
}
// static
-bool TimeTicks::IsHighResolution() { return true; }
+bool TimeTicks::IsHighResolution() {
+#if V8_OS_MACOSX
+ return true;
+#elif V8_OS_POSIX
+ static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
+ return is_high_resolution;
+#else
+ return true;
+#endif
+}
#endif // V8_OS_WIN
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 548ef5109a..6723f3d5d4 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -20,12 +20,17 @@
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/arguments.h"
+#include "src/objects/hash-table-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale.h"
#endif // V8_INTL_SUPPORT
-#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/templates.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -35,25 +40,25 @@ namespace v8 {
namespace internal {
void SourceCodeCache::Initialize(Isolate* isolate, bool create_heap_objects) {
- cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : nullptr;
+ cache_ = create_heap_objects ? ReadOnlyRoots(isolate).empty_fixed_array()
+ : nullptr;
}
-bool SourceCodeCache::Lookup(Vector<const char> name,
+bool SourceCodeCache::Lookup(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i += 2) {
SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
if (str->IsUtf8EqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
+ SharedFunctionInfo::cast(cache_->get(i + 1)), isolate);
return true;
}
}
return false;
}
-void SourceCodeCache::Add(Vector<const char> name,
+void SourceCodeCache::Add(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
int length = cache_->length();
@@ -79,7 +84,6 @@ Handle<String> Bootstrapper::GetNativeSource(NativeType type, int index) {
new NativesExternalStringResource(type, index);
Handle<ExternalOneByteString> source_code =
isolate_->factory()->NewNativeSourceString(resource);
- isolate_->heap()->RegisterExternalString(*source_code);
DCHECK(source_code->is_short());
return source_code;
}
@@ -254,7 +258,8 @@ class Genesis BASE_EMBEDDED {
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
- static bool InstallExtensions(Handle<Context> native_context,
+ static bool InstallExtensions(Isolate* isolate,
+ Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
static bool InstallAutoExtensions(Isolate* isolate,
ExtensionStates* extension_states);
@@ -267,7 +272,8 @@ class Genesis BASE_EMBEDDED {
static bool InstallExtension(Isolate* isolate,
v8::RegisteredExtension* current,
ExtensionStates* extension_states);
- static bool InstallSpecialObjects(Handle<Context> native_context);
+ static bool InstallSpecialObjects(Isolate* isolate,
+ Handle<Context> native_context);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
bool ConfigureGlobalObjects(
@@ -343,17 +349,17 @@ Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
}
void Bootstrapper::DetachGlobal(Handle<Context> env) {
- Isolate* isolate = env->GetIsolate();
- isolate->counters()->errors_thrown_per_context()->AddSample(
+ isolate_->counters()->errors_thrown_per_context()->AddSample(
env->GetErrorsThrown());
- Heap* heap = isolate->heap();
- Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
- global_proxy->set_native_context(heap->null_value());
- JSObject::ForceSetPrototype(global_proxy, isolate->factory()->null_value());
- global_proxy->map()->SetConstructor(heap->null_value());
+ ReadOnlyRoots roots(isolate_);
+ Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()),
+ isolate_);
+ global_proxy->set_native_context(roots.null_value());
+ JSObject::ForceSetPrototype(global_proxy, isolate_->factory()->null_value());
+ global_proxy->map()->SetConstructor(roots.null_value());
if (FLAG_track_detached_contexts) {
- env->GetIsolate()->AddDetachedContext(env);
+ isolate_->AddDetachedContext(env);
}
}
@@ -380,19 +386,20 @@ V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateBuiltinSharedFunctionInfo(
return shared;
}
-V8_NOINLINE void InstallFunction(Handle<JSObject> target,
+V8_NOINLINE void InstallFunction(Isolate* isolate, Handle<JSObject> target,
Handle<Name> property_name,
Handle<JSFunction> function,
Handle<String> function_name,
PropertyAttributes attributes = DONT_ENUM) {
- JSObject::AddProperty(target, property_name, function, attributes);
+ JSObject::AddProperty(isolate, target, property_name, function, attributes);
}
-V8_NOINLINE void InstallFunction(Handle<JSObject> target,
+V8_NOINLINE void InstallFunction(Isolate* isolate, Handle<JSObject> target,
Handle<JSFunction> function, Handle<Name> name,
PropertyAttributes attributes = DONT_ENUM) {
- Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
- InstallFunction(target, name, function, name_string, attributes);
+ Handle<String> name_string =
+ Name::ToFunctionName(isolate, name).ToHandleChecked();
+ InstallFunction(isolate, target, name, function, name_string, attributes);
}
V8_NOINLINE Handle<JSFunction> CreateFunction(
@@ -424,27 +431,27 @@ V8_NOINLINE Handle<JSFunction> CreateFunction(
}
V8_NOINLINE Handle<JSFunction> InstallFunction(
- Handle<JSObject> target, Handle<Name> name, InstanceType type,
- int instance_size, int inobject_properties,
+ Isolate* isolate, Handle<JSObject> target, Handle<Name> name,
+ InstanceType type, int instance_size, int inobject_properties,
MaybeHandle<Object> maybe_prototype, Builtins::Name call,
PropertyAttributes attributes) {
- Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
+ Handle<String> name_string =
+ Name::ToFunctionName(isolate, name).ToHandleChecked();
Handle<JSFunction> function =
- CreateFunction(target->GetIsolate(), name_string, type, instance_size,
+ CreateFunction(isolate, name_string, type, instance_size,
inobject_properties, maybe_prototype, call);
- InstallFunction(target, name, function, name_string, attributes);
+ InstallFunction(isolate, target, name, function, name_string, attributes);
return function;
}
V8_NOINLINE Handle<JSFunction> InstallFunction(
- Handle<JSObject> target, const char* name, InstanceType type,
- int instance_size, int inobject_properties,
+ Isolate* isolate, Handle<JSObject> target, const char* name,
+ InstanceType type, int instance_size, int inobject_properties,
MaybeHandle<Object> maybe_prototype, Builtins::Name call) {
- Factory* const factory = target->GetIsolate()->factory();
PropertyAttributes attributes = DONT_ENUM;
- return InstallFunction(target, factory->InternalizeUtf8String(name), type,
- instance_size, inobject_properties, maybe_prototype,
- call, attributes);
+ return InstallFunction(
+ isolate, target, isolate->factory()->InternalizeUtf8String(name), type,
+ instance_size, inobject_properties, maybe_prototype, call, attributes);
}
V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
@@ -464,72 +471,73 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
}
V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Handle<JSObject> base, Handle<Name> property_name,
+ Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
Handle<String> function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
Handle<JSFunction> fun =
- SimpleCreateFunction(base->GetIsolate(), function_name, call, len, adapt);
+ SimpleCreateFunction(isolate, function_name, call, len, adapt);
if (id != kInvalidBuiltinFunctionId) {
fun->shared()->set_builtin_function_id(id);
}
- InstallFunction(base, fun, property_name, attrs);
+ InstallFunction(isolate, base, fun, property_name, attrs);
return fun;
}
V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Handle<JSObject> base, Handle<String> name, Builtins::Name call, int len,
- bool adapt, PropertyAttributes attrs = DONT_ENUM,
+ Isolate* isolate, Handle<JSObject> base, Handle<String> name,
+ Builtins::Name call, int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
- return SimpleInstallFunction(base, name, name, call, len, adapt, attrs, id);
+ return SimpleInstallFunction(isolate, base, name, name, call, len, adapt,
+ attrs, id);
}
V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Handle<JSObject> base, Handle<Name> property_name,
+ Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
const char* function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
- Factory* const factory = base->GetIsolate()->factory();
// Function name does not have to be internalized.
return SimpleInstallFunction(
- base, property_name, factory->NewStringFromAsciiChecked(function_name),
- call, len, adapt, attrs, id);
+ isolate, base, property_name,
+ isolate->factory()->NewStringFromAsciiChecked(function_name), call, len,
+ adapt, attrs, id);
}
V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Handle<JSObject> base, const char* name, Builtins::Name call, int len,
- bool adapt, PropertyAttributes attrs = DONT_ENUM,
+ Isolate* isolate, Handle<JSObject> base, const char* name,
+ Builtins::Name call, int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
- Factory* const factory = base->GetIsolate()->factory();
// Although function name does not have to be internalized the property name
// will be internalized during property addition anyway, so do it here now.
- return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
- len, adapt, attrs, id);
+ return SimpleInstallFunction(isolate, base,
+ isolate->factory()->InternalizeUtf8String(name),
+ call, len, adapt, attrs, id);
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- const char* name,
- Builtins::Name call,
- int len, bool adapt,
- BuiltinFunctionId id) {
- return SimpleInstallFunction(base, name, call, len, adapt, DONT_ENUM, id);
+V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
+ Isolate* isolate, Handle<JSObject> base, const char* name,
+ Builtins::Name call, int len, bool adapt, BuiltinFunctionId id) {
+ return SimpleInstallFunction(isolate, base, name, call, len, adapt, DONT_ENUM,
+ id);
}
-V8_NOINLINE void SimpleInstallGetterSetter(Handle<JSObject> base,
+V8_NOINLINE void SimpleInstallGetterSetter(Isolate* isolate,
+ Handle<JSObject> base,
Handle<String> name,
Builtins::Name call_getter,
Builtins::Name call_setter,
PropertyAttributes attribs) {
- Isolate* const isolate = base->GetIsolate();
-
Handle<String> getter_name =
- Name::ToFunctionName(name, isolate->factory()->get_string())
+ Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
Handle<String> setter_name =
- Name::ToFunctionName(name, isolate->factory()->set_string())
+ Name::ToFunctionName(isolate, name, isolate->factory()->set_string())
.ToHandleChecked();
Handle<JSFunction> setter =
SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
@@ -537,15 +545,11 @@ V8_NOINLINE void SimpleInstallGetterSetter(Handle<JSObject> base,
JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<Name> name,
- Handle<Name> property_name,
- Builtins::Name call,
- bool adapt) {
- Isolate* const isolate = base->GetIsolate();
-
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(
+ Isolate* isolate, Handle<JSObject> base, Handle<Name> name,
+ Handle<Name> property_name, Builtins::Name call, bool adapt) {
Handle<String> getter_name =
- Name::ToFunctionName(name, isolate->factory()->get_string())
+ Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
@@ -558,19 +562,19 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
return getter;
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Isolate* isolate,
+ Handle<JSObject> base,
Handle<Name> name,
Builtins::Name call,
bool adapt) {
- return SimpleInstallGetter(base, name, name, call, adapt);
+ return SimpleInstallGetter(isolate, base, name, name, call, adapt);
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<Name> name,
- Builtins::Name call,
- bool adapt,
- BuiltinFunctionId id) {
- Handle<JSFunction> fun = SimpleInstallGetter(base, name, call, adapt);
+V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(
+ Isolate* isolate, Handle<JSObject> base, Handle<Name> name,
+ Builtins::Name call, bool adapt, BuiltinFunctionId id) {
+ Handle<JSFunction> fun =
+ SimpleInstallGetter(isolate, base, name, call, adapt);
fun->shared()->set_builtin_function_id(id);
return fun;
}
@@ -578,15 +582,17 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
V8_NOINLINE void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
const char* name, Handle<Object> value) {
JSObject::AddProperty(
- holder, isolate->factory()->NewStringFromAsciiChecked(name), value,
+ isolate, holder, isolate->factory()->NewStringFromAsciiChecked(name),
+ value,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
-V8_NOINLINE void InstallSpeciesGetter(Handle<JSFunction> constructor) {
- Factory* factory = constructor->GetIsolate()->factory();
+V8_NOINLINE void InstallSpeciesGetter(Isolate* isolate,
+ Handle<JSFunction> constructor) {
+ Factory* factory = isolate->factory();
// TODO(adamk): We should be able to share a SharedFunctionInfo
// between all these JSFunctins.
- SimpleInstallGetter(constructor, factory->symbol_species_string(),
+ SimpleInstallGetter(isolate, constructor, factory->symbol_species_string(),
factory->species_symbol(), Builtins::kReturnReceiver,
true);
}
@@ -616,13 +622,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
script->set_type(Script::TYPE_NATIVE);
Handle<WeakFixedArray> infos = factory()->NewWeakFixedArray(2);
script->set_shared_function_infos(*infos);
- // TODO(cbruni): fix position information here.
- empty_function->shared()->set_raw_start_position(0);
- empty_function->shared()->set_raw_end_position(source->length());
empty_function->shared()->set_scope_info(*scope_info);
- empty_function->shared()->set_function_literal_id(1);
empty_function->shared()->DontAdaptArguments();
- SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
+ SharedFunctionInfo::SetScript(handle(empty_function->shared(), isolate()),
+ script, 1);
return empty_function;
}
@@ -660,7 +663,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
if (!restricted_properties_thrower_.is_null()) {
return restricted_properties_thrower_;
}
- Handle<String> name(factory()->empty_string());
+ Handle<String> name = factory()->empty_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
name, Builtins::kStrictPoisonPillThrower, i::LanguageMode::kStrict);
Handle<JSFunction> function = factory()->NewFunction(args);
@@ -770,8 +773,9 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
Handle<JSObject> object_function_prototype =
factory->NewFunctionPrototype(object_fun);
- Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
- "EmptyObjectPrototype");
+ Handle<Map> map =
+ Map::Copy(isolate(), handle(object_function_prototype->map(), isolate()),
+ "EmptyObjectPrototype");
map->set_is_prototype_map(true);
// Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
map->set_is_immutable_proto(true);
@@ -780,7 +784,7 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
// Complete setting up empty function.
{
Handle<Map> empty_function_map(empty_function->map(), isolate_);
- Map::SetPrototype(empty_function_map, object_function_prototype);
+ Map::SetPrototype(isolate(), empty_function_map, object_function_prototype);
}
native_context()->set_initial_object_prototype(*object_function_prototype);
@@ -790,23 +794,23 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
// Set up slow map for Object.create(null) instances without in-object
// properties.
Handle<Map> map(object_fun->initial_map(), isolate_);
- map = Map::CopyInitialMapNormalized(map);
- Map::SetPrototype(map, factory->null_value());
+ map = Map::CopyInitialMapNormalized(isolate(), map);
+ Map::SetPrototype(isolate(), map, factory->null_value());
native_context()->set_slow_object_with_null_prototype_map(*map);
// Set up slow map for literals with too many properties.
- map = Map::Copy(map, "slow_object_with_object_prototype_map");
- Map::SetPrototype(map, object_function_prototype);
+ map = Map::Copy(isolate(), map, "slow_object_with_object_prototype_map");
+ Map::SetPrototype(isolate(), map, object_function_prototype);
native_context()->set_slow_object_with_object_prototype_map(*map);
}
}
namespace {
-Handle<Map> CreateNonConstructorMap(Handle<Map> source_map,
+Handle<Map> CreateNonConstructorMap(Isolate* isolate, Handle<Map> source_map,
Handle<JSObject> prototype,
const char* reason) {
- Handle<Map> map = Map::Copy(source_map, reason);
+ Handle<Map> map = Map::Copy(isolate, source_map, reason);
// Ensure the resulting map has prototype slot (it is necessary for storing
// initial map even when the prototype property is not required).
if (!map->has_prototype_slot()) {
@@ -821,7 +825,7 @@ Handle<Map> CreateNonConstructorMap(Handle<Map> source_map,
map->SetInObjectUnusedPropertyFields(unused_property_fields);
}
map->set_is_constructor(false);
- Map::SetPrototype(map, prototype);
+ Map::SetPrototype(isolate, map, prototype);
return map;
}
@@ -832,9 +836,9 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSObject> iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(iterator_prototype, factory()->iterator_symbol(),
- "[Symbol.iterator]", Builtins::kReturnReceiver, 0,
- true);
+ SimpleInstallFunction(isolate(), iterator_prototype,
+ factory()->iterator_symbol(), "[Symbol.iterator]",
+ Builtins::kReturnReceiver, 0, true);
native_context()->set_initial_iterator_prototype(*iterator_prototype);
Handle<JSObject> generator_object_prototype =
@@ -847,27 +851,28 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
JSObject::ForceSetPrototype(generator_function_prototype, empty);
JSObject::AddProperty(
- generator_function_prototype, factory()->to_string_tag_symbol(),
+ isolate(), generator_function_prototype,
+ factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("GeneratorFunction"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(generator_function_prototype,
+ JSObject::AddProperty(isolate(), generator_function_prototype,
factory()->prototype_string(),
generator_object_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(generator_object_prototype,
+ JSObject::AddProperty(isolate(), generator_object_prototype,
factory()->constructor_string(),
generator_function_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(generator_object_prototype,
+ JSObject::AddProperty(isolate(), generator_object_prototype,
factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("Generator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(generator_object_prototype, "next",
+ SimpleInstallFunction(isolate(), generator_object_prototype, "next",
Builtins::kGeneratorPrototypeNext, 1, false);
- SimpleInstallFunction(generator_object_prototype, "return",
+ SimpleInstallFunction(isolate(), generator_object_prototype, "return",
Builtins::kGeneratorPrototypeReturn, 1, false);
- SimpleInstallFunction(generator_object_prototype, "throw",
+ SimpleInstallFunction(isolate(), generator_object_prototype, "throw",
Builtins::kGeneratorPrototypeThrow, 1, false);
// Internal version of generator_prototype_next, flagged as non-native such
@@ -884,29 +889,32 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// 04-14-15, section 25.2.4.3).
// Generator functions do not have "caller" or "arguments" accessors.
Handle<Map> map;
- map = CreateNonConstructorMap(isolate()->strict_function_map(),
+ map = CreateNonConstructorMap(isolate(), isolate()->strict_function_map(),
generator_function_prototype,
"GeneratorFunction");
native_context()->set_generator_function_map(*map);
- map = CreateNonConstructorMap(isolate()->strict_function_with_name_map(),
- generator_function_prototype,
- "GeneratorFunction with name");
+ map = CreateNonConstructorMap(
+ isolate(), isolate()->strict_function_with_name_map(),
+ generator_function_prototype, "GeneratorFunction with name");
native_context()->set_generator_function_with_name_map(*map);
- map = CreateNonConstructorMap(strict_function_with_home_object_map_,
- generator_function_prototype,
- "GeneratorFunction with home object");
+ map = CreateNonConstructorMap(
+ isolate(), strict_function_with_home_object_map_,
+ generator_function_prototype, "GeneratorFunction with home object");
native_context()->set_generator_function_with_home_object_map(*map);
- map = CreateNonConstructorMap(strict_function_with_name_and_home_object_map_,
+ map = CreateNonConstructorMap(isolate(),
+ strict_function_with_name_and_home_object_map_,
generator_function_prototype,
"GeneratorFunction with name and home object");
native_context()->set_generator_function_with_name_and_home_object_map(*map);
- Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<JSFunction> object_function(native_context()->object_function(),
+ isolate());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
- Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
+ Map::SetPrototype(isolate(), generator_object_prototype_map,
+ generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
}
@@ -918,25 +926,26 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
SimpleInstallFunction(
- async_iterator_prototype, factory()->async_iterator_symbol(),
+ isolate(), async_iterator_prototype, factory()->async_iterator_symbol(),
"[Symbol.asyncIterator]", Builtins::kReturnReceiver, 0, true);
// %AsyncFromSyncIteratorPrototype%
// proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
Handle<JSObject> async_from_sync_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(async_from_sync_iterator_prototype,
+ SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype,
factory()->next_string(),
Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
SimpleInstallFunction(
- async_from_sync_iterator_prototype, factory()->return_string(),
+ isolate(), async_from_sync_iterator_prototype, factory()->return_string(),
Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1, true);
SimpleInstallFunction(
- async_from_sync_iterator_prototype, factory()->throw_string(),
+ isolate(), async_from_sync_iterator_prototype, factory()->throw_string(),
Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1, true);
JSObject::AddProperty(
- async_from_sync_iterator_prototype, factory()->to_string_tag_symbol(),
+ isolate(), async_from_sync_iterator_prototype,
+ factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -945,7 +954,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
- Map::SetPrototype(async_from_sync_iterator_map,
+ Map::SetPrototype(isolate(), async_from_sync_iterator_map,
async_from_sync_iterator_prototype);
native_context()->set_async_from_sync_iterator_map(
*async_from_sync_iterator_map);
@@ -966,11 +975,15 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
// %AsyncGeneratorPrototype% intrinsic object.
// This property has the attributes
// { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: true }.
- JSObject::AddProperty(async_generator_function_prototype,
+ JSObject::AddProperty(isolate(), async_generator_function_prototype,
factory()->prototype_string(),
async_generator_object_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(async_generator_function_prototype,
+ JSObject::AddProperty(isolate(), async_generator_object_prototype,
+ factory()->constructor_string(),
+ async_generator_function_prototype,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(isolate(), async_generator_function_prototype,
factory()->to_string_tag_symbol(),
AsyncGeneratorFunction_string,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -981,15 +994,15 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
native_context()->set_initial_async_generator_prototype(
*async_generator_object_prototype);
- JSObject::AddProperty(async_generator_object_prototype,
+ JSObject::AddProperty(isolate(), async_generator_object_prototype,
factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("AsyncGenerator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(async_generator_object_prototype, "next",
+ SimpleInstallFunction(isolate(), async_generator_object_prototype, "next",
Builtins::kAsyncGeneratorPrototypeNext, 1, false);
- SimpleInstallFunction(async_generator_object_prototype, "return",
+ SimpleInstallFunction(isolate(), async_generator_object_prototype, "return",
Builtins::kAsyncGeneratorPrototypeReturn, 1, false);
- SimpleInstallFunction(async_generator_object_prototype, "throw",
+ SimpleInstallFunction(isolate(), async_generator_object_prototype, "throw",
Builtins::kAsyncGeneratorPrototypeThrow, 1, false);
// Create maps for generator functions and their prototypes. Store those
@@ -998,31 +1011,33 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
// 04-14-15, section 25.2.4.3).
// Async Generator functions do not have "caller" or "arguments" accessors.
Handle<Map> map;
- map = CreateNonConstructorMap(isolate()->strict_function_map(),
+ map = CreateNonConstructorMap(isolate(), isolate()->strict_function_map(),
async_generator_function_prototype,
"AsyncGeneratorFunction");
native_context()->set_async_generator_function_map(*map);
- map = CreateNonConstructorMap(isolate()->strict_function_with_name_map(),
- async_generator_function_prototype,
- "AsyncGeneratorFunction with name");
+ map = CreateNonConstructorMap(
+ isolate(), isolate()->strict_function_with_name_map(),
+ async_generator_function_prototype, "AsyncGeneratorFunction with name");
native_context()->set_async_generator_function_with_name_map(*map);
- map = CreateNonConstructorMap(strict_function_with_home_object_map_,
- async_generator_function_prototype,
- "AsyncGeneratorFunction with home object");
+ map =
+ CreateNonConstructorMap(isolate(), strict_function_with_home_object_map_,
+ async_generator_function_prototype,
+ "AsyncGeneratorFunction with home object");
native_context()->set_async_generator_function_with_home_object_map(*map);
map = CreateNonConstructorMap(
- strict_function_with_name_and_home_object_map_,
+ isolate(), strict_function_with_name_and_home_object_map_,
async_generator_function_prototype,
"AsyncGeneratorFunction with name and home object");
native_context()->set_async_generator_function_with_name_and_home_object_map(
*map);
- Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<JSFunction> object_function(native_context()->object_function(),
+ isolate());
Handle<Map> async_generator_object_prototype_map = Map::Create(isolate(), 0);
- Map::SetPrototype(async_generator_object_prototype_map,
+ Map::SetPrototype(isolate(), async_generator_object_prototype_map,
async_generator_object_prototype);
native_context()->set_async_generator_object_prototype_map(
*async_generator_object_prototype_map);
@@ -1034,29 +1049,29 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::ForceSetPrototype(async_function_prototype, empty);
- JSObject::AddProperty(async_function_prototype,
+ JSObject::AddProperty(isolate(), async_function_prototype,
factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("AsyncFunction"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
Handle<Map> map;
map = CreateNonConstructorMap(
- isolate()->strict_function_without_prototype_map(),
+ isolate(), isolate()->strict_function_without_prototype_map(),
async_function_prototype, "AsyncFunction");
native_context()->set_async_function_map(*map);
- map = CreateNonConstructorMap(isolate()->method_with_name_map(),
+ map = CreateNonConstructorMap(isolate(), isolate()->method_with_name_map(),
async_function_prototype,
"AsyncFunction with name");
native_context()->set_async_function_with_name_map(*map);
- map = CreateNonConstructorMap(isolate()->method_with_home_object_map(),
- async_function_prototype,
- "AsyncFunction with home object");
+ map = CreateNonConstructorMap(
+ isolate(), isolate()->method_with_home_object_map(),
+ async_function_prototype, "AsyncFunction with home object");
native_context()->set_async_function_with_home_object_map(*map);
map = CreateNonConstructorMap(
- isolate()->method_with_name_and_home_object_map(),
+ isolate(), isolate()->method_with_name_and_home_object_map(),
async_function_prototype, "AsyncFunction with name and home object");
native_context()->set_async_function_with_name_and_home_object_map(*map);
}
@@ -1071,13 +1086,14 @@ void Genesis::CreateJSProxyMaps() {
proxy_map->set_may_have_interesting_symbols(true);
native_context()->set_proxy_map(*proxy_map);
- Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
+ Handle<Map> proxy_callable_map =
+ Map::Copy(isolate_, proxy_map, "callable Proxy");
proxy_callable_map->set_is_callable(true);
native_context()->set_proxy_callable_map(*proxy_callable_map);
proxy_callable_map->SetConstructor(native_context()->function_function());
Handle<Map> proxy_constructor_map =
- Map::Copy(proxy_callable_map, "constructor Proxy");
+ Map::Copy(isolate_, proxy_callable_map, "constructor Proxy");
proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
@@ -1085,7 +1101,7 @@ void Genesis::CreateJSProxyMaps() {
Handle<Map> map =
factory()->NewMap(JS_OBJECT_TYPE, JSProxyRevocableResult::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 2);
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // proxy
Descriptor d = Descriptor::DataField(factory()->proxy_string(),
@@ -1100,7 +1116,7 @@ void Genesis::CreateJSProxyMaps() {
map->AppendDescriptor(&d);
}
- Map::SetPrototype(map, isolate()->initial_object_prototype());
+ Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
map->SetConstructor(native_context()->object_function());
native_context()->set_proxy_revocable_result_map(*map);
@@ -1108,11 +1124,11 @@ void Genesis::CreateJSProxyMaps() {
}
namespace {
-void ReplaceAccessors(Handle<Map> map, Handle<String> name,
+void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
DescriptorArray* descriptors = map->instance_descriptors();
- int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
+ int idx = descriptors->SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors->Replace(idx, &d);
}
@@ -1125,15 +1141,15 @@ void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
accessors->set_getter(*thrower);
accessors->set_setter(*thrower);
- Handle<Map> map(empty->map());
- ReplaceAccessors(map, factory()->arguments_string(), rw_attribs, accessors);
- ReplaceAccessors(map, factory()->caller_string(), rw_attribs, accessors);
+ Handle<Map> map(empty->map(), isolate());
+ ReplaceAccessors(isolate(), map, factory()->arguments_string(), rw_attribs,
+ accessors);
+ ReplaceAccessors(isolate(), map, factory()->caller_string(), rw_attribs,
+ accessors);
}
-
-static void AddToWeakNativeContextList(Context* context) {
+static void AddToWeakNativeContextList(Isolate* isolate, Context* context) {
DCHECK(context->IsNativeContext());
- Isolate* isolate = context->GetIsolate();
Heap* heap = isolate->heap();
#ifdef DEBUG
{ // NOLINT
@@ -1158,7 +1174,7 @@ void Genesis::CreateRoots() {
// and the global object, but in order to create those, we need the
// native context).
native_context_ = factory()->NewNativeContext();
- AddToWeakNativeContextList(*native_context());
+ AddToWeakNativeContextList(isolate(), *native_context());
isolate()->set_context(*native_context());
// Allocate the message listeners object.
@@ -1171,7 +1187,7 @@ void Genesis::CreateRoots() {
void Genesis::InstallGlobalThisBinding() {
Handle<ScriptContextTable> script_contexts(
- native_context()->script_context_table());
+ native_context()->script_context_table(), isolate());
Handle<ScopeInfo> scope_info = ScopeInfo::CreateGlobalThisBinding(isolate());
Handle<Context> context =
factory()->NewScriptContext(native_context(), scope_info);
@@ -1212,7 +1228,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
v8::Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor =
Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
+ FunctionTemplateInfo::cast(data->constructor()), isolate());
Handle<Object> proto_template(global_constructor->prototype_template(),
isolate());
if (!proto_template->IsUndefined(isolate())) {
@@ -1222,7 +1238,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
}
if (js_global_object_template.is_null()) {
- Handle<String> name(factory()->empty_string());
+ Handle<String> name = factory()->empty_string();
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
@@ -1230,7 +1246,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Builtins::kIllegal, MUTABLE);
js_global_object_function = factory()->NewFunction(args);
#ifdef DEBUG
- LookupIterator it(prototype, factory()->constructor_string(),
+ LookupIterator it(isolate(), prototype, factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> value = Object::GetProperty(&it).ToHandleChecked();
DCHECK(it.IsFound());
@@ -1238,7 +1254,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
#endif
} else {
Handle<FunctionTemplateInfo> js_global_object_constructor(
- FunctionTemplateInfo::cast(js_global_object_template->constructor()));
+ FunctionTemplateInfo::cast(js_global_object_template->constructor()),
+ isolate());
js_global_object_function = ApiNatives::CreateApiFunction(
isolate(), js_global_object_constructor, factory()->the_hole_value(),
ApiNatives::GlobalObjectType);
@@ -1254,7 +1271,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
// Step 2: (re)initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
- Handle<String> name(factory()->empty_string());
+ Handle<String> name = factory()->empty_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
name, factory()->the_hole_value(), JS_GLOBAL_PROXY_TYPE,
JSGlobalProxy::SizeWithEmbedderFields(0), 0, Builtins::kIllegal,
@@ -1264,7 +1281,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Handle<ObjectTemplateInfo> data =
v8::Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor(
- FunctionTemplateInfo::cast(data->constructor()));
+ FunctionTemplateInfo::cast(data->constructor()), isolate());
global_proxy_function = ApiNatives::CreateApiFunction(
isolate(), global_constructor, factory()->the_hole_value(),
ApiNatives::GlobalProxyType);
@@ -1300,10 +1317,10 @@ void Genesis::HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy) {
// Re-initialize the global proxy with the global proxy function from the
// snapshot, and then set up the link to the native context.
Handle<JSFunction> global_proxy_function(
- native_context()->global_proxy_function());
+ native_context()->global_proxy_function(), isolate());
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
Handle<JSObject> global_object(
- JSObject::cast(native_context()->global_object()));
+ JSObject::cast(native_context()->global_object()), isolate());
JSObject::ForceSetPrototype(global_proxy, global_object);
global_proxy->set_native_context(*native_context());
DCHECK(native_context()->global_proxy() == *global_proxy);
@@ -1311,7 +1328,7 @@ void Genesis::HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy) {
void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
Handle<JSGlobalObject> global_object_from_snapshot(
- JSGlobalObject::cast(native_context()->extension()));
+ JSGlobalObject::cast(native_context()->extension()), isolate());
native_context()->set_extension(*global_object);
native_context()->set_security_token(*global_object);
@@ -1323,8 +1340,9 @@ static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
Handle<JSFunction> function,
int context_index) {
Handle<Smi> index(Smi::FromInt(context_index), isolate);
- JSObject::AddProperty(
- function, isolate->factory()->native_context_index_symbol(), index, NONE);
+ JSObject::AddProperty(isolate, function,
+ isolate->factory()->native_context_index_symbol(),
+ index, NONE);
isolate->native_context()->set(context_index, *function);
}
@@ -1333,13 +1351,13 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Factory* factory = isolate->factory();
Handle<JSFunction> error_fun = InstallFunction(
- global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
+ isolate, global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
error_fun->shared()->DontAdaptArguments();
error_fun->shared()->set_length(1);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
- SimpleInstallFunction(error_fun, "captureStackTrace",
+ SimpleInstallFunction(isolate, error_fun, "captureStackTrace",
Builtins::kErrorCaptureStackTrace, 2, false);
}
@@ -1347,22 +1365,24 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
{
// Setup %XXXErrorPrototype%.
- Handle<JSObject> prototype(JSObject::cast(error_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(error_fun->instance_prototype()),
+ isolate);
- JSObject::AddProperty(prototype, factory->name_string(), name, DONT_ENUM);
- JSObject::AddProperty(prototype, factory->message_string(),
+ JSObject::AddProperty(isolate, prototype, factory->name_string(), name,
+ DONT_ENUM);
+ JSObject::AddProperty(isolate, prototype, factory->message_string(),
factory->empty_string(), DONT_ENUM);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
Handle<JSFunction> to_string_fun =
- SimpleInstallFunction(prototype, factory->toString_string(),
+ SimpleInstallFunction(isolate, prototype, factory->toString_string(),
Builtins::kErrorPrototypeToString, 0, true);
isolate->native_context()->set_error_to_string(*to_string_fun);
isolate->native_context()->set_initial_error_prototype(*prototype);
} else {
DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
- InstallFunction(prototype, isolate->error_to_string(),
+ InstallFunction(isolate, prototype, isolate->error_to_string(),
factory->toString_string(), DONT_ENUM);
Handle<JSFunction> global_error = isolate->error_function();
@@ -1376,8 +1396,8 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
}
}
- Handle<Map> initial_map(error_fun->initial_map());
- Map::EnsureDescriptorSlack(initial_map, 1);
+ Handle<Map> initial_map(error_fun->initial_map(), isolate);
+ Map::EnsureDescriptorSlack(isolate, initial_map, 1);
{
Handle<AccessorInfo> info = factory->error_stack_accessor();
@@ -1417,8 +1437,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// by default even in case of global object reinitialization.
native_context()->set_security_token(*global_object);
- Isolate* isolate = global_object->GetIsolate();
- Factory* factory = isolate->factory();
+ Factory* factory = isolate_->factory();
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
@@ -1427,206 +1446,214 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- O b j e c t ---
Handle<String> object_name = factory->Object_string();
- Handle<JSFunction> object_function = isolate->object_function();
- JSObject::AddProperty(global_object, object_name, object_function,
+ Handle<JSFunction> object_function = isolate_->object_function();
+ JSObject::AddProperty(isolate_, global_object, object_name, object_function,
DONT_ENUM);
- SimpleInstallFunction(object_function, factory->assign_string(),
+ SimpleInstallFunction(isolate_, object_function, factory->assign_string(),
Builtins::kObjectAssign, 2, false);
- SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
+ SimpleInstallFunction(isolate_, object_function, "getOwnPropertyDescriptor",
Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
- SimpleInstallFunction(object_function,
+ SimpleInstallFunction(isolate_, object_function,
factory->getOwnPropertyDescriptors_string(),
Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
- SimpleInstallFunction(object_function, "getOwnPropertyNames",
- Builtins::kObjectGetOwnPropertyNames, 1, false);
- SimpleInstallFunction(object_function, "getOwnPropertySymbols",
+ SimpleInstallFunction(isolate_, object_function, "getOwnPropertyNames",
+ Builtins::kObjectGetOwnPropertyNames, 1, true);
+ SimpleInstallFunction(isolate_, object_function, "getOwnPropertySymbols",
Builtins::kObjectGetOwnPropertySymbols, 1, false);
- SimpleInstallFunction(object_function, "is",
- Builtins::kObjectIs, 2, true);
- SimpleInstallFunction(object_function, "preventExtensions",
+ SimpleInstallFunction(isolate_, object_function, "is", Builtins::kObjectIs,
+ 2, true);
+ SimpleInstallFunction(isolate_, object_function, "preventExtensions",
Builtins::kObjectPreventExtensions, 1, false);
- SimpleInstallFunction(object_function, "seal",
+ SimpleInstallFunction(isolate_, object_function, "seal",
Builtins::kObjectSeal, 1, false);
- Handle<JSFunction> object_create =
- SimpleInstallFunction(object_function, factory->create_string(),
- Builtins::kObjectCreate, 2, false);
+ Handle<JSFunction> object_create = SimpleInstallFunction(
+ isolate_, object_function, factory->create_string(),
+ Builtins::kObjectCreate, 2, false);
native_context()->set_object_create(*object_create);
- Handle<JSFunction> object_define_properties = SimpleInstallFunction(
- object_function, "defineProperties",
- Builtins::kObjectDefineProperties, 2, true);
+ Handle<JSFunction> object_define_properties =
+ SimpleInstallFunction(isolate_, object_function, "defineProperties",
+ Builtins::kObjectDefineProperties, 2, true);
native_context()->set_object_define_properties(*object_define_properties);
Handle<JSFunction> object_define_property = SimpleInstallFunction(
- object_function, factory->defineProperty_string(),
+ isolate_, object_function, factory->defineProperty_string(),
Builtins::kObjectDefineProperty, 3, true);
native_context()->set_object_define_property(*object_define_property);
- SimpleInstallFunction(object_function, "freeze", Builtins::kObjectFreeze, 1,
- false);
+ SimpleInstallFunction(isolate_, object_function, "freeze",
+ Builtins::kObjectFreeze, 1, false);
- Handle<JSFunction> object_get_prototype_of = SimpleInstallFunction(
- object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
- 1, false);
+ Handle<JSFunction> object_get_prototype_of =
+ SimpleInstallFunction(isolate_, object_function, "getPrototypeOf",
+ Builtins::kObjectGetPrototypeOf, 1, false);
native_context()->set_object_get_prototype_of(*object_get_prototype_of);
- SimpleInstallFunction(object_function, "setPrototypeOf",
+ SimpleInstallFunction(isolate_, object_function, "setPrototypeOf",
Builtins::kObjectSetPrototypeOf, 2, false);
- SimpleInstallFunction(object_function, "isExtensible",
+ SimpleInstallFunction(isolate_, object_function, "isExtensible",
Builtins::kObjectIsExtensible, 1, false);
- SimpleInstallFunction(object_function, "isFrozen",
+ SimpleInstallFunction(isolate_, object_function, "isFrozen",
Builtins::kObjectIsFrozen, 1, false);
- Handle<JSFunction> object_is_sealed = SimpleInstallFunction(
- object_function, "isSealed", Builtins::kObjectIsSealed, 1, false);
+ Handle<JSFunction> object_is_sealed =
+ SimpleInstallFunction(isolate_, object_function, "isSealed",
+ Builtins::kObjectIsSealed, 1, false);
native_context()->set_object_is_sealed(*object_is_sealed);
Handle<JSFunction> object_keys = SimpleInstallFunction(
- object_function, "keys", Builtins::kObjectKeys, 1, true);
+ isolate_, object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
- SimpleInstallFunction(object_function, factory->entries_string(),
+ SimpleInstallFunction(isolate_, object_function, factory->entries_string(),
Builtins::kObjectEntries, 1, true);
- SimpleInstallFunction(object_function, factory->values_string(),
+ SimpleInstallFunction(isolate_, object_function, factory->values_string(),
Builtins::kObjectValues, 1, true);
- SimpleInstallFunction(isolate->initial_object_prototype(),
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
true);
- SimpleInstallFunction(isolate->initial_object_prototype(),
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
"__defineSetter__", Builtins::kObjectDefineSetter, 2,
true);
- SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
+ "hasOwnProperty",
Builtins::kObjectPrototypeHasOwnProperty, 1, true);
- SimpleInstallFunction(isolate->initial_object_prototype(),
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
"__lookupGetter__", Builtins::kObjectLookupGetter, 1,
true);
- SimpleInstallFunction(isolate->initial_object_prototype(),
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
"__lookupSetter__", Builtins::kObjectLookupSetter, 1,
true);
- SimpleInstallFunction(isolate->initial_object_prototype(), "isPrototypeOf",
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
+ "isPrototypeOf",
Builtins::kObjectPrototypeIsPrototypeOf, 1, true);
SimpleInstallFunction(
- isolate->initial_object_prototype(), "propertyIsEnumerable",
+ isolate_, isolate_->initial_object_prototype(), "propertyIsEnumerable",
Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
- Handle<JSFunction> object_to_string = SimpleInstallFunction(
- isolate->initial_object_prototype(), factory->toString_string(),
- Builtins::kObjectPrototypeToString, 0, true);
+ Handle<JSFunction> object_to_string =
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
+ factory->toString_string(),
+ Builtins::kObjectPrototypeToString, 0, true);
native_context()->set_object_to_string(*object_to_string);
Handle<JSFunction> object_value_of = SimpleInstallFunction(
- isolate->initial_object_prototype(), "valueOf",
+ isolate_, isolate_->initial_object_prototype(), "valueOf",
Builtins::kObjectPrototypeValueOf, 0, true);
native_context()->set_object_value_of(*object_value_of);
- SimpleInstallGetterSetter(isolate->initial_object_prototype(),
+ SimpleInstallGetterSetter(isolate_, isolate_->initial_object_prototype(),
factory->proto_string(),
Builtins::kObjectPrototypeGetProto,
Builtins::kObjectPrototypeSetProto, DONT_ENUM);
- SimpleInstallFunction(isolate->initial_object_prototype(), "toLocaleString",
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
+ "toLocaleString",
Builtins::kObjectPrototypeToLocaleString, 0, true);
}
- Handle<JSObject> global(native_context()->global_object());
+ Handle<JSObject> global(native_context()->global_object(), isolate());
{ // --- F u n c t i o n ---
Handle<JSFunction> prototype = empty_function;
- Handle<JSFunction> function_fun = InstallFunction(
- global, "Function", JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype, 0,
- prototype, Builtins::kFunctionConstructor);
+ Handle<JSFunction> function_fun =
+ InstallFunction(isolate_, global, "Function", JS_FUNCTION_TYPE,
+ JSFunction::kSizeWithPrototype, 0, prototype,
+ Builtins::kFunctionConstructor);
// Function instances are sloppy by default.
- function_fun->set_prototype_or_initial_map(*isolate->sloppy_function_map());
+ function_fun->set_prototype_or_initial_map(
+ *isolate_->sloppy_function_map());
function_fun->shared()->DontAdaptArguments();
function_fun->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, function_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, function_fun,
Context::FUNCTION_FUNCTION_INDEX);
// Setup the methods on the %FunctionPrototype%.
- JSObject::AddProperty(prototype, factory->constructor_string(),
+ JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
function_fun, DONT_ENUM);
- SimpleInstallFunction(prototype, factory->apply_string(),
+ SimpleInstallFunction(isolate_, prototype, factory->apply_string(),
Builtins::kFunctionPrototypeApply, 2, false);
- SimpleInstallFunction(prototype, factory->bind_string(),
+ SimpleInstallFunction(isolate_, prototype, factory->bind_string(),
Builtins::kFastFunctionPrototypeBind, 1, false);
- SimpleInstallFunction(prototype, factory->call_string(),
+ SimpleInstallFunction(isolate_, prototype, factory->call_string(),
Builtins::kFunctionPrototypeCall, 1, false);
- SimpleInstallFunction(prototype, factory->toString_string(),
+ SimpleInstallFunction(isolate_, prototype, factory->toString_string(),
Builtins::kFunctionPrototypeToString, 0, false);
// Install the @@hasInstance function.
Handle<JSFunction> has_instance = SimpleInstallFunction(
- prototype, factory->has_instance_symbol(), "[Symbol.hasInstance]",
- Builtins::kFunctionPrototypeHasInstance, 1, true,
+ isolate_, prototype, factory->has_instance_symbol(),
+ "[Symbol.hasInstance]", Builtins::kFunctionPrototypeHasInstance, 1,
+ true,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY),
kFunctionHasInstance);
native_context()->set_function_has_instance(*has_instance);
// Complete setting up function maps.
{
- isolate->sloppy_function_map()->SetConstructor(*function_fun);
- isolate->sloppy_function_with_name_map()->SetConstructor(*function_fun);
- isolate->sloppy_function_with_readonly_prototype_map()->SetConstructor(
+ isolate_->sloppy_function_map()->SetConstructor(*function_fun);
+ isolate_->sloppy_function_with_name_map()->SetConstructor(*function_fun);
+ isolate_->sloppy_function_with_readonly_prototype_map()->SetConstructor(
*function_fun);
- isolate->strict_function_map()->SetConstructor(*function_fun);
- isolate->strict_function_with_name_map()->SetConstructor(*function_fun);
+ isolate_->strict_function_map()->SetConstructor(*function_fun);
+ isolate_->strict_function_with_name_map()->SetConstructor(*function_fun);
strict_function_with_home_object_map_->SetConstructor(*function_fun);
strict_function_with_name_and_home_object_map_->SetConstructor(
*function_fun);
- isolate->strict_function_with_readonly_prototype_map()->SetConstructor(
+ isolate_->strict_function_with_readonly_prototype_map()->SetConstructor(
*function_fun);
- isolate->class_function_map()->SetConstructor(*function_fun);
+ isolate_->class_function_map()->SetConstructor(*function_fun);
}
}
{ // --- A s y n c F r o m S y n c I t e r a t o r
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncIteratorValueUnwrap, factory->empty_string(),
+ isolate_, Builtins::kAsyncIteratorValueUnwrap, factory->empty_string(),
1);
native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
}
{ // --- A s y n c G e n e r a t o r ---
Handle<JSFunction> await_caught =
- SimpleCreateFunction(isolate, factory->empty_string(),
+ SimpleCreateFunction(isolate_, factory->empty_string(),
Builtins::kAsyncGeneratorAwaitCaught, 1, false);
native_context()->set_async_generator_await_caught(*await_caught);
Handle<JSFunction> await_uncaught =
- SimpleCreateFunction(isolate, factory->empty_string(),
+ SimpleCreateFunction(isolate_, factory->empty_string(),
Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
native_context()->set_async_generator_await_uncaught(*await_uncaught);
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitResolveClosure,
+ isolate_, Builtins::kAsyncGeneratorAwaitResolveClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_await_resolve_shared_fun(*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitRejectClosure,
+ isolate_, Builtins::kAsyncGeneratorAwaitRejectClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_await_reject_shared_fun(*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorYieldResolveClosure,
+ isolate_, Builtins::kAsyncGeneratorYieldResolveClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_yield_resolve_shared_fun(*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnResolveClosure,
+ isolate_, Builtins::kAsyncGeneratorReturnResolveClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_return_resolve_shared_fun(*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
+ isolate_, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_return_closed_resolve_shared_fun(
*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
+ isolate_, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
factory->empty_string(), 1);
native_context()->set_async_generator_return_closed_reject_shared_fun(
*info);
@@ -1634,8 +1661,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
- global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
- isolate->initial_object_prototype(), Builtins::kArrayConstructor);
+ isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
+ isolate_->initial_object_prototype(), Builtins::kArrayConstructor);
array_function->shared()->DontAdaptArguments();
array_function->shared()->set_builtin_function_id(kArrayConstructor);
@@ -1643,12 +1670,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// is 1.
array_function->shared()->set_length(1);
- Handle<Map> initial_map(array_function->initial_map());
+ Handle<Map> initial_map(array_function->initial_map(), isolate());
// This assert protects an optimization in
// HGraphBuilder::JSArrayBuilder::EmitMapCode()
DCHECK(initial_map->elements_kind() == GetInitialFastElementsKind());
- Map::EnsureDescriptorSlack(initial_map, 1);
+ Map::EnsureDescriptorSlack(isolate_, initial_map, 1);
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
@@ -1660,9 +1687,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
initial_map->AppendDescriptor(&d);
}
- InstallWithIntrinsicDefaultProto(isolate, array_function,
+ InstallWithIntrinsicDefaultProto(isolate_, array_function,
Context::ARRAY_FUNCTION_INDEX);
- InstallSpeciesGetter(array_function);
+ InstallSpeciesGetter(isolate_, array_function);
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
@@ -1677,74 +1704,87 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_initial_array_prototype(*proto);
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
- array_function, "isArray", Builtins::kArrayIsArray, 1, true);
+ isolate_, array_function, "isArray", Builtins::kArrayIsArray, 1, true);
native_context()->set_is_arraylike(*is_arraylike);
- SimpleInstallFunction(array_function, "from", Builtins::kArrayFrom, 1,
+ SimpleInstallFunction(isolate_, array_function, "from",
+ Builtins::kArrayFrom, 1, false);
+ SimpleInstallFunction(isolate_, array_function, "of", Builtins::kArrayOf, 0,
false);
- SimpleInstallFunction(array_function, "of", Builtins::kArrayOf, 0, false);
- JSObject::AddProperty(proto, factory->constructor_string(), array_function,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, proto, factory->constructor_string(),
+ array_function, DONT_ENUM);
- SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
- SimpleInstallFunction(proto, "find", Builtins::kArrayPrototypeFind, 1,
+ SimpleInstallFunction(isolate_, proto, "concat", Builtins::kArrayConcat, 1,
false);
- SimpleInstallFunction(proto, "findIndex",
+ SimpleInstallFunction(isolate_, proto, "find",
+ Builtins::kArrayPrototypeFind, 1, false);
+ SimpleInstallFunction(isolate_, proto, "findIndex",
Builtins::kArrayPrototypeFindIndex, 1, false);
- SimpleInstallFunction(proto, "pop", Builtins::kArrayPrototypePop, 0, false);
- SimpleInstallFunction(proto, "push", Builtins::kArrayPrototypePush, 1,
- false);
- SimpleInstallFunction(proto, "shift", Builtins::kArrayPrototypeShift, 0,
- false);
- SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
- SimpleInstallFunction(proto, "slice", Builtins::kArrayPrototypeSlice, 2,
- false);
+ SimpleInstallFunction(isolate_, proto, "pop", Builtins::kArrayPrototypePop,
+ 0, false);
+ SimpleInstallFunction(isolate_, proto, "push",
+ Builtins::kArrayPrototypePush, 1, false);
+ SimpleInstallFunction(isolate_, proto, "shift",
+ Builtins::kArrayPrototypeShift, 0, false);
+ SimpleInstallFunction(isolate_, proto, "unshift", Builtins::kArrayUnshift,
+ 1, false);
+ SimpleInstallFunction(isolate_, proto, "slice",
+ Builtins::kArrayPrototypeSlice, 2, false);
if (FLAG_enable_experimental_builtins) {
- SimpleInstallFunction(proto, "splice", Builtins::kArraySpliceTorque, 2,
- false);
+ SimpleInstallFunction(isolate_, proto, "splice",
+ Builtins::kArraySpliceTorque, 2, false);
} else {
- SimpleInstallFunction(proto, "splice", Builtins::kArraySplice, 2, false);
+ SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice,
+ 2, false);
}
- SimpleInstallFunction(proto, "includes", Builtins::kArrayIncludes, 1,
- false);
- SimpleInstallFunction(proto, "indexOf", Builtins::kArrayIndexOf, 1, false);
- SimpleInstallFunction(proto, "keys", Builtins::kArrayPrototypeKeys, 0, true,
- kArrayKeys);
- SimpleInstallFunction(proto, "entries", Builtins::kArrayPrototypeEntries, 0,
- true, kArrayEntries);
- SimpleInstallFunction(proto, factory->iterator_symbol(), "values",
+ SimpleInstallFunction(isolate_, proto, "includes", Builtins::kArrayIncludes,
+ 1, false);
+ SimpleInstallFunction(isolate_, proto, "indexOf", Builtins::kArrayIndexOf,
+ 1, false);
+ SimpleInstallFunction(isolate_, proto, "keys",
+ Builtins::kArrayPrototypeKeys, 0, true, kArrayKeys);
+ SimpleInstallFunction(isolate_, proto, "entries",
+ Builtins::kArrayPrototypeEntries, 0, true,
+ kArrayEntries);
+ SimpleInstallFunction(isolate_, proto, factory->iterator_symbol(), "values",
Builtins::kArrayPrototypeValues, 0, true, DONT_ENUM,
kArrayValues);
- SimpleInstallFunction(proto, "forEach", Builtins::kArrayForEach, 1, false);
- SimpleInstallFunction(proto, "filter", Builtins::kArrayFilter, 1, false);
- SimpleInstallFunction(proto, "map", Builtins::kArrayMap, 1, false);
- SimpleInstallFunction(proto, "every", Builtins::kArrayEvery, 1, false);
- SimpleInstallFunction(proto, "some", Builtins::kArraySome, 1, false);
- SimpleInstallFunction(proto, "reduce", Builtins::kArrayReduce, 1, false);
- SimpleInstallFunction(proto, "reduceRight", Builtins::kArrayReduceRight, 1,
+ SimpleInstallFunction(isolate_, proto, "forEach", Builtins::kArrayForEach,
+ 1, false);
+ SimpleInstallFunction(isolate_, proto, "filter", Builtins::kArrayFilter, 1,
false);
+ SimpleInstallFunction(isolate_, proto, "map", Builtins::kArrayMap, 1,
+ false);
+ SimpleInstallFunction(isolate_, proto, "every", Builtins::kArrayEvery, 1,
+ false);
+ SimpleInstallFunction(isolate_, proto, "some", Builtins::kArraySome, 1,
+ false);
+ SimpleInstallFunction(isolate_, proto, "reduce", Builtins::kArrayReduce, 1,
+ false);
+ SimpleInstallFunction(isolate_, proto, "reduceRight",
+ Builtins::kArrayReduceRight, 1, false);
}
{ // --- A r r a y I t e r a t o r ---
Handle<JSObject> iterator_prototype(
- native_context()->initial_iterator_prototype());
+ native_context()->initial_iterator_prototype(), isolate());
Handle<JSObject> array_iterator_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
JSObject::ForceSetPrototype(array_iterator_prototype, iterator_prototype);
JSObject::AddProperty(
- array_iterator_prototype, factory->to_string_tag_symbol(),
+ isolate_, array_iterator_prototype, factory->to_string_tag_symbol(),
factory->ArrayIterator_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(array_iterator_prototype, "next",
+ SimpleInstallFunction(isolate_, array_iterator_prototype, "next",
Builtins::kArrayIteratorPrototypeNext, 0, true,
kArrayIteratorNext);
Handle<JSFunction> array_iterator_function =
- CreateFunction(isolate, factory->ArrayIterator_string(),
+ CreateFunction(isolate_, factory->ArrayIterator_string(),
JS_ARRAY_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
@@ -1757,12 +1797,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- N u m b e r ---
Handle<JSFunction> number_fun = InstallFunction(
- global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate->initial_object_prototype(), Builtins::kNumberConstructor);
+ isolate_, global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
+ isolate_->initial_object_prototype(), Builtins::kNumberConstructor);
number_fun->shared()->set_builtin_function_id(kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, number_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, number_fun,
Context::NUMBER_FUNCTION_INDEX);
// Create the %NumberPrototype%
@@ -1772,45 +1812,47 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetPrototype(number_fun, prototype);
// Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), number_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
+ number_fun, DONT_ENUM);
// Install the Number.prototype methods.
- SimpleInstallFunction(prototype, "toExponential",
+ SimpleInstallFunction(isolate_, prototype, "toExponential",
Builtins::kNumberPrototypeToExponential, 1, false);
- SimpleInstallFunction(prototype, "toFixed",
+ SimpleInstallFunction(isolate_, prototype, "toFixed",
Builtins::kNumberPrototypeToFixed, 1, false);
- SimpleInstallFunction(prototype, "toPrecision",
+ SimpleInstallFunction(isolate_, prototype, "toPrecision",
Builtins::kNumberPrototypeToPrecision, 1, false);
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kNumberPrototypeToString, 1, false);
- SimpleInstallFunction(prototype, "valueOf",
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kNumberPrototypeValueOf, 0, true);
// Install Intl fallback functions.
- SimpleInstallFunction(prototype, "toLocaleString",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleString",
Builtins::kNumberPrototypeToLocaleString, 0, false);
// Install the Number functions.
- SimpleInstallFunction(number_fun, "isFinite", Builtins::kNumberIsFinite, 1,
- true);
- SimpleInstallFunction(number_fun, "isInteger", Builtins::kNumberIsInteger,
+ SimpleInstallFunction(isolate_, number_fun, "isFinite",
+ Builtins::kNumberIsFinite, 1, true);
+ SimpleInstallFunction(isolate_, number_fun, "isInteger",
+ Builtins::kNumberIsInteger, 1, true);
+ SimpleInstallFunction(isolate_, number_fun, "isNaN", Builtins::kNumberIsNaN,
1, true);
- SimpleInstallFunction(number_fun, "isNaN", Builtins::kNumberIsNaN, 1, true);
- SimpleInstallFunction(number_fun, "isSafeInteger",
+ SimpleInstallFunction(isolate_, number_fun, "isSafeInteger",
Builtins::kNumberIsSafeInteger, 1, true);
// Install Number.parseFloat and Global.parseFloat.
- Handle<JSFunction> parse_float_fun = SimpleInstallFunction(
- number_fun, "parseFloat", Builtins::kNumberParseFloat, 1, true);
- JSObject::AddProperty(global_object,
+ Handle<JSFunction> parse_float_fun =
+ SimpleInstallFunction(isolate_, number_fun, "parseFloat",
+ Builtins::kNumberParseFloat, 1, true);
+ JSObject::AddProperty(isolate_, global_object,
factory->NewStringFromAsciiChecked("parseFloat"),
parse_float_fun, DONT_ENUM);
// Install Number.parseInt and Global.parseInt.
Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
- number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
- JSObject::AddProperty(global_object,
+ isolate_, number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
+ JSObject::AddProperty(isolate_, global_object,
factory->NewStringFromAsciiChecked("parseInt"),
parse_int_fun, DONT_ENUM);
@@ -1825,89 +1867,93 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<String> nan_name = factory->NewStringFromAsciiChecked("NaN");
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("MAX_VALUE"),
+ isolate_, number_fun, factory->NewStringFromAsciiChecked("MAX_VALUE"),
factory->NewNumber(kMaxValue),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("MIN_VALUE"),
+ isolate_, number_fun, factory->NewStringFromAsciiChecked("MIN_VALUE"),
factory->NewNumber(kMinValue),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, nan_name, nan,
+ isolate_, number_fun, nan_name, nan,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("NEGATIVE_INFINITY"),
+ isolate_, number_fun,
+ factory->NewStringFromAsciiChecked("NEGATIVE_INFINITY"),
factory->NewNumber(-V8_INFINITY),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("POSITIVE_INFINITY"),
- infinity,
+ isolate_, number_fun,
+ factory->NewStringFromAsciiChecked("POSITIVE_INFINITY"), infinity,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("MAX_SAFE_INTEGER"),
+ isolate_, number_fun,
+ factory->NewStringFromAsciiChecked("MAX_SAFE_INTEGER"),
factory->NewNumber(kMaxSafeInteger),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("MIN_SAFE_INTEGER"),
+ isolate_, number_fun,
+ factory->NewStringFromAsciiChecked("MIN_SAFE_INTEGER"),
factory->NewNumber(kMinSafeInteger),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- number_fun, factory->NewStringFromAsciiChecked("EPSILON"),
+ isolate_, number_fun, factory->NewStringFromAsciiChecked("EPSILON"),
factory->NewNumber(kEPS),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- global, factory->NewStringFromAsciiChecked("Infinity"), infinity,
+ isolate_, global, factory->NewStringFromAsciiChecked("Infinity"),
+ infinity,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- global, nan_name, nan,
+ isolate_, global, nan_name, nan,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- global, factory->NewStringFromAsciiChecked("undefined"),
+ isolate_, global, factory->NewStringFromAsciiChecked("undefined"),
factory->undefined_value(),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun = InstallFunction(
- global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate->initial_object_prototype(), Builtins::kBooleanConstructor);
+ isolate_, global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
+ isolate_->initial_object_prototype(), Builtins::kBooleanConstructor);
boolean_fun->shared()->DontAdaptArguments();
boolean_fun->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
// Create the %BooleanPrototype%
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(boolean_fun, TENURED));
- prototype->set_value(isolate->heap()->false_value());
+ prototype->set_value(ReadOnlyRoots(isolate_).false_value());
JSFunction::SetPrototype(boolean_fun, prototype);
// Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), boolean_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
+ boolean_fun, DONT_ENUM);
// Install the Boolean.prototype methods.
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kBooleanPrototypeToString, 0, true);
- SimpleInstallFunction(prototype, "valueOf",
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kBooleanPrototypeValueOf, 0, true);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
- global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate->initial_object_prototype(), Builtins::kStringConstructor);
+ isolate_, global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
+ isolate_->initial_object_prototype(), Builtins::kStringConstructor);
string_fun->shared()->set_builtin_function_id(kStringConstructor);
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, string_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, string_fun,
Context::STRING_FUNCTION_INDEX);
- Handle<Map> string_map =
- Handle<Map>(native_context()->string_function()->initial_map());
+ Handle<Map> string_map = Handle<Map>(
+ native_context()->string_function()->initial_map(), isolate());
string_map->set_elements_kind(FAST_STRING_WRAPPER_ELEMENTS);
- Map::EnsureDescriptorSlack(string_map, 1);
+ Map::EnsureDescriptorSlack(isolate_, string_map, 1);
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1919,130 +1965,131 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
// Install the String.fromCharCode function.
- SimpleInstallFunction(string_fun, "fromCharCode",
+ SimpleInstallFunction(isolate_, string_fun, "fromCharCode",
Builtins::kStringFromCharCode, 1, false);
// Install the String.fromCodePoint function.
- SimpleInstallFunction(string_fun, "fromCodePoint",
+ SimpleInstallFunction(isolate_, string_fun, "fromCodePoint",
Builtins::kStringFromCodePoint, 1, false);
// Install the String.raw function.
- SimpleInstallFunction(string_fun, "raw", Builtins::kStringRaw, 1, false);
+ SimpleInstallFunction(isolate_, string_fun, "raw", Builtins::kStringRaw, 1,
+ false);
// Create the %StringPrototype%
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(string_fun, TENURED));
- prototype->set_value(isolate->heap()->empty_string());
+ prototype->set_value(ReadOnlyRoots(isolate_).empty_string());
JSFunction::SetPrototype(string_fun, prototype);
native_context()->set_initial_string_prototype(*prototype);
// Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), string_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
+ string_fun, DONT_ENUM);
// Install the String.prototype methods.
- SimpleInstallFunction(prototype, "anchor", Builtins::kStringPrototypeAnchor,
- 1, true);
- SimpleInstallFunction(prototype, "big", Builtins::kStringPrototypeBig, 0,
- true);
- SimpleInstallFunction(prototype, "blink", Builtins::kStringPrototypeBlink,
- 0, true);
- SimpleInstallFunction(prototype, "bold", Builtins::kStringPrototypeBold, 0,
- true);
- SimpleInstallFunction(prototype, "charAt", Builtins::kStringPrototypeCharAt,
- 1, true);
- SimpleInstallFunction(prototype, "charCodeAt",
+ SimpleInstallFunction(isolate_, prototype, "anchor",
+ Builtins::kStringPrototypeAnchor, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "big",
+ Builtins::kStringPrototypeBig, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "blink",
+ Builtins::kStringPrototypeBlink, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "bold",
+ Builtins::kStringPrototypeBold, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "charAt",
+ Builtins::kStringPrototypeCharAt, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "charCodeAt",
Builtins::kStringPrototypeCharCodeAt, 1, true);
- SimpleInstallFunction(prototype, "codePointAt",
+ SimpleInstallFunction(isolate_, prototype, "codePointAt",
Builtins::kStringPrototypeCodePointAt, 1, true);
- SimpleInstallFunction(prototype, "concat", Builtins::kStringPrototypeConcat,
- 1, false);
- SimpleInstallFunction(prototype, "endsWith",
+ SimpleInstallFunction(isolate_, prototype, "concat",
+ Builtins::kStringPrototypeConcat, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "endsWith",
Builtins::kStringPrototypeEndsWith, 1, false);
- SimpleInstallFunction(prototype, "fontcolor",
+ SimpleInstallFunction(isolate_, prototype, "fontcolor",
Builtins::kStringPrototypeFontcolor, 1, true);
- SimpleInstallFunction(prototype, "fontsize",
+ SimpleInstallFunction(isolate_, prototype, "fontsize",
Builtins::kStringPrototypeFontsize, 1, true);
- SimpleInstallFunction(prototype, "fixed", Builtins::kStringPrototypeFixed,
- 0, true);
- SimpleInstallFunction(prototype, "includes",
+ SimpleInstallFunction(isolate_, prototype, "fixed",
+ Builtins::kStringPrototypeFixed, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "includes",
Builtins::kStringPrototypeIncludes, 1, false);
- SimpleInstallFunction(prototype, "indexOf",
+ SimpleInstallFunction(isolate_, prototype, "indexOf",
Builtins::kStringPrototypeIndexOf, 1, false);
- SimpleInstallFunction(prototype, "italics",
+ SimpleInstallFunction(isolate_, prototype, "italics",
Builtins::kStringPrototypeItalics, 0, true);
- SimpleInstallFunction(prototype, "lastIndexOf",
+ SimpleInstallFunction(isolate_, prototype, "lastIndexOf",
Builtins::kStringPrototypeLastIndexOf, 1, false);
- SimpleInstallFunction(prototype, "link", Builtins::kStringPrototypeLink, 1,
- true);
- SimpleInstallFunction(prototype, "localeCompare",
+ SimpleInstallFunction(isolate_, prototype, "link",
+ Builtins::kStringPrototypeLink, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "localeCompare",
Builtins::kStringPrototypeLocaleCompare, 1, true);
- SimpleInstallFunction(prototype, "match", Builtins::kStringPrototypeMatch,
- 1, true);
+ SimpleInstallFunction(isolate_, prototype, "match",
+ Builtins::kStringPrototypeMatch, 1, true);
#ifdef V8_INTL_SUPPORT
- SimpleInstallFunction(prototype, "normalize",
+ SimpleInstallFunction(isolate_, prototype, "normalize",
Builtins::kStringPrototypeNormalizeIntl, 0, false);
#else
- SimpleInstallFunction(prototype, "normalize",
+ SimpleInstallFunction(isolate_, prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
#endif // V8_INTL_SUPPORT
- SimpleInstallFunction(prototype, "padEnd", Builtins::kStringPrototypePadEnd,
- 1, false);
- SimpleInstallFunction(prototype, "padStart",
+ SimpleInstallFunction(isolate_, prototype, "padEnd",
+ Builtins::kStringPrototypePadEnd, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "padStart",
Builtins::kStringPrototypePadStart, 1, false);
- SimpleInstallFunction(prototype, "repeat", Builtins::kStringPrototypeRepeat,
- 1, true);
- SimpleInstallFunction(prototype, "replace",
+ SimpleInstallFunction(isolate_, prototype, "repeat",
+ Builtins::kStringPrototypeRepeat, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
- SimpleInstallFunction(prototype, "search", Builtins::kStringPrototypeSearch,
- 1, true);
- SimpleInstallFunction(prototype, "slice", Builtins::kStringPrototypeSlice,
- 2, false);
- SimpleInstallFunction(prototype, "small", Builtins::kStringPrototypeSmall,
- 0, true);
- SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
- 2, false);
- SimpleInstallFunction(prototype, "strike", Builtins::kStringPrototypeStrike,
- 0, true);
- SimpleInstallFunction(prototype, "sub", Builtins::kStringPrototypeSub, 0,
- true);
- SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
- 2, false);
- SimpleInstallFunction(prototype, "substring",
+ SimpleInstallFunction(isolate_, prototype, "search",
+ Builtins::kStringPrototypeSearch, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "slice",
+ Builtins::kStringPrototypeSlice, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "small",
+ Builtins::kStringPrototypeSmall, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "split",
+ Builtins::kStringPrototypeSplit, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "strike",
+ Builtins::kStringPrototypeStrike, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "sub",
+ Builtins::kStringPrototypeSub, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "substr",
+ Builtins::kStringPrototypeSubstr, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "substring",
Builtins::kStringPrototypeSubstring, 2, false);
- SimpleInstallFunction(prototype, "sup", Builtins::kStringPrototypeSup, 0,
- true);
- SimpleInstallFunction(prototype, "startsWith",
+ SimpleInstallFunction(isolate_, prototype, "sup",
+ Builtins::kStringPrototypeSup, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "startsWith",
Builtins::kStringPrototypeStartsWith, 1, false);
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kStringPrototypeToString, 0, true);
- SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
- false);
- SimpleInstallFunction(prototype, "trimLeft",
+ SimpleInstallFunction(isolate_, prototype, "trim",
+ Builtins::kStringPrototypeTrim, 0, false);
+ SimpleInstallFunction(isolate_, prototype, "trimLeft",
Builtins::kStringPrototypeTrimStart, 0, false);
- SimpleInstallFunction(prototype, "trimRight",
+ SimpleInstallFunction(isolate_, prototype, "trimRight",
Builtins::kStringPrototypeTrimEnd, 0, false);
#ifdef V8_INTL_SUPPORT
- SimpleInstallFunction(prototype, "toLowerCase",
+ SimpleInstallFunction(isolate_, prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
- SimpleInstallFunction(prototype, "toUpperCase",
+ SimpleInstallFunction(isolate_, prototype, "toUpperCase",
Builtins::kStringPrototypeToUpperCaseIntl, 0, false);
#else
- SimpleInstallFunction(prototype, "toLocaleLowerCase",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
false);
- SimpleInstallFunction(prototype, "toLocaleUpperCase",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleUpperCase",
Builtins::kStringPrototypeToLocaleUpperCase, 0,
false);
- SimpleInstallFunction(prototype, "toLowerCase",
+ SimpleInstallFunction(isolate_, prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCase, 0, false);
- SimpleInstallFunction(prototype, "toUpperCase",
+ SimpleInstallFunction(isolate_, prototype, "toUpperCase",
Builtins::kStringPrototypeToUpperCase, 0, false);
#endif
- SimpleInstallFunction(prototype, "valueOf",
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
- SimpleInstallFunction(prototype, factory->iterator_symbol(),
+ SimpleInstallFunction(isolate_, prototype, factory->iterator_symbol(),
"[Symbol.iterator]",
Builtins::kStringPrototypeIterator, 0, true,
DONT_ENUM, kStringIterator);
@@ -2050,23 +2097,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S t r i n g I t e r a t o r ---
Handle<JSObject> iterator_prototype(
- native_context()->initial_iterator_prototype());
+ native_context()->initial_iterator_prototype(), isolate());
Handle<JSObject> string_iterator_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
JSObject::ForceSetPrototype(string_iterator_prototype, iterator_prototype);
JSObject::AddProperty(
- string_iterator_prototype, factory->to_string_tag_symbol(),
+ isolate_, string_iterator_prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("String Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(string_iterator_prototype, "next",
+ SimpleInstallFunction(isolate_, string_iterator_prototype, "next",
Builtins::kStringIteratorPrototypeNext, 0, true,
kStringIteratorNext);
Handle<JSFunction> string_iterator_function = CreateFunction(
- isolate, factory->NewStringFromAsciiChecked("StringIterator"),
+ isolate_, factory->NewStringFromAsciiChecked("StringIterator"),
JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize, 0,
string_iterator_prototype, Builtins::kIllegal);
string_iterator_function->shared()->set_native(false);
@@ -2076,7 +2123,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S y m b o l ---
Handle<JSFunction> symbol_fun = InstallFunction(
- global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
+ isolate_, global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
symbol_fun->shared()->set_builtin_function_id(kSymbolConstructor);
symbol_fun->shared()->set_length(0);
@@ -2084,50 +2131,51 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_symbol_function(*symbol_fun);
// Install the Symbol.for and Symbol.keyFor functions.
- SimpleInstallFunction(symbol_fun, "for", Builtins::kSymbolFor, 1, false);
- SimpleInstallFunction(symbol_fun, "keyFor", Builtins::kSymbolKeyFor, 1,
+ SimpleInstallFunction(isolate_, symbol_fun, "for", Builtins::kSymbolFor, 1,
false);
+ SimpleInstallFunction(isolate_, symbol_fun, "keyFor",
+ Builtins::kSymbolKeyFor, 1, false);
// Install well-known symbols.
- InstallConstant(isolate, symbol_fun, "asyncIterator",
+ InstallConstant(isolate_, symbol_fun, "asyncIterator",
factory->async_iterator_symbol());
- InstallConstant(isolate, symbol_fun, "hasInstance",
+ InstallConstant(isolate_, symbol_fun, "hasInstance",
factory->has_instance_symbol());
- InstallConstant(isolate, symbol_fun, "isConcatSpreadable",
+ InstallConstant(isolate_, symbol_fun, "isConcatSpreadable",
factory->is_concat_spreadable_symbol());
- InstallConstant(isolate, symbol_fun, "iterator",
+ InstallConstant(isolate_, symbol_fun, "iterator",
factory->iterator_symbol());
- InstallConstant(isolate, symbol_fun, "match", factory->match_symbol());
- InstallConstant(isolate, symbol_fun, "replace", factory->replace_symbol());
- InstallConstant(isolate, symbol_fun, "search", factory->search_symbol());
- InstallConstant(isolate, symbol_fun, "species", factory->species_symbol());
- InstallConstant(isolate, symbol_fun, "split", factory->split_symbol());
- InstallConstant(isolate, symbol_fun, "toPrimitive",
+ InstallConstant(isolate_, symbol_fun, "match", factory->match_symbol());
+ InstallConstant(isolate_, symbol_fun, "replace", factory->replace_symbol());
+ InstallConstant(isolate_, symbol_fun, "search", factory->search_symbol());
+ InstallConstant(isolate_, symbol_fun, "species", factory->species_symbol());
+ InstallConstant(isolate_, symbol_fun, "split", factory->split_symbol());
+ InstallConstant(isolate_, symbol_fun, "toPrimitive",
factory->to_primitive_symbol());
- InstallConstant(isolate, symbol_fun, "toStringTag",
+ InstallConstant(isolate_, symbol_fun, "toStringTag",
factory->to_string_tag_symbol());
- InstallConstant(isolate, symbol_fun, "unscopables",
+ InstallConstant(isolate_, symbol_fun, "unscopables",
factory->unscopables_symbol());
// Setup %SymbolPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(symbol_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(symbol_fun->instance_prototype()),
+ isolate());
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("Symbol"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the Symbol.prototype methods.
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kSymbolPrototypeToString, 0, true);
- SimpleInstallFunction(prototype, "valueOf",
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kSymbolPrototypeValueOf, 0, true);
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
- prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
+ isolate_, prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
Builtins::kSymbolPrototypeToPrimitive,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -2140,122 +2188,126 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- D a t e ---
- Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, 0,
- factory->the_hole_value(), Builtins::kDateConstructor);
- InstallWithIntrinsicDefaultProto(isolate, date_fun,
+ Handle<JSFunction> date_fun = InstallFunction(
+ isolate_, global, "Date", JS_DATE_TYPE, JSDate::kSize, 0,
+ factory->the_hole_value(), Builtins::kDateConstructor);
+ InstallWithIntrinsicDefaultProto(isolate_, date_fun,
Context::DATE_FUNCTION_INDEX);
date_fun->shared()->set_length(7);
date_fun->shared()->DontAdaptArguments();
// Install the Date.now, Date.parse and Date.UTC functions.
- SimpleInstallFunction(date_fun, "now", Builtins::kDateNow, 0, false);
- SimpleInstallFunction(date_fun, "parse", Builtins::kDateParse, 1, false);
- SimpleInstallFunction(date_fun, "UTC", Builtins::kDateUTC, 7, false);
+ SimpleInstallFunction(isolate_, date_fun, "now", Builtins::kDateNow, 0,
+ false);
+ SimpleInstallFunction(isolate_, date_fun, "parse", Builtins::kDateParse, 1,
+ false);
+ SimpleInstallFunction(isolate_, date_fun, "UTC", Builtins::kDateUTC, 7,
+ false);
// Setup %DatePrototype%.
- Handle<JSObject> prototype(JSObject::cast(date_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(date_fun->instance_prototype()),
+ isolate());
// Install the Date.prototype methods.
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kDatePrototypeToString, 0, false);
- SimpleInstallFunction(prototype, "toDateString",
+ SimpleInstallFunction(isolate_, prototype, "toDateString",
Builtins::kDatePrototypeToDateString, 0, false);
- SimpleInstallFunction(prototype, "toTimeString",
+ SimpleInstallFunction(isolate_, prototype, "toTimeString",
Builtins::kDatePrototypeToTimeString, 0, false);
- SimpleInstallFunction(prototype, "toISOString",
+ SimpleInstallFunction(isolate_, prototype, "toISOString",
Builtins::kDatePrototypeToISOString, 0, false);
Handle<JSFunction> to_utc_string =
- SimpleInstallFunction(prototype, "toUTCString",
+ SimpleInstallFunction(isolate_, prototype, "toUTCString",
Builtins::kDatePrototypeToUTCString, 0, false);
- InstallFunction(prototype, to_utc_string,
+ InstallFunction(isolate_, prototype, to_utc_string,
factory->InternalizeUtf8String("toGMTString"), DONT_ENUM);
- SimpleInstallFunction(prototype, "getDate", Builtins::kDatePrototypeGetDate,
- 0, true);
- SimpleInstallFunction(prototype, "setDate", Builtins::kDatePrototypeSetDate,
- 1, false);
- SimpleInstallFunction(prototype, "getDay", Builtins::kDatePrototypeGetDay,
- 0, true);
- SimpleInstallFunction(prototype, "getFullYear",
+ SimpleInstallFunction(isolate_, prototype, "getDate",
+ Builtins::kDatePrototypeGetDate, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "setDate",
+ Builtins::kDatePrototypeSetDate, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "getDay",
+ Builtins::kDatePrototypeGetDay, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "getFullYear",
Builtins::kDatePrototypeGetFullYear, 0, true);
- SimpleInstallFunction(prototype, "setFullYear",
+ SimpleInstallFunction(isolate_, prototype, "setFullYear",
Builtins::kDatePrototypeSetFullYear, 3, false);
- SimpleInstallFunction(prototype, "getHours",
+ SimpleInstallFunction(isolate_, prototype, "getHours",
Builtins::kDatePrototypeGetHours, 0, true);
- SimpleInstallFunction(prototype, "setHours",
+ SimpleInstallFunction(isolate_, prototype, "setHours",
Builtins::kDatePrototypeSetHours, 4, false);
- SimpleInstallFunction(prototype, "getMilliseconds",
+ SimpleInstallFunction(isolate_, prototype, "getMilliseconds",
Builtins::kDatePrototypeGetMilliseconds, 0, true);
- SimpleInstallFunction(prototype, "setMilliseconds",
+ SimpleInstallFunction(isolate_, prototype, "setMilliseconds",
Builtins::kDatePrototypeSetMilliseconds, 1, false);
- SimpleInstallFunction(prototype, "getMinutes",
+ SimpleInstallFunction(isolate_, prototype, "getMinutes",
Builtins::kDatePrototypeGetMinutes, 0, true);
- SimpleInstallFunction(prototype, "setMinutes",
+ SimpleInstallFunction(isolate_, prototype, "setMinutes",
Builtins::kDatePrototypeSetMinutes, 3, false);
- SimpleInstallFunction(prototype, "getMonth",
+ SimpleInstallFunction(isolate_, prototype, "getMonth",
Builtins::kDatePrototypeGetMonth, 0, true);
- SimpleInstallFunction(prototype, "setMonth",
+ SimpleInstallFunction(isolate_, prototype, "setMonth",
Builtins::kDatePrototypeSetMonth, 2, false);
- SimpleInstallFunction(prototype, "getSeconds",
+ SimpleInstallFunction(isolate_, prototype, "getSeconds",
Builtins::kDatePrototypeGetSeconds, 0, true);
- SimpleInstallFunction(prototype, "setSeconds",
+ SimpleInstallFunction(isolate_, prototype, "setSeconds",
Builtins::kDatePrototypeSetSeconds, 2, false);
- SimpleInstallFunction(prototype, "getTime", Builtins::kDatePrototypeGetTime,
- 0, true);
- SimpleInstallFunction(prototype, "setTime", Builtins::kDatePrototypeSetTime,
- 1, false);
- SimpleInstallFunction(prototype, "getTimezoneOffset",
+ SimpleInstallFunction(isolate_, prototype, "getTime",
+ Builtins::kDatePrototypeGetTime, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "setTime",
+ Builtins::kDatePrototypeSetTime, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "getTimezoneOffset",
Builtins::kDatePrototypeGetTimezoneOffset, 0, true);
- SimpleInstallFunction(prototype, "getUTCDate",
+ SimpleInstallFunction(isolate_, prototype, "getUTCDate",
Builtins::kDatePrototypeGetUTCDate, 0, true);
- SimpleInstallFunction(prototype, "setUTCDate",
+ SimpleInstallFunction(isolate_, prototype, "setUTCDate",
Builtins::kDatePrototypeSetUTCDate, 1, false);
- SimpleInstallFunction(prototype, "getUTCDay",
+ SimpleInstallFunction(isolate_, prototype, "getUTCDay",
Builtins::kDatePrototypeGetUTCDay, 0, true);
- SimpleInstallFunction(prototype, "getUTCFullYear",
+ SimpleInstallFunction(isolate_, prototype, "getUTCFullYear",
Builtins::kDatePrototypeGetUTCFullYear, 0, true);
- SimpleInstallFunction(prototype, "setUTCFullYear",
+ SimpleInstallFunction(isolate_, prototype, "setUTCFullYear",
Builtins::kDatePrototypeSetUTCFullYear, 3, false);
- SimpleInstallFunction(prototype, "getUTCHours",
+ SimpleInstallFunction(isolate_, prototype, "getUTCHours",
Builtins::kDatePrototypeGetUTCHours, 0, true);
- SimpleInstallFunction(prototype, "setUTCHours",
+ SimpleInstallFunction(isolate_, prototype, "setUTCHours",
Builtins::kDatePrototypeSetUTCHours, 4, false);
- SimpleInstallFunction(prototype, "getUTCMilliseconds",
+ SimpleInstallFunction(isolate_, prototype, "getUTCMilliseconds",
Builtins::kDatePrototypeGetUTCMilliseconds, 0, true);
- SimpleInstallFunction(prototype, "setUTCMilliseconds",
+ SimpleInstallFunction(isolate_, prototype, "setUTCMilliseconds",
Builtins::kDatePrototypeSetUTCMilliseconds, 1, false);
- SimpleInstallFunction(prototype, "getUTCMinutes",
+ SimpleInstallFunction(isolate_, prototype, "getUTCMinutes",
Builtins::kDatePrototypeGetUTCMinutes, 0, true);
- SimpleInstallFunction(prototype, "setUTCMinutes",
+ SimpleInstallFunction(isolate_, prototype, "setUTCMinutes",
Builtins::kDatePrototypeSetUTCMinutes, 3, false);
- SimpleInstallFunction(prototype, "getUTCMonth",
+ SimpleInstallFunction(isolate_, prototype, "getUTCMonth",
Builtins::kDatePrototypeGetUTCMonth, 0, true);
- SimpleInstallFunction(prototype, "setUTCMonth",
+ SimpleInstallFunction(isolate_, prototype, "setUTCMonth",
Builtins::kDatePrototypeSetUTCMonth, 2, false);
- SimpleInstallFunction(prototype, "getUTCSeconds",
+ SimpleInstallFunction(isolate_, prototype, "getUTCSeconds",
Builtins::kDatePrototypeGetUTCSeconds, 0, true);
- SimpleInstallFunction(prototype, "setUTCSeconds",
+ SimpleInstallFunction(isolate_, prototype, "setUTCSeconds",
Builtins::kDatePrototypeSetUTCSeconds, 2, false);
- SimpleInstallFunction(prototype, "valueOf", Builtins::kDatePrototypeValueOf,
- 0, true);
- SimpleInstallFunction(prototype, "getYear", Builtins::kDatePrototypeGetYear,
- 0, true);
- SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
- 1, false);
- SimpleInstallFunction(prototype, "toJSON", Builtins::kDatePrototypeToJson,
- 1, false);
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
+ Builtins::kDatePrototypeValueOf, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "getYear",
+ Builtins::kDatePrototypeGetYear, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "setYear",
+ Builtins::kDatePrototypeSetYear, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "toJSON",
+ Builtins::kDatePrototypeToJson, 1, false);
// Install Intl fallback functions.
- SimpleInstallFunction(prototype, "toLocaleString",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleString",
Builtins::kDatePrototypeToString, 0, false);
- SimpleInstallFunction(prototype, "toLocaleDateString",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleDateString",
Builtins::kDatePrototypeToDateString, 0, false);
- SimpleInstallFunction(prototype, "toLocaleTimeString",
+ SimpleInstallFunction(isolate_, prototype, "toLocaleTimeString",
Builtins::kDatePrototypeToTimeString, 0, false);
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
- prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
+ isolate_, prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
Builtins::kDatePrototypeToPrimitive,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -2269,53 +2321,92 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate, Builtins::kPromiseGetCapabilitiesExecutor,
+ isolate_, Builtins::kPromiseGetCapabilitiesExecutor,
factory->empty_string(), 2);
native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
}
{ // -- P r o m i s e
Handle<JSFunction> promise_fun = InstallFunction(
- global, "Promise", JS_PROMISE_TYPE, JSPromise::kSizeWithEmbedderFields,
- 0, factory->the_hole_value(), Builtins::kPromiseConstructor);
- InstallWithIntrinsicDefaultProto(isolate, promise_fun,
+ isolate_, global, "Promise", JS_PROMISE_TYPE,
+ JSPromise::kSizeWithEmbedderFields, 0, factory->the_hole_value(),
+ Builtins::kPromiseConstructor);
+ InstallWithIntrinsicDefaultProto(isolate_, promise_fun,
Context::PROMISE_FUNCTION_INDEX);
- Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate_);
shared->set_internal_formal_parameter_count(1);
shared->set_length(1);
- InstallSpeciesGetter(promise_fun);
+ InstallSpeciesGetter(isolate_, promise_fun);
- SimpleInstallFunction(promise_fun, "all", Builtins::kPromiseAll, 1, true);
+ SimpleInstallFunction(isolate_, promise_fun, "all", Builtins::kPromiseAll,
+ 1, true);
- SimpleInstallFunction(promise_fun, "race", Builtins::kPromiseRace, 1, true);
+ SimpleInstallFunction(isolate_, promise_fun, "race", Builtins::kPromiseRace,
+ 1, true);
- SimpleInstallFunction(promise_fun, "resolve",
+ SimpleInstallFunction(isolate_, promise_fun, "resolve",
Builtins::kPromiseResolveTrampoline, 1, true);
- SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
- true);
+ SimpleInstallFunction(isolate_, promise_fun, "reject",
+ Builtins::kPromiseReject, 1, true);
// Setup %PromisePrototype%.
Handle<JSObject> prototype(
- JSObject::cast(promise_fun->instance_prototype()));
+ JSObject::cast(promise_fun->instance_prototype()), isolate());
native_context()->set_promise_prototype(*prototype);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Promise_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Promise_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> promise_then =
- SimpleInstallFunction(prototype, isolate->factory()->then_string(),
- Builtins::kPromisePrototypeThen, 2, true);
+ Handle<JSFunction> promise_then = SimpleInstallFunction(
+ isolate_, prototype, isolate_->factory()->then_string(),
+ Builtins::kPromisePrototypeThen, 2, true);
native_context()->set_promise_then(*promise_then);
- Handle<JSFunction> promise_catch = SimpleInstallFunction(
- prototype, "catch", Builtins::kPromisePrototypeCatch, 1, true);
+ Handle<JSFunction> promise_catch =
+ SimpleInstallFunction(isolate_, prototype, "catch",
+ Builtins::kPromisePrototypeCatch, 1, true);
native_context()->set_promise_catch(*promise_catch);
+ SimpleInstallFunction(isolate_, prototype, "finally",
+ Builtins::kPromisePrototypeFinally, 1, true,
+ DONT_ENUM);
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseThenFinally,
+ isolate_->factory()->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_then_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseCatchFinally,
+ isolate_->factory()->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_catch_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseValueThunkFinally,
+ isolate_->factory()->empty_string(), 0);
+ native_context()->set_promise_value_thunk_finally_shared_fun(*info);
+ }
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate(), Builtins::kPromiseThrowerFinally,
+ isolate_->factory()->empty_string(), 0);
+ native_context()->set_promise_thrower_finally_shared_fun(*info);
+ }
+
// Force the Promise constructor to fast properties, so that we can use the
// fast paths for various things like
//
@@ -2326,18 +2417,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::MigrateSlowToFast(Handle<JSObject>::cast(promise_fun), 0,
"Bootstrapping");
- Handle<Map> prototype_map(prototype->map());
- Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+ Handle<Map> prototype_map(prototype->map(), isolate());
+ Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
{ // Internal: IsPromise
Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
+ isolate_, factory->empty_string(), Builtins::kIsPromise, 1, false);
native_context()->set_is_promise(*function);
}
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseCapabilityDefaultResolve,
+ isolate_, Builtins::kPromiseCapabilityDefaultResolve,
factory->empty_string(), 1, FunctionKind::kConciseMethod);
info->set_native(true);
info->set_function_map_index(
@@ -2346,7 +2437,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
*info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseCapabilityDefaultReject,
+ isolate_, Builtins::kPromiseCapabilityDefaultReject,
factory->empty_string(), 1, FunctionKind::kConciseMethod);
info->set_native(true);
info->set_function_map_index(
@@ -2356,7 +2447,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseAllResolveElementClosure,
+ isolate_, Builtins::kPromiseAllResolveElementClosure,
factory->empty_string(), 1);
native_context()->set_promise_all_resolve_element_shared_fun(*info);
}
@@ -2374,73 +2465,73 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun = InstallFunction(
- global, "RegExp", JS_REGEXP_TYPE,
+ isolate_, global, "RegExp", JS_REGEXP_TYPE,
JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize,
JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
Builtins::kRegExpConstructor);
- InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
- Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
{
// Setup %RegExpPrototype%.
Handle<JSObject> prototype(
- JSObject::cast(regexp_fun->instance_prototype()));
+ JSObject::cast(regexp_fun->instance_prototype()), isolate());
{
Handle<JSFunction> fun = SimpleInstallFunction(
- prototype, factory->exec_string(), Builtins::kRegExpPrototypeExec,
- 1, true, DONT_ENUM);
+ isolate_, prototype, factory->exec_string(),
+ Builtins::kRegExpPrototypeExec, 1, true, DONT_ENUM);
native_context()->set_regexp_exec_function(*fun);
}
- SimpleInstallGetter(prototype, factory->dotAll_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->dotAll_string(),
Builtins::kRegExpPrototypeDotAllGetter, true);
- SimpleInstallGetter(prototype, factory->flags_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->flags_string(),
Builtins::kRegExpPrototypeFlagsGetter, true);
- SimpleInstallGetter(prototype, factory->global_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->global_string(),
Builtins::kRegExpPrototypeGlobalGetter, true);
- SimpleInstallGetter(prototype, factory->ignoreCase_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->ignoreCase_string(),
Builtins::kRegExpPrototypeIgnoreCaseGetter, true);
- SimpleInstallGetter(prototype, factory->multiline_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->multiline_string(),
Builtins::kRegExpPrototypeMultilineGetter, true);
- SimpleInstallGetter(prototype, factory->source_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->source_string(),
Builtins::kRegExpPrototypeSourceGetter, true);
- SimpleInstallGetter(prototype, factory->sticky_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->sticky_string(),
Builtins::kRegExpPrototypeStickyGetter, true);
- SimpleInstallGetter(prototype, factory->unicode_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->unicode_string(),
Builtins::kRegExpPrototypeUnicodeGetter, true);
- SimpleInstallFunction(prototype, "compile",
+ SimpleInstallFunction(isolate_, prototype, "compile",
Builtins::kRegExpPrototypeCompile, 2, true,
DONT_ENUM);
- SimpleInstallFunction(prototype, factory->toString_string(),
+ SimpleInstallFunction(isolate_, prototype, factory->toString_string(),
Builtins::kRegExpPrototypeToString, 0, false,
DONT_ENUM);
- SimpleInstallFunction(prototype, "test", Builtins::kRegExpPrototypeTest,
- 1, true, DONT_ENUM);
+ SimpleInstallFunction(isolate_, prototype, "test",
+ Builtins::kRegExpPrototypeTest, 1, true, DONT_ENUM);
- SimpleInstallFunction(prototype, factory->match_symbol(),
+ SimpleInstallFunction(isolate_, prototype, factory->match_symbol(),
"[Symbol.match]", Builtins::kRegExpPrototypeMatch,
1, true);
- SimpleInstallFunction(prototype, factory->replace_symbol(),
+ SimpleInstallFunction(isolate_, prototype, factory->replace_symbol(),
"[Symbol.replace]",
Builtins::kRegExpPrototypeReplace, 2, false);
- SimpleInstallFunction(prototype, factory->search_symbol(),
+ SimpleInstallFunction(isolate_, prototype, factory->search_symbol(),
"[Symbol.search]", Builtins::kRegExpPrototypeSearch,
1, true);
- SimpleInstallFunction(prototype, factory->split_symbol(),
+ SimpleInstallFunction(isolate_, prototype, factory->split_symbol(),
"[Symbol.split]", Builtins::kRegExpPrototypeSplit,
2, false);
- Handle<Map> prototype_map(prototype->map());
- Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+ Handle<Map> prototype_map(prototype->map(), isolate());
+ Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
// Store the initial RegExp.prototype map. This is used in fast-path
// checks. Do not alter the prototype after this point.
@@ -2450,53 +2541,53 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// RegExp getters and setters.
- InstallSpeciesGetter(regexp_fun);
+ InstallSpeciesGetter(isolate_, regexp_fun);
// Static properties set by a successful match.
const PropertyAttributes no_enum = DONT_ENUM;
- SimpleInstallGetterSetter(regexp_fun, factory->input_string(),
+ SimpleInstallGetterSetter(isolate_, regexp_fun, factory->input_string(),
Builtins::kRegExpInputGetter,
Builtins::kRegExpInputSetter, no_enum);
SimpleInstallGetterSetter(
- regexp_fun, factory->InternalizeUtf8String("$_"),
+ isolate_, regexp_fun, factory->InternalizeUtf8String("$_"),
Builtins::kRegExpInputGetter, Builtins::kRegExpInputSetter, no_enum);
SimpleInstallGetterSetter(
- regexp_fun, factory->InternalizeUtf8String("lastMatch"),
+ isolate_, regexp_fun, factory->InternalizeUtf8String("lastMatch"),
Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
SimpleInstallGetterSetter(
- regexp_fun, factory->InternalizeUtf8String("$&"),
+ isolate_, regexp_fun, factory->InternalizeUtf8String("$&"),
Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
SimpleInstallGetterSetter(
- regexp_fun, factory->InternalizeUtf8String("lastParen"),
+ isolate_, regexp_fun, factory->InternalizeUtf8String("lastParen"),
Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
SimpleInstallGetterSetter(
- regexp_fun, factory->InternalizeUtf8String("$+"),
+ isolate_, regexp_fun, factory->InternalizeUtf8String("$+"),
Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(regexp_fun,
+ SimpleInstallGetterSetter(isolate_, regexp_fun,
factory->InternalizeUtf8String("leftContext"),
Builtins::kRegExpLeftContextGetter,
Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(regexp_fun,
+ SimpleInstallGetterSetter(isolate_, regexp_fun,
factory->InternalizeUtf8String("$`"),
Builtins::kRegExpLeftContextGetter,
Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(regexp_fun,
+ SimpleInstallGetterSetter(isolate_, regexp_fun,
factory->InternalizeUtf8String("rightContext"),
Builtins::kRegExpRightContextGetter,
Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(regexp_fun,
+ SimpleInstallGetterSetter(isolate_, regexp_fun,
factory->InternalizeUtf8String("$'"),
Builtins::kRegExpRightContextGetter,
Builtins::kEmptyFunction, no_enum);
-#define INSTALL_CAPTURE_GETTER(i) \
- SimpleInstallGetterSetter( \
- regexp_fun, factory->InternalizeUtf8String("$" #i), \
+#define INSTALL_CAPTURE_GETTER(i) \
+ SimpleInstallGetterSetter( \
+ isolate_, regexp_fun, factory->InternalizeUtf8String("$" #i), \
Builtins::kRegExpCapture##i##Getter, Builtins::kEmptyFunction, no_enum)
INSTALL_CAPTURE_GETTER(1);
INSTALL_CAPTURE_GETTER(2);
@@ -2511,11 +2602,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
DCHECK(regexp_fun->has_initial_map());
- Handle<Map> initial_map(regexp_fun->initial_map());
+ Handle<Map> initial_map(regexp_fun->initial_map(), isolate());
DCHECK_EQ(1, initial_map->GetInObjectProperties());
- Map::EnsureDescriptorSlack(initial_map, 1);
+ Map::EnsureDescriptorSlack(isolate_, initial_map, 1);
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
@@ -2527,7 +2618,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // Internal: RegExpInternalMatch
Handle<JSFunction> function =
- SimpleCreateFunction(isolate, isolate->factory()->empty_string(),
+ SimpleCreateFunction(isolate_, isolate_->factory()->empty_string(),
Builtins::kRegExpInternalMatch, 2, true);
native_context()->set(Context::REGEXP_INTERNAL_MATCH, *function);
}
@@ -2550,60 +2641,60 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- E r r o r
- InstallError(isolate, global, factory->Error_string(),
+ InstallError(isolate_, global, factory->Error_string(),
Context::ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, Builtins::kMakeError, Context::MAKE_ERROR_INDEX);
+ InstallMakeError(isolate_, Builtins::kMakeError, Context::MAKE_ERROR_INDEX);
}
{ // -- E v a l E r r o r
- InstallError(isolate, global, factory->EvalError_string(),
+ InstallError(isolate_, global, factory->EvalError_string(),
Context::EVAL_ERROR_FUNCTION_INDEX);
}
{ // -- R a n g e E r r o r
- InstallError(isolate, global, factory->RangeError_string(),
+ InstallError(isolate_, global, factory->RangeError_string(),
Context::RANGE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, Builtins::kMakeRangeError,
+ InstallMakeError(isolate_, Builtins::kMakeRangeError,
Context::MAKE_RANGE_ERROR_INDEX);
}
{ // -- R e f e r e n c e E r r o r
- InstallError(isolate, global, factory->ReferenceError_string(),
+ InstallError(isolate_, global, factory->ReferenceError_string(),
Context::REFERENCE_ERROR_FUNCTION_INDEX);
}
{ // -- S y n t a x E r r o r
- InstallError(isolate, global, factory->SyntaxError_string(),
+ InstallError(isolate_, global, factory->SyntaxError_string(),
Context::SYNTAX_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, Builtins::kMakeSyntaxError,
+ InstallMakeError(isolate_, Builtins::kMakeSyntaxError,
Context::MAKE_SYNTAX_ERROR_INDEX);
}
{ // -- T y p e E r r o r
- InstallError(isolate, global, factory->TypeError_string(),
+ InstallError(isolate_, global, factory->TypeError_string(),
Context::TYPE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, Builtins::kMakeTypeError,
+ InstallMakeError(isolate_, Builtins::kMakeTypeError,
Context::MAKE_TYPE_ERROR_INDEX);
}
{ // -- U R I E r r o r
- InstallError(isolate, global, factory->URIError_string(),
+ InstallError(isolate_, global, factory->URIError_string(),
Context::URI_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, Builtins::kMakeURIError,
+ InstallMakeError(isolate_, Builtins::kMakeURIError,
Context::MAKE_URI_ERROR_INDEX);
}
{ // -- C o m p i l e E r r o r
- Handle<JSObject> dummy = factory->NewJSObject(isolate->object_function());
- InstallError(isolate, dummy, factory->CompileError_string(),
+ Handle<JSObject> dummy = factory->NewJSObject(isolate_->object_function());
+ InstallError(isolate_, dummy, factory->CompileError_string(),
Context::WASM_COMPILE_ERROR_FUNCTION_INDEX);
// -- L i n k E r r o r
- InstallError(isolate, dummy, factory->LinkError_string(),
+ InstallError(isolate_, dummy, factory->LinkError_string(),
Context::WASM_LINK_ERROR_FUNCTION_INDEX);
// -- R u n t i m e E r r o r
- InstallError(isolate, dummy, factory->RuntimeError_string(),
+ InstallError(isolate_, dummy, factory->RuntimeError_string(),
Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
}
@@ -2613,13 +2704,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSObject> json_object =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSObject::AddProperty(global, name, json_object, DONT_ENUM);
- SimpleInstallFunction(json_object, "parse", Builtins::kJsonParse, 2, false);
- SimpleInstallFunction(json_object, "stringify", Builtins::kJsonStringify, 3,
- true);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
+ JSObject::AddProperty(isolate_, global, name, json_object, DONT_ENUM);
+ SimpleInstallFunction(isolate_, json_object, "parse", Builtins::kJsonParse,
+ 2, false);
+ SimpleInstallFunction(isolate_, json_object, "stringify",
+ Builtins::kJsonStringify, 3, true);
JSObject::AddProperty(
- json_object, factory->to_string_tag_symbol(),
+ isolate_, json_object, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("JSON"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2627,66 +2719,80 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- M a t h
Handle<String> name = factory->InternalizeUtf8String("Math");
Handle<JSObject> math =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSObject::AddProperty(global, name, math, DONT_ENUM);
- SimpleInstallFunction(math, "abs", Builtins::kMathAbs, 1, true);
- SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
- SimpleInstallFunction(math, "acosh", Builtins::kMathAcosh, 1, true);
- SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
- SimpleInstallFunction(math, "asinh", Builtins::kMathAsinh, 1, true);
- SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
- SimpleInstallFunction(math, "atanh", Builtins::kMathAtanh, 1, true);
- SimpleInstallFunction(math, "atan2", Builtins::kMathAtan2, 2, true);
- SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
- SimpleInstallFunction(math, "cbrt", Builtins::kMathCbrt, 1, true);
- SimpleInstallFunction(math, "expm1", Builtins::kMathExpm1, 1, true);
- SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
- SimpleInstallFunction(math, "cos", Builtins::kMathCos, 1, true);
- SimpleInstallFunction(math, "cosh", Builtins::kMathCosh, 1, true);
- SimpleInstallFunction(math, "exp", Builtins::kMathExp, 1, true);
- Handle<JSFunction> math_floor =
- SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
+ JSObject::AddProperty(isolate_, global, name, math, DONT_ENUM);
+ SimpleInstallFunction(isolate_, math, "abs", Builtins::kMathAbs, 1, true);
+ SimpleInstallFunction(isolate_, math, "acos", Builtins::kMathAcos, 1, true);
+ SimpleInstallFunction(isolate_, math, "acosh", Builtins::kMathAcosh, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "asin", Builtins::kMathAsin, 1, true);
+ SimpleInstallFunction(isolate_, math, "asinh", Builtins::kMathAsinh, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "atan", Builtins::kMathAtan, 1, true);
+ SimpleInstallFunction(isolate_, math, "atanh", Builtins::kMathAtanh, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "atan2", Builtins::kMathAtan2, 2,
+ true);
+ SimpleInstallFunction(isolate_, math, "ceil", Builtins::kMathCeil, 1, true);
+ SimpleInstallFunction(isolate_, math, "cbrt", Builtins::kMathCbrt, 1, true);
+ SimpleInstallFunction(isolate_, math, "expm1", Builtins::kMathExpm1, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "clz32", Builtins::kMathClz32, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "cos", Builtins::kMathCos, 1, true);
+ SimpleInstallFunction(isolate_, math, "cosh", Builtins::kMathCosh, 1, true);
+ SimpleInstallFunction(isolate_, math, "exp", Builtins::kMathExp, 1, true);
+ Handle<JSFunction> math_floor = SimpleInstallFunction(
+ isolate_, math, "floor", Builtins::kMathFloor, 1, true);
native_context()->set_math_floor(*math_floor);
- SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
- SimpleInstallFunction(math, "hypot", Builtins::kMathHypot, 2, false);
- SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
- SimpleInstallFunction(math, "log", Builtins::kMathLog, 1, true);
- SimpleInstallFunction(math, "log1p", Builtins::kMathLog1p, 1, true);
- SimpleInstallFunction(math, "log2", Builtins::kMathLog2, 1, true);
- SimpleInstallFunction(math, "log10", Builtins::kMathLog10, 1, true);
- SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
- SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
- Handle<JSFunction> math_pow =
- SimpleInstallFunction(math, "pow", Builtins::kMathPow, 2, true);
+ SimpleInstallFunction(isolate_, math, "fround", Builtins::kMathFround, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "hypot", Builtins::kMathHypot, 2,
+ false);
+ SimpleInstallFunction(isolate_, math, "imul", Builtins::kMathImul, 2, true);
+ SimpleInstallFunction(isolate_, math, "log", Builtins::kMathLog, 1, true);
+ SimpleInstallFunction(isolate_, math, "log1p", Builtins::kMathLog1p, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "log2", Builtins::kMathLog2, 1, true);
+ SimpleInstallFunction(isolate_, math, "log10", Builtins::kMathLog10, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "max", Builtins::kMathMax, 2, false);
+ SimpleInstallFunction(isolate_, math, "min", Builtins::kMathMin, 2, false);
+ Handle<JSFunction> math_pow = SimpleInstallFunction(
+ isolate_, math, "pow", Builtins::kMathPow, 2, true);
native_context()->set_math_pow(*math_pow);
- SimpleInstallFunction(math, "random", Builtins::kMathRandom, 0, true);
- SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
- SimpleInstallFunction(math, "sign", Builtins::kMathSign, 1, true);
- SimpleInstallFunction(math, "sin", Builtins::kMathSin, 1, true);
- SimpleInstallFunction(math, "sinh", Builtins::kMathSinh, 1, true);
- SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
- SimpleInstallFunction(math, "tan", Builtins::kMathTan, 1, true);
- SimpleInstallFunction(math, "tanh", Builtins::kMathTanh, 1, true);
- SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
+ SimpleInstallFunction(isolate_, math, "random", Builtins::kMathRandom, 0,
+ true);
+ SimpleInstallFunction(isolate_, math, "round", Builtins::kMathRound, 1,
+ true);
+ SimpleInstallFunction(isolate_, math, "sign", Builtins::kMathSign, 1, true);
+ SimpleInstallFunction(isolate_, math, "sin", Builtins::kMathSin, 1, true);
+ SimpleInstallFunction(isolate_, math, "sinh", Builtins::kMathSinh, 1, true);
+ SimpleInstallFunction(isolate_, math, "sqrt", Builtins::kMathSqrt, 1, true);
+ SimpleInstallFunction(isolate_, math, "tan", Builtins::kMathTan, 1, true);
+ SimpleInstallFunction(isolate_, math, "tanh", Builtins::kMathTanh, 1, true);
+ SimpleInstallFunction(isolate_, math, "trunc", Builtins::kMathTrunc, 1,
+ true);
// Install math constants.
double const kE = base::ieee754::exp(1.0);
double const kPI = 3.1415926535897932;
- InstallConstant(isolate, math, "E", factory->NewNumber(kE));
- InstallConstant(isolate, math, "LN10",
+ InstallConstant(isolate_, math, "E", factory->NewNumber(kE));
+ InstallConstant(isolate_, math, "LN10",
factory->NewNumber(base::ieee754::log(10.0)));
- InstallConstant(isolate, math, "LN2",
+ InstallConstant(isolate_, math, "LN2",
factory->NewNumber(base::ieee754::log(2.0)));
- InstallConstant(isolate, math, "LOG10E",
+ InstallConstant(isolate_, math, "LOG10E",
factory->NewNumber(base::ieee754::log10(kE)));
- InstallConstant(isolate, math, "LOG2E",
+ InstallConstant(isolate_, math, "LOG2E",
factory->NewNumber(base::ieee754::log2(kE)));
- InstallConstant(isolate, math, "PI", factory->NewNumber(kPI));
- InstallConstant(isolate, math, "SQRT1_2",
+ InstallConstant(isolate_, math, "PI", factory->NewNumber(kPI));
+ InstallConstant(isolate_, math, "SQRT1_2",
factory->NewNumber(std::sqrt(0.5)));
- InstallConstant(isolate, math, "SQRT2", factory->NewNumber(std::sqrt(2.0)));
+ InstallConstant(isolate_, math, "SQRT2",
+ factory->NewNumber(std::sqrt(2.0)));
JSObject::AddProperty(
- math, factory->to_string_tag_symbol(),
+ isolate_, math, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("Math"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2694,67 +2800,61 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- C o n s o l e
Handle<String> name = factory->InternalizeUtf8String("console");
NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
- name, isolate->strict_function_map(), LanguageMode::kStrict);
+ name, isolate_->strict_function_map(), LanguageMode::kStrict);
Handle<JSFunction> cons = factory->NewFunction(args);
- Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
+ Handle<JSObject> empty = factory->NewJSObject(isolate_->object_function());
JSFunction::SetPrototype(cons, empty);
Handle<JSObject> console = factory->NewJSObject(cons, TENURED);
DCHECK(console->IsJSObject());
- JSObject::AddProperty(global, name, console, DONT_ENUM);
- SimpleInstallFunction(console, "debug", Builtins::kConsoleDebug, 1, false,
- NONE);
- SimpleInstallFunction(console, "error", Builtins::kConsoleError, 1, false,
- NONE);
- SimpleInstallFunction(console, "info", Builtins::kConsoleInfo, 1, false,
- NONE);
- SimpleInstallFunction(console, "log", Builtins::kConsoleLog, 1, false,
- NONE);
- SimpleInstallFunction(console, "warn", Builtins::kConsoleWarn, 1, false,
- NONE);
- SimpleInstallFunction(console, "dir", Builtins::kConsoleDir, 1, false,
- NONE);
- SimpleInstallFunction(console, "dirxml", Builtins::kConsoleDirXml, 1, false,
- NONE);
- SimpleInstallFunction(console, "table", Builtins::kConsoleTable, 1, false,
- NONE);
- SimpleInstallFunction(console, "trace", Builtins::kConsoleTrace, 1, false,
- NONE);
- SimpleInstallFunction(console, "group", Builtins::kConsoleGroup, 1, false,
- NONE);
- SimpleInstallFunction(console, "groupCollapsed",
- Builtins::kConsoleGroupCollapsed, 1, false, NONE);
- SimpleInstallFunction(console, "groupEnd", Builtins::kConsoleGroupEnd, 1,
- false, NONE);
- SimpleInstallFunction(console, "clear", Builtins::kConsoleClear, 1, false,
- NONE);
- SimpleInstallFunction(console, "count", Builtins::kConsoleCount, 1, false,
- NONE);
- SimpleInstallFunction(console, "countReset", Builtins::kConsoleCountReset,
+ JSObject::AddProperty(isolate_, global, name, console, DONT_ENUM);
+ SimpleInstallFunction(isolate_, console, "debug", Builtins::kConsoleDebug,
1, false, NONE);
- SimpleInstallFunction(console, "assert", Builtins::kFastConsoleAssert, 1,
+ SimpleInstallFunction(isolate_, console, "error", Builtins::kConsoleError,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "info", Builtins::kConsoleInfo, 1,
false, NONE);
- SimpleInstallFunction(console, "markTimeline",
- Builtins::kConsoleMarkTimeline, 1, false, NONE);
- SimpleInstallFunction(console, "profile", Builtins::kConsoleProfile, 1,
+ SimpleInstallFunction(isolate_, console, "log", Builtins::kConsoleLog, 1,
false, NONE);
- SimpleInstallFunction(console, "profileEnd", Builtins::kConsoleProfileEnd,
- 1, false, NONE);
- SimpleInstallFunction(console, "timeline", Builtins::kConsoleTimeline, 1,
+ SimpleInstallFunction(isolate_, console, "warn", Builtins::kConsoleWarn, 1,
false, NONE);
- SimpleInstallFunction(console, "timelineEnd", Builtins::kConsoleTimelineEnd,
- 1, false, NONE);
- SimpleInstallFunction(console, "time", Builtins::kConsoleTime, 1, false,
- NONE);
- SimpleInstallFunction(console, "timeEnd", Builtins::kConsoleTimeEnd, 1,
+ SimpleInstallFunction(isolate_, console, "dir", Builtins::kConsoleDir, 1,
false, NONE);
- SimpleInstallFunction(console, "timeStamp", Builtins::kConsoleTimeStamp, 1,
+ SimpleInstallFunction(isolate_, console, "dirxml", Builtins::kConsoleDirXml,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "table", Builtins::kConsoleTable,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "trace", Builtins::kConsoleTrace,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "group", Builtins::kConsoleGroup,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "groupCollapsed",
+ Builtins::kConsoleGroupCollapsed, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "groupEnd",
+ Builtins::kConsoleGroupEnd, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "clear", Builtins::kConsoleClear,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "count", Builtins::kConsoleCount,
+ 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "countReset",
+ Builtins::kConsoleCountReset, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "assert",
+ Builtins::kFastConsoleAssert, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "profile",
+ Builtins::kConsoleProfile, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "profileEnd",
+ Builtins::kConsoleProfileEnd, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "time", Builtins::kConsoleTime, 1,
false, NONE);
- SimpleInstallFunction(console, "context", Builtins::kConsoleContext, 1,
- true, NONE);
+ SimpleInstallFunction(isolate_, console, "timeEnd",
+ Builtins::kConsoleTimeEnd, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "timeStamp",
+ Builtins::kConsoleTimeStamp, 1, false, NONE);
+ SimpleInstallFunction(isolate_, console, "context",
+ Builtins::kConsoleContext, 1, true, NONE);
JSObject::AddProperty(
- console, factory->to_string_tag_symbol(),
+ isolate_, console, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("Object"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2763,93 +2863,110 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- I n t l
Handle<String> name = factory->InternalizeUtf8String("Intl");
Handle<JSObject> intl =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSObject::AddProperty(global, name, intl, DONT_ENUM);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
+ JSObject::AddProperty(isolate_, global, name, intl, DONT_ENUM);
{
Handle<JSFunction> date_time_format_constructor = InstallFunction(
- intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
+ isolate_, intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
+ 0, factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_date_time_format_function(
*date_time_format_constructor);
Handle<JSObject> prototype(
- JSObject::cast(date_time_format_constructor->prototype()), isolate);
+ JSObject::cast(date_time_format_constructor->prototype()), isolate_);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(prototype, "formatToParts",
+ SimpleInstallFunction(isolate_, prototype, "formatToParts",
Builtins::kDateTimeFormatPrototypeFormatToParts, 1,
false);
}
{
Handle<JSFunction> number_format_constructor = InstallFunction(
- intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
+ isolate_, intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
+ 0, factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_number_format_function(
*number_format_constructor);
Handle<JSObject> prototype(
- JSObject::cast(number_format_constructor->prototype()), isolate);
+ JSObject::cast(number_format_constructor->prototype()), isolate_);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(prototype, "formatToParts",
+ SimpleInstallFunction(isolate_, prototype, "formatToParts",
Builtins::kNumberFormatPrototypeFormatToParts, 1,
false);
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("format"),
+ Builtins::kNumberFormatPrototypeFormatNumber, false);
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
+ isolate_, Builtins::kNumberFormatInternalFormatNumber,
+ factory->empty_string(), 1);
+ native_context()->set_number_format_internal_format_number_shared_fun(
+ *info);
+ }
}
{
- Handle<JSFunction> collator_constructor =
- InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> collator_constructor = InstallFunction(
+ isolate_, intl, "Collator", JS_OBJECT_TYPE, Collator::kSize, 0,
+ factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_collator_function(*collator_constructor);
Handle<JSObject> prototype(
- JSObject::cast(collator_constructor->prototype()), isolate);
+ JSObject::cast(collator_constructor->prototype()), isolate_);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{
- Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
- intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> v8_break_iterator_constructor =
+ InstallFunction(isolate_, intl, "v8BreakIterator", JS_OBJECT_TYPE,
+ V8BreakIterator::kSize, 0, factory->the_hole_value(),
+ Builtins::kIllegal);
native_context()->set_intl_v8_break_iterator_function(
*v8_break_iterator_constructor);
Handle<JSObject> prototype(
- JSObject::cast(v8_break_iterator_constructor->prototype()), isolate);
+ JSObject::cast(v8_break_iterator_constructor->prototype()), isolate_);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{
Handle<JSFunction> plural_rules_constructor = InstallFunction(
- intl, "PluralRules", JS_OBJECT_TYPE, PluralRules::kSize, 0,
+ isolate_, intl, "PluralRules", JS_OBJECT_TYPE, PluralRules::kSize, 0,
factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_intl_plural_rules_function(
*plural_rules_constructor);
Handle<JSObject> prototype(
- JSObject::cast(plural_rules_constructor->prototype()), isolate);
+ JSObject::cast(plural_rules_constructor->prototype()), isolate_);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
}
@@ -2858,13 +2975,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- A r r a y B u f f e r
Handle<String> name = factory->ArrayBuffer_string();
Handle<JSFunction> array_buffer_fun = CreateArrayBuffer(name, ARRAY_BUFFER);
- JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
+ JSObject::AddProperty(isolate_, global, name, array_buffer_fun, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(isolate_, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
- InstallSpeciesGetter(array_buffer_fun);
+ InstallSpeciesGetter(isolate_, array_buffer_fun);
Handle<JSFunction> array_buffer_noinit_fun = SimpleCreateFunction(
- isolate,
+ isolate_,
factory->NewStringFromAsciiChecked(
"arrayBufferConstructor_DoNotInitialize"),
Builtins::kArrayBufferConstructor_DoNotInitialize, 1, false);
@@ -2875,132 +2992,134 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<String> name = factory->SharedArrayBuffer_string();
Handle<JSFunction> shared_array_buffer_fun =
CreateArrayBuffer(name, SHARED_ARRAY_BUFFER);
- InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, shared_array_buffer_fun,
Context::SHARED_ARRAY_BUFFER_FUN_INDEX);
- InstallSpeciesGetter(shared_array_buffer_fun);
+ InstallSpeciesGetter(isolate_, shared_array_buffer_fun);
}
{ // -- A t o m i c s
Handle<JSObject> atomics_object =
- factory->NewJSObject(isolate->object_function(), TENURED);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
native_context()->set_atomics_object(*atomics_object);
- SimpleInstallFunction(atomics_object, "load", Builtins::kAtomicsLoad, 2,
- true);
- SimpleInstallFunction(atomics_object, "store", Builtins::kAtomicsStore, 3,
- true);
- SimpleInstallFunction(atomics_object, "add", Builtins::kAtomicsAdd, 3,
- true);
- SimpleInstallFunction(atomics_object, "sub", Builtins::kAtomicsSub, 3,
- true);
- SimpleInstallFunction(atomics_object, "and", Builtins::kAtomicsAnd, 3,
- true);
- SimpleInstallFunction(atomics_object, "or", Builtins::kAtomicsOr, 3, true);
- SimpleInstallFunction(atomics_object, "xor", Builtins::kAtomicsXor, 3,
- true);
- SimpleInstallFunction(atomics_object, "exchange",
+ SimpleInstallFunction(isolate_, atomics_object, "load",
+ Builtins::kAtomicsLoad, 2, true);
+ SimpleInstallFunction(isolate_, atomics_object, "store",
+ Builtins::kAtomicsStore, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "add",
+ Builtins::kAtomicsAdd, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "sub",
+ Builtins::kAtomicsSub, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "and",
+ Builtins::kAtomicsAnd, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "or", Builtins::kAtomicsOr,
+ 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "xor",
+ Builtins::kAtomicsXor, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "exchange",
Builtins::kAtomicsExchange, 3, true);
- SimpleInstallFunction(atomics_object, "compareExchange",
+ SimpleInstallFunction(isolate_, atomics_object, "compareExchange",
Builtins::kAtomicsCompareExchange, 4, true);
- SimpleInstallFunction(atomics_object, "isLockFree",
+ SimpleInstallFunction(isolate_, atomics_object, "isLockFree",
Builtins::kAtomicsIsLockFree, 1, true);
- SimpleInstallFunction(atomics_object, "wait", Builtins::kAtomicsWait, 4,
- true);
- SimpleInstallFunction(atomics_object, "wake", Builtins::kAtomicsWake, 3,
- true);
- SimpleInstallFunction(atomics_object, "notify", Builtins::kAtomicsWake, 3,
- true);
+ SimpleInstallFunction(isolate_, atomics_object, "wait",
+ Builtins::kAtomicsWait, 4, true);
+ SimpleInstallFunction(isolate_, atomics_object, "wake",
+ Builtins::kAtomicsWake, 3, true);
+ SimpleInstallFunction(isolate_, atomics_object, "notify",
+ Builtins::kAtomicsWake, 3, true);
}
{ // -- T y p e d A r r a y
Handle<JSFunction> typed_array_fun = CreateFunction(
- isolate, factory->InternalizeUtf8String("TypedArray"),
+ isolate_, factory->InternalizeUtf8String("TypedArray"),
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, 0, factory->the_hole_value(),
Builtins::kTypedArrayBaseConstructor);
typed_array_fun->shared()->set_native(false);
typed_array_fun->shared()->set_length(0);
- InstallSpeciesGetter(typed_array_fun);
+ InstallSpeciesGetter(isolate_, typed_array_fun);
native_context()->set_typed_array_function(*typed_array_fun);
- SimpleInstallFunction(typed_array_fun, "of", Builtins::kTypedArrayOf, 0,
- false);
- SimpleInstallFunction(typed_array_fun, "from", Builtins::kTypedArrayFrom, 1,
- false);
+ SimpleInstallFunction(isolate_, typed_array_fun, "of",
+ Builtins::kTypedArrayOf, 0, false);
+ SimpleInstallFunction(isolate_, typed_array_fun, "from",
+ Builtins::kTypedArrayFrom, 1, false);
// Setup %TypedArrayPrototype%.
Handle<JSObject> prototype(
- JSObject::cast(typed_array_fun->instance_prototype()));
+ JSObject::cast(typed_array_fun->instance_prototype()), isolate());
native_context()->set_typed_array_prototype(*prototype);
// Install the "buffer", "byteOffset", "byteLength", "length"
// and @@toStringTag getters on the {prototype}.
- SimpleInstallGetter(prototype, factory->buffer_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->buffer_string(),
Builtins::kTypedArrayPrototypeBuffer, false);
- SimpleInstallGetter(prototype, factory->byte_length_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->byte_length_string(),
Builtins::kTypedArrayPrototypeByteLength, true,
kTypedArrayByteLength);
- SimpleInstallGetter(prototype, factory->byte_offset_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->byte_offset_string(),
Builtins::kTypedArrayPrototypeByteOffset, true,
kTypedArrayByteOffset);
- SimpleInstallGetter(prototype, factory->length_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->length_string(),
Builtins::kTypedArrayPrototypeLength, true,
kTypedArrayLength);
- SimpleInstallGetter(prototype, factory->to_string_tag_symbol(),
+ SimpleInstallGetter(isolate_, prototype, factory->to_string_tag_symbol(),
Builtins::kTypedArrayPrototypeToStringTag, true,
kTypedArrayToStringTag);
// Install "keys", "values" and "entries" methods on the {prototype}.
- SimpleInstallFunction(prototype, "entries",
+ SimpleInstallFunction(isolate_, prototype, "entries",
Builtins::kTypedArrayPrototypeEntries, 0, true,
kTypedArrayEntries);
- SimpleInstallFunction(prototype, "keys", Builtins::kTypedArrayPrototypeKeys,
- 0, true, kTypedArrayKeys);
+ SimpleInstallFunction(isolate_, prototype, "keys",
+ Builtins::kTypedArrayPrototypeKeys, 0, true,
+ kTypedArrayKeys);
Handle<JSFunction> values = SimpleInstallFunction(
- prototype, "values", Builtins::kTypedArrayPrototypeValues, 0, true,
- kTypedArrayValues);
- JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
- DONT_ENUM);
+ isolate_, prototype, "values", Builtins::kTypedArrayPrototypeValues, 0,
+ true, kTypedArrayValues);
+ JSObject::AddProperty(isolate_, prototype, factory->iterator_symbol(),
+ values, DONT_ENUM);
// TODO(caitp): alphasort accessors/methods
- SimpleInstallFunction(prototype, "copyWithin",
+ SimpleInstallFunction(isolate_, prototype, "copyWithin",
Builtins::kTypedArrayPrototypeCopyWithin, 2, false);
- SimpleInstallFunction(prototype, "every",
+ SimpleInstallFunction(isolate_, prototype, "every",
Builtins::kTypedArrayPrototypeEvery, 1, false);
- SimpleInstallFunction(prototype, "fill",
+ SimpleInstallFunction(isolate_, prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
- SimpleInstallFunction(prototype, "filter",
+ SimpleInstallFunction(isolate_, prototype, "filter",
Builtins::kTypedArrayPrototypeFilter, 1, false);
- SimpleInstallFunction(prototype, "find", Builtins::kTypedArrayPrototypeFind,
- 1, false);
- SimpleInstallFunction(prototype, "findIndex",
+ SimpleInstallFunction(isolate_, prototype, "find",
+ Builtins::kTypedArrayPrototypeFind, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "findIndex",
Builtins::kTypedArrayPrototypeFindIndex, 1, false);
- SimpleInstallFunction(prototype, "forEach",
+ SimpleInstallFunction(isolate_, prototype, "forEach",
Builtins::kTypedArrayPrototypeForEach, 1, false);
- SimpleInstallFunction(prototype, "includes",
+ SimpleInstallFunction(isolate_, prototype, "includes",
Builtins::kTypedArrayPrototypeIncludes, 1, false);
- SimpleInstallFunction(prototype, "indexOf",
+ SimpleInstallFunction(isolate_, prototype, "indexOf",
Builtins::kTypedArrayPrototypeIndexOf, 1, false);
- SimpleInstallFunction(prototype, "lastIndexOf",
+ SimpleInstallFunction(isolate_, prototype, "lastIndexOf",
Builtins::kTypedArrayPrototypeLastIndexOf, 1, false);
- SimpleInstallFunction(prototype, "map", Builtins::kTypedArrayPrototypeMap,
- 1, false);
- SimpleInstallFunction(prototype, "reverse",
+ SimpleInstallFunction(isolate_, prototype, "map",
+ Builtins::kTypedArrayPrototypeMap, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "reverse",
Builtins::kTypedArrayPrototypeReverse, 0, false);
- SimpleInstallFunction(prototype, "reduce",
+ SimpleInstallFunction(isolate_, prototype, "reduce",
Builtins::kTypedArrayPrototypeReduce, 1, false);
- SimpleInstallFunction(prototype, "reduceRight",
+ SimpleInstallFunction(isolate_, prototype, "reduceRight",
Builtins::kTypedArrayPrototypeReduceRight, 1, false);
- SimpleInstallFunction(prototype, "set", Builtins::kTypedArrayPrototypeSet,
- 1, false);
- SimpleInstallFunction(prototype, "slice",
+ SimpleInstallFunction(isolate_, prototype, "set",
+ Builtins::kTypedArrayPrototypeSet, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "slice",
Builtins::kTypedArrayPrototypeSlice, 2, false);
- SimpleInstallFunction(prototype, "some", Builtins::kTypedArrayPrototypeSome,
- 1, false);
- SimpleInstallFunction(prototype, "sort", Builtins::kTypedArrayPrototypeSort,
- 1, false);
- SimpleInstallFunction(prototype, "subarray",
+ SimpleInstallFunction(isolate_, prototype, "some",
+ Builtins::kTypedArrayPrototypeSome, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "sort",
+ Builtins::kTypedArrayPrototypeSort, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "subarray",
Builtins::kTypedArrayPrototypeSubArray, 2, false);
}
@@ -3009,7 +3128,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ \
Handle<JSFunction> fun = \
InstallTypedArray(#Type "Array", TYPE##_ELEMENTS); \
- InstallWithIntrinsicDefaultProto(isolate, fun, \
+ InstallWithIntrinsicDefaultProto(isolate_, fun, \
Context::TYPE##_ARRAY_FUN_INDEX); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
@@ -3018,185 +3137,189 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- D a t a V i e w
Handle<JSFunction> data_view_fun = InstallFunction(
- global, "DataView", JS_DATA_VIEW_TYPE,
+ isolate_, global, "DataView", JS_DATA_VIEW_TYPE,
JSDataView::kSizeWithEmbedderFields, 0, factory->the_hole_value(),
Builtins::kDataViewConstructor);
- InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
- data_view_fun->shared()->set_length(3);
+ data_view_fun->shared()->set_length(1);
data_view_fun->shared()->DontAdaptArguments();
// Setup %DataViewPrototype%.
Handle<JSObject> prototype(
- JSObject::cast(data_view_fun->instance_prototype()));
+ JSObject::cast(data_view_fun->instance_prototype()), isolate());
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("DataView"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the "buffer", "byteOffset" and "byteLength" getters
// on the {prototype}.
- SimpleInstallGetter(prototype, factory->buffer_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->buffer_string(),
Builtins::kDataViewPrototypeGetBuffer, false,
kDataViewBuffer);
- SimpleInstallGetter(prototype, factory->byte_length_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->byte_length_string(),
Builtins::kDataViewPrototypeGetByteLength, false,
kDataViewByteLength);
- SimpleInstallGetter(prototype, factory->byte_offset_string(),
+ SimpleInstallGetter(isolate_, prototype, factory->byte_offset_string(),
Builtins::kDataViewPrototypeGetByteOffset, false,
kDataViewByteOffset);
- SimpleInstallFunction(prototype, "getInt8",
+ SimpleInstallFunction(isolate_, prototype, "getInt8",
Builtins::kDataViewPrototypeGetInt8, 1, false);
- SimpleInstallFunction(prototype, "setInt8",
+ SimpleInstallFunction(isolate_, prototype, "setInt8",
Builtins::kDataViewPrototypeSetInt8, 2, false);
- SimpleInstallFunction(prototype, "getUint8",
+ SimpleInstallFunction(isolate_, prototype, "getUint8",
Builtins::kDataViewPrototypeGetUint8, 1, false);
- SimpleInstallFunction(prototype, "setUint8",
+ SimpleInstallFunction(isolate_, prototype, "setUint8",
Builtins::kDataViewPrototypeSetUint8, 2, false);
- SimpleInstallFunction(prototype, "getInt16",
+ SimpleInstallFunction(isolate_, prototype, "getInt16",
Builtins::kDataViewPrototypeGetInt16, 1, false);
- SimpleInstallFunction(prototype, "setInt16",
+ SimpleInstallFunction(isolate_, prototype, "setInt16",
Builtins::kDataViewPrototypeSetInt16, 2, false);
- SimpleInstallFunction(prototype, "getUint16",
+ SimpleInstallFunction(isolate_, prototype, "getUint16",
Builtins::kDataViewPrototypeGetUint16, 1, false);
- SimpleInstallFunction(prototype, "setUint16",
+ SimpleInstallFunction(isolate_, prototype, "setUint16",
Builtins::kDataViewPrototypeSetUint16, 2, false);
- SimpleInstallFunction(prototype, "getInt32",
+ SimpleInstallFunction(isolate_, prototype, "getInt32",
Builtins::kDataViewPrototypeGetInt32, 1, false);
- SimpleInstallFunction(prototype, "setInt32",
+ SimpleInstallFunction(isolate_, prototype, "setInt32",
Builtins::kDataViewPrototypeSetInt32, 2, false);
- SimpleInstallFunction(prototype, "getUint32",
+ SimpleInstallFunction(isolate_, prototype, "getUint32",
Builtins::kDataViewPrototypeGetUint32, 1, false);
- SimpleInstallFunction(prototype, "setUint32",
+ SimpleInstallFunction(isolate_, prototype, "setUint32",
Builtins::kDataViewPrototypeSetUint32, 2, false);
- SimpleInstallFunction(prototype, "getFloat32",
+ SimpleInstallFunction(isolate_, prototype, "getFloat32",
Builtins::kDataViewPrototypeGetFloat32, 1, false);
- SimpleInstallFunction(prototype, "setFloat32",
+ SimpleInstallFunction(isolate_, prototype, "setFloat32",
Builtins::kDataViewPrototypeSetFloat32, 2, false);
- SimpleInstallFunction(prototype, "getFloat64",
+ SimpleInstallFunction(isolate_, prototype, "getFloat64",
Builtins::kDataViewPrototypeGetFloat64, 1, false);
- SimpleInstallFunction(prototype, "setFloat64",
+ SimpleInstallFunction(isolate_, prototype, "setFloat64",
Builtins::kDataViewPrototypeSetFloat64, 2, false);
}
{ // -- M a p
Handle<JSFunction> js_map_fun =
- InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize, 0,
+ InstallFunction(isolate_, global, "Map", JS_MAP_TYPE, JSMap::kSize, 0,
factory->the_hole_value(), Builtins::kMapConstructor);
- InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, js_map_fun,
Context::JS_MAP_FUN_INDEX);
- Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate_);
shared->DontAdaptArguments();
shared->set_length(0);
// Setup %MapPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(js_map_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(js_map_fun->instance_prototype()),
+ isolate());
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Map_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Map_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
Handle<JSFunction> map_get = SimpleInstallFunction(
- prototype, "get", Builtins::kMapPrototypeGet, 1, true);
+ isolate_, prototype, "get", Builtins::kMapPrototypeGet, 1, true);
native_context()->set_map_get(*map_get);
Handle<JSFunction> map_set = SimpleInstallFunction(
- prototype, "set", Builtins::kMapPrototypeSet, 2, true);
+ isolate_, prototype, "set", Builtins::kMapPrototypeSet, 2, true);
native_context()->set_map_set(*map_set);
Handle<JSFunction> map_has = SimpleInstallFunction(
- prototype, "has", Builtins::kMapPrototypeHas, 1, true);
+ isolate_, prototype, "has", Builtins::kMapPrototypeHas, 1, true);
native_context()->set_map_has(*map_has);
Handle<JSFunction> map_delete = SimpleInstallFunction(
- prototype, "delete", Builtins::kMapPrototypeDelete, 1, true);
+ isolate_, prototype, "delete", Builtins::kMapPrototypeDelete, 1, true);
native_context()->set_map_delete(*map_delete);
- SimpleInstallFunction(prototype, "clear", Builtins::kMapPrototypeClear, 0,
- true);
- Handle<JSFunction> entries = SimpleInstallFunction(
- prototype, "entries", Builtins::kMapPrototypeEntries, 0, true);
- JSObject::AddProperty(prototype, factory->iterator_symbol(), entries,
- DONT_ENUM);
- SimpleInstallFunction(prototype, "forEach", Builtins::kMapPrototypeForEach,
- 1, false);
- SimpleInstallFunction(prototype, "keys", Builtins::kMapPrototypeKeys, 0,
- true);
- SimpleInstallGetter(prototype, factory->InternalizeUtf8String("size"),
- Builtins::kMapPrototypeGetSize, true,
- BuiltinFunctionId::kMapSize);
- SimpleInstallFunction(prototype, "values", Builtins::kMapPrototypeValues, 0,
- true);
+ SimpleInstallFunction(isolate_, prototype, "clear",
+ Builtins::kMapPrototypeClear, 0, true);
+ Handle<JSFunction> entries =
+ SimpleInstallFunction(isolate_, prototype, "entries",
+ Builtins::kMapPrototypeEntries, 0, true);
+ JSObject::AddProperty(isolate_, prototype, factory->iterator_symbol(),
+ entries, DONT_ENUM);
+ SimpleInstallFunction(isolate_, prototype, "forEach",
+ Builtins::kMapPrototypeForEach, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "keys",
+ Builtins::kMapPrototypeKeys, 0, true);
+ SimpleInstallGetter(
+ isolate_, prototype, factory->InternalizeUtf8String("size"),
+ Builtins::kMapPrototypeGetSize, true, BuiltinFunctionId::kMapSize);
+ SimpleInstallFunction(isolate_, prototype, "values",
+ Builtins::kMapPrototypeValues, 0, true);
native_context()->set_initial_map_prototype_map(prototype->map());
- InstallSpeciesGetter(js_map_fun);
+ InstallSpeciesGetter(isolate_, js_map_fun);
}
{ // -- S e t
Handle<JSFunction> js_set_fun =
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize, 0,
+ InstallFunction(isolate_, global, "Set", JS_SET_TYPE, JSSet::kSize, 0,
factory->the_hole_value(), Builtins::kSetConstructor);
- InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
+ InstallWithIntrinsicDefaultProto(isolate_, js_set_fun,
Context::JS_SET_FUN_INDEX);
- Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate_);
shared->DontAdaptArguments();
shared->set_length(0);
// Setup %SetPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(js_set_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(js_set_fun->instance_prototype()),
+ isolate());
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), factory->Set_string(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->Set_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
Handle<JSFunction> set_has = SimpleInstallFunction(
- prototype, "has", Builtins::kSetPrototypeHas, 1, true);
+ isolate_, prototype, "has", Builtins::kSetPrototypeHas, 1, true);
native_context()->set_set_has(*set_has);
Handle<JSFunction> set_add = SimpleInstallFunction(
- prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
+ isolate_, prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
native_context()->set_set_add(*set_add);
Handle<JSFunction> set_delete = SimpleInstallFunction(
- prototype, "delete", Builtins::kSetPrototypeDelete, 1, true);
+ isolate_, prototype, "delete", Builtins::kSetPrototypeDelete, 1, true);
native_context()->set_set_delete(*set_delete);
- SimpleInstallFunction(prototype, "clear", Builtins::kSetPrototypeClear, 0,
- true);
- SimpleInstallFunction(prototype, "entries", Builtins::kSetPrototypeEntries,
- 0, true);
- SimpleInstallFunction(prototype, "forEach", Builtins::kSetPrototypeForEach,
- 1, false);
- SimpleInstallGetter(prototype, factory->InternalizeUtf8String("size"),
- Builtins::kSetPrototypeGetSize, true,
- BuiltinFunctionId::kSetSize);
+ SimpleInstallFunction(isolate_, prototype, "clear",
+ Builtins::kSetPrototypeClear, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "entries",
+ Builtins::kSetPrototypeEntries, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "forEach",
+ Builtins::kSetPrototypeForEach, 1, false);
+ SimpleInstallGetter(
+ isolate_, prototype, factory->InternalizeUtf8String("size"),
+ Builtins::kSetPrototypeGetSize, true, BuiltinFunctionId::kSetSize);
Handle<JSFunction> values = SimpleInstallFunction(
- prototype, "values", Builtins::kSetPrototypeValues, 0, true);
- JSObject::AddProperty(prototype, factory->keys_string(), values, DONT_ENUM);
- JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
+ isolate_, prototype, "values", Builtins::kSetPrototypeValues, 0, true);
+ JSObject::AddProperty(isolate_, prototype, factory->keys_string(), values,
DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, factory->iterator_symbol(),
+ values, DONT_ENUM);
native_context()->set_initial_set_prototype_map(prototype->map());
- InstallSpeciesGetter(js_set_fun);
+ InstallSpeciesGetter(isolate_, js_set_fun);
}
{ // -- J S M o d u l e N a m e s p a c e
Handle<Map> map = factory->NewMap(
JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize,
TERMINAL_FAST_ELEMENTS_KIND, JSModuleNamespace::kInObjectFieldCount);
- Map::SetPrototype(map, isolate->factory()->null_value());
- Map::EnsureDescriptorSlack(map, 1);
+ Map::SetPrototype(isolate(), map, isolate_->factory()->null_value());
+ Map::EnsureDescriptorSlack(isolate_, map, 1);
native_context()->set_js_module_namespace_map(*map);
{ // Install @@toStringTag.
@@ -3213,8 +3336,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- I t e r a t o r R e s u l t
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 2);
- Map::SetPrototype(map, isolate->initial_object_prototype());
- Map::EnsureDescriptorSlack(map, 2);
+ Map::SetPrototype(isolate(), map, isolate_->initial_object_prototype());
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // value
Descriptor d = Descriptor::DataField(factory->value_string(),
@@ -3236,28 +3359,31 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- W e a k M a p
Handle<JSFunction> cons = InstallFunction(
- global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize, 0,
+ isolate_, global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize, 0,
factory->the_hole_value(), Builtins::kWeakMapConstructor);
- InstallWithIntrinsicDefaultProto(isolate, cons,
+ InstallWithIntrinsicDefaultProto(isolate_, cons,
Context::JS_WEAK_MAP_FUN_INDEX);
- Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(cons->shared(), isolate_);
shared->DontAdaptArguments();
shared->set_length(0);
// Setup %WeakMapPrototype%.
- Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()),
+ isolate());
- SimpleInstallFunction(prototype, "delete",
+ SimpleInstallFunction(isolate_, prototype, "delete",
Builtins::kWeakMapPrototypeDelete, 1, true);
- SimpleInstallFunction(prototype, "get", Builtins::kWeakMapGet, 1, true);
- SimpleInstallFunction(prototype, "has", Builtins::kWeakMapHas, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "get", Builtins::kWeakMapGet, 1,
+ true);
+ SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakMapHas, 1,
+ true);
Handle<JSFunction> weakmap_set = SimpleInstallFunction(
- prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
+ isolate_, prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
native_context()->set_weakmap_set(*weakmap_set);
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakMap"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -3266,27 +3392,29 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- W e a k S e t
Handle<JSFunction> cons = InstallFunction(
- global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize, 0,
+ isolate_, global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize, 0,
factory->the_hole_value(), Builtins::kWeakSetConstructor);
- InstallWithIntrinsicDefaultProto(isolate, cons,
+ InstallWithIntrinsicDefaultProto(isolate_, cons,
Context::JS_WEAK_SET_FUN_INDEX);
- Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(cons->shared(), isolate_);
shared->DontAdaptArguments();
shared->set_length(0);
// Setup %WeakSetPrototype%.
- Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()),
+ isolate());
- SimpleInstallFunction(prototype, "delete",
+ SimpleInstallFunction(isolate_, prototype, "delete",
Builtins::kWeakSetPrototypeDelete, 1, true);
- SimpleInstallFunction(prototype, "has", Builtins::kWeakSetHas, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakSetHas, 1,
+ true);
Handle<JSFunction> weakset_add = SimpleInstallFunction(
- prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
+ isolate_, prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
native_context()->set_weakset_add(*weakset_add);
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(),
+ isolate_, prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakSet"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -3298,8 +3426,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Proxy function map has prototype slot for storing initial map but does
// not have a prototype property.
- Handle<Map> proxy_function_map =
- Map::Copy(isolate->strict_function_without_prototype_map(), "Proxy");
+ Handle<Map> proxy_function_map = Map::Copy(
+ isolate_, isolate_->strict_function_without_prototype_map(), "Proxy");
// Re-set the unused property fields after changing the instance size.
// TODO(ulan): Do not change instance size after map creation.
int unused_property_fields = proxy_function_map->UnusedPropertyFields();
@@ -3317,21 +3445,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
name, proxy_function_map, Builtins::kProxyConstructor);
Handle<JSFunction> proxy_function = factory->NewFunction(args);
- JSFunction::SetInitialMap(proxy_function, isolate->proxy_map(),
+ JSFunction::SetInitialMap(proxy_function, isolate_->proxy_map(),
factory->null_value());
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
native_context()->set_proxy_function(*proxy_function);
- InstallFunction(global, name, proxy_function, factory->Object_string());
+ InstallFunction(isolate_, global, name, proxy_function,
+ factory->Object_string());
- SimpleInstallFunction(proxy_function, "revocable",
+ SimpleInstallFunction(isolate_, proxy_function, "revocable",
Builtins::kProxyRevocable, 2, true);
{ // Internal: ProxyRevoke
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kProxyRevoke, factory->empty_string(), 0);
+ isolate_, Builtins::kProxyRevoke, factory->empty_string(), 0);
native_context()->set_proxy_revoke_shared_fun(*info);
}
}
@@ -3339,45 +3468,48 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e f l e c t
Handle<String> reflect_string = factory->InternalizeUtf8String("Reflect");
Handle<JSObject> reflect =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
+ factory->NewJSObject(isolate_->object_function(), TENURED);
+ JSObject::AddProperty(isolate_, global, reflect_string, reflect, DONT_ENUM);
- Handle<JSFunction> define_property =
- SimpleInstallFunction(reflect, factory->defineProperty_string(),
- Builtins::kReflectDefineProperty, 3, true);
+ Handle<JSFunction> define_property = SimpleInstallFunction(
+ isolate_, reflect, factory->defineProperty_string(),
+ Builtins::kReflectDefineProperty, 3, true);
native_context()->set_reflect_define_property(*define_property);
- Handle<JSFunction> delete_property =
- SimpleInstallFunction(reflect, factory->deleteProperty_string(),
- Builtins::kReflectDeleteProperty, 2, true);
+ Handle<JSFunction> delete_property = SimpleInstallFunction(
+ isolate_, reflect, factory->deleteProperty_string(),
+ Builtins::kReflectDeleteProperty, 2, true);
native_context()->set_reflect_delete_property(*delete_property);
- Handle<JSFunction> apply = SimpleInstallFunction(
- reflect, factory->apply_string(), Builtins::kReflectApply, 3, false);
+ Handle<JSFunction> apply =
+ SimpleInstallFunction(isolate_, reflect, factory->apply_string(),
+ Builtins::kReflectApply, 3, false);
native_context()->set_reflect_apply(*apply);
Handle<JSFunction> construct =
- SimpleInstallFunction(reflect, factory->construct_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->construct_string(),
Builtins::kReflectConstruct, 2, false);
native_context()->set_reflect_construct(*construct);
- SimpleInstallFunction(reflect, factory->get_string(), Builtins::kReflectGet,
- 2, false);
- SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->get_string(),
+ Builtins::kReflectGet, 2, false);
+ SimpleInstallFunction(isolate_, reflect,
+ factory->getOwnPropertyDescriptor_string(),
Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
- SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->getPrototypeOf_string(),
Builtins::kReflectGetPrototypeOf, 1, true);
- SimpleInstallFunction(reflect, factory->has_string(), Builtins::kReflectHas,
- 2, true);
- SimpleInstallFunction(reflect, factory->isExtensible_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->has_string(),
+ Builtins::kReflectHas, 2, true);
+ SimpleInstallFunction(isolate_, reflect, factory->isExtensible_string(),
Builtins::kReflectIsExtensible, 1, true);
- SimpleInstallFunction(reflect, factory->ownKeys_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->ownKeys_string(),
Builtins::kReflectOwnKeys, 1, true);
- SimpleInstallFunction(reflect, factory->preventExtensions_string(),
+ SimpleInstallFunction(isolate_, reflect,
+ factory->preventExtensions_string(),
Builtins::kReflectPreventExtensions, 1, true);
- SimpleInstallFunction(reflect, factory->set_string(), Builtins::kReflectSet,
- 3, false);
- SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
+ SimpleInstallFunction(isolate_, reflect, factory->set_string(),
+ Builtins::kReflectSet, 3, false);
+ SimpleInstallFunction(isolate_, reflect, factory->setPrototypeOf_string(),
Builtins::kReflectSetPrototypeOf, 2, true);
}
@@ -3387,11 +3519,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
TERMINAL_FAST_ELEMENTS_KIND, 0);
map->SetConstructor(native_context()->object_function());
map->set_is_callable(true);
- Map::SetPrototype(map, empty_function);
+ Map::SetPrototype(isolate(), map, empty_function);
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
Descriptor d = Descriptor::AccessorConstant(
@@ -3408,7 +3540,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
native_context()->set_bound_function_without_constructor_map(*map);
- map = Map::Copy(map, "IsConstructor");
+ map = Map::Copy(isolate_, map, "IsConstructor");
map->set_is_constructor(true);
native_context()->set_bound_function_with_constructor_map(*map);
}
@@ -3416,14 +3548,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- sloppy arguments map
Handle<String> arguments_string = factory->Arguments_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- arguments_string, isolate->initial_object_prototype(),
+ arguments_string, isolate_->initial_object_prototype(),
JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, 2,
Builtins::kIllegal, MUTABLE);
Handle<JSFunction> function = factory->NewFunction(args);
- Handle<Map> map(function->initial_map());
+ Handle<Map> map(function->initial_map(), isolate());
// Create the descriptor array for the arguments object.
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
Descriptor d = Descriptor::DataField(
@@ -3446,13 +3578,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- fast and slow aliased arguments map
- Handle<Map> map = isolate->sloppy_arguments_map();
- map = Map::Copy(map, "FastAliasedArguments");
+ Handle<Map> map = isolate_->sloppy_arguments_map();
+ map = Map::Copy(isolate_, map, "FastAliasedArguments");
map->set_elements_kind(FAST_SLOPPY_ARGUMENTS_ELEMENTS);
DCHECK_EQ(2, map->GetInObjectProperties());
native_context()->set_fast_aliased_arguments_map(*map);
- map = Map::Copy(map, "SlowAliasedArguments");
+ map = Map::Copy(isolate_, map, "SlowAliasedArguments");
map->set_elements_kind(SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
DCHECK_EQ(2, map->GetInObjectProperties());
native_context()->set_slow_aliased_arguments_map(*map);
@@ -3475,7 +3607,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Map> map = factory->NewMap(
JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1);
// Create the descriptor array for the arguments object.
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
Descriptor d = Descriptor::DataField(
@@ -3491,8 +3623,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// @@iterator method is added later.
DCHECK_EQ(native_context()->object_function()->prototype(),
- *isolate->initial_object_prototype());
- Map::SetPrototype(map, isolate->initial_object_prototype());
+ *isolate_->initial_object_prototype());
+ Map::SetPrototype(isolate(), map, isolate_->initial_object_prototype());
// Copy constructor from the sloppy arguments boilerplate.
map->SetConstructor(
@@ -3507,7 +3639,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- context extension
// Create a function for the context extension objects.
Handle<JSFunction> context_extension_fun =
- CreateFunction(isolate, factory->empty_string(),
+ CreateFunction(isolate_, factory->empty_string(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE, JSObject::kHeaderSize,
0, factory->the_hole_value(), Builtins::kIllegal);
native_context()->set_context_extension_function(*context_extension_fun);
@@ -3516,7 +3648,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// Set up the call-as-function delegate.
Handle<JSFunction> delegate =
- SimpleCreateFunction(isolate, factory->empty_string(),
+ SimpleCreateFunction(isolate_, factory->empty_string(),
Builtins::kHandleApiCallAsFunction, 0, false);
native_context()->set_call_as_function_delegate(*delegate);
}
@@ -3524,7 +3656,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// Set up the call-as-constructor delegate.
Handle<JSFunction> delegate =
- SimpleCreateFunction(isolate, factory->empty_string(),
+ SimpleCreateFunction(isolate_, factory->empty_string(),
Builtins::kHandleApiCallAsConstructor, 0, false);
native_context()->set_call_as_constructor_delegate(*delegate);
}
@@ -3532,16 +3664,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
ElementsKind elements_kind) {
- Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+ Handle<JSObject> global =
+ Handle<JSObject>(native_context()->global_object(), isolate());
- Handle<JSObject> typed_array_prototype =
- Handle<JSObject>(isolate()->typed_array_prototype());
- Handle<JSFunction> typed_array_function =
- Handle<JSFunction>(isolate()->typed_array_function());
+ Handle<JSObject> typed_array_prototype = isolate()->typed_array_prototype();
+ Handle<JSFunction> typed_array_function = isolate()->typed_array_function();
Handle<JSFunction> result = InstallFunction(
- global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
- 0, factory()->the_hole_value(), Builtins::kTypedArrayConstructor);
+ isolate(), global, name, JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSizeWithEmbedderFields, 0, factory()->the_hole_value(),
+ Builtins::kTypedArrayConstructor);
result->initial_map()->set_elements_kind(elements_kind);
result->shared()->DontAdaptArguments();
@@ -3628,7 +3760,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
NativesFlag natives_flag) {
SuppressDebug compiling_natives(isolate->debug());
- Handle<Context> context(isolate->context());
+ Handle<Context> context(isolate->context(), isolate);
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
@@ -3690,10 +3822,10 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
// function and insert it into the cache.
Vector<const char> name = CStrVector(extension->name());
SourceCodeCache* cache = isolate->bootstrapper()->extensions_cache();
- Handle<Context> context(isolate->context());
+ Handle<Context> context(isolate->context(), isolate);
DCHECK(context->IsNativeContext());
- if (!cache->Lookup(name, &function_info)) {
+ if (!cache->Lookup(isolate, name, &function_info)) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
@@ -3702,7 +3834,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
extension, nullptr, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
- cache->Add(name, function_info);
+ cache->Add(isolate, name, function_info);
}
// Set up the function context. Conceptually, we should clone the
@@ -3719,17 +3851,16 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
.is_null();
}
-
-static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
+static Handle<JSObject> ResolveBuiltinIdHolder(Isolate* isolate,
+ Handle<Context> native_context,
const char* holder_expr) {
- Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
- Handle<JSGlobalObject> global(native_context->global_object());
+ Handle<JSGlobalObject> global(native_context->global_object(), isolate);
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == nullptr) {
return Handle<JSObject>::cast(
Object::GetPropertyOrElement(
- global, factory->InternalizeUtf8String(holder_expr))
+ isolate, global, factory->InternalizeUtf8String(holder_expr))
.ToHandleChecked());
}
const char* inner = period_pos + 1;
@@ -3739,15 +3870,16 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Handle<String> property_string = factory->InternalizeUtf8String(property);
DCHECK(!property_string.is_null());
Handle<JSObject> object = Handle<JSObject>::cast(
- JSReceiver::GetProperty(global, property_string).ToHandleChecked());
+ JSReceiver::GetProperty(isolate, global, property_string)
+ .ToHandleChecked());
if (strcmp("prototype", inner) == 0) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- return Handle<JSObject>(JSObject::cast(function->prototype()));
+ return Handle<JSObject>(JSObject::cast(function->prototype()), isolate);
}
Handle<String> inner_string = factory->InternalizeUtf8String(inner);
DCHECK(!inner_string.is_null());
Handle<Object> value =
- JSReceiver::GetProperty(object, inner_string).ToHandleChecked();
+ JSReceiver::GetProperty(isolate, object, inner_string).ToHandleChecked();
return Handle<JSObject>::cast(value);
}
@@ -3769,14 +3901,15 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
if (natives_key->AsArrayIndex(&dummy_index)) break;
Handle<Object> utils = isolate()->natives_utils_object();
Handle<JSObject> global = isolate()->global_object();
- JSObject::AddProperty(global, natives_key, utils, DONT_ENUM);
+ JSObject::AddProperty(isolate(), global, natives_key, utils, DONT_ENUM);
break;
}
}
// The utils object can be removed for cases that reach this point.
- native_context()->set_natives_utils_object(heap()->undefined_value());
- native_context()->set_extras_utils_object(heap()->undefined_value());
+ HeapObject* undefined = ReadOnlyRoots(heap()).undefined_value();
+ native_context()->set_natives_utils_object(undefined);
+ native_context()->set_extras_utils_object(undefined);
}
@@ -3787,34 +3920,36 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<Context> native_context = isolate->native_context();
#define EXPORT_PRIVATE_SYMBOL(NAME) \
Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
+ JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
PRIVATE_SYMBOL_LIST(EXPORT_PRIVATE_SYMBOL)
#undef EXPORT_PRIVATE_SYMBOL
#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
+ JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
PUBLIC_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
WELL_KNOWN_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
#undef EXPORT_PUBLIC_SYMBOL
Handle<JSObject> iterator_prototype(
- native_context->initial_iterator_prototype());
+ native_context->initial_iterator_prototype(), isolate);
- JSObject::AddProperty(container,
+ JSObject::AddProperty(isolate, container,
factory->InternalizeUtf8String("IteratorPrototype"),
iterator_prototype, NONE);
{
- PrototypeIterator iter(native_context->generator_function_map());
- Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>());
+ PrototypeIterator iter(isolate, native_context->generator_function_map());
+ Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>(),
+ isolate);
JSObject::AddProperty(
- container, factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
+ isolate, container,
+ factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
generator_function_prototype, NONE);
Handle<JSFunction> generator_function_function = InstallFunction(
- container, "GeneratorFunction", JS_FUNCTION_TYPE,
+ isolate, container, "GeneratorFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, generator_function_prototype,
Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
@@ -3828,7 +3963,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
JSObject::ForceSetPrototype(generator_function_function,
isolate->function_function());
JSObject::AddProperty(
- generator_function_prototype, factory->constructor_string(),
+ isolate, generator_function_prototype, factory->constructor_string(),
generator_function_function,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -3837,12 +3972,13 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{
- PrototypeIterator iter(native_context->async_generator_function_map());
+ PrototypeIterator iter(isolate,
+ native_context->async_generator_function_map());
Handle<JSObject> async_generator_function_prototype(
- iter.GetCurrent<JSObject>());
+ iter.GetCurrent<JSObject>(), isolate);
Handle<JSFunction> async_generator_function_function = InstallFunction(
- container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
+ isolate, container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, async_generator_function_prototype,
Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
@@ -3857,8 +3993,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
isolate->function_function());
JSObject::AddProperty(
- async_generator_function_prototype, factory->constructor_string(),
- async_generator_function_function,
+ isolate, async_generator_function_prototype,
+ factory->constructor_string(), async_generator_function_function,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
native_context->async_generator_function_map()->SetConstructor(
@@ -3875,26 +4011,26 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), name,
+ isolate, prototype, factory->to_string_tag_symbol(), name,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the next function on the {prototype}.
- SimpleInstallFunction(prototype, "next",
+ SimpleInstallFunction(isolate, prototype, "next",
Builtins::kSetIteratorPrototypeNext, 0, true,
kSetIteratorNext);
// Setup SetIterator constructor.
- Handle<JSFunction> set_iterator_function =
- InstallFunction(container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
- JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> set_iterator_function = InstallFunction(
+ isolate, container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
+ JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
set_iterator_function->shared()->set_native(false);
Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
isolate);
native_context->set_set_value_iterator_map(*set_value_iterator_map);
- Handle<Map> set_key_value_iterator_map =
- Map::Copy(set_value_iterator_map, "JS_SET_KEY_VALUE_ITERATOR_TYPE");
+ Handle<Map> set_key_value_iterator_map = Map::Copy(
+ isolate, set_value_iterator_map, "JS_SET_KEY_VALUE_ITERATOR_TYPE");
set_key_value_iterator_map->set_instance_type(
JS_SET_KEY_VALUE_ITERATOR_TYPE);
native_context->set_set_key_value_iterator_map(*set_key_value_iterator_map);
@@ -3910,150 +4046,44 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- prototype, factory->to_string_tag_symbol(), name,
+ isolate, prototype, factory->to_string_tag_symbol(), name,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the next function on the {prototype}.
- SimpleInstallFunction(prototype, "next",
+ SimpleInstallFunction(isolate, prototype, "next",
Builtins::kMapIteratorPrototypeNext, 0, true,
kMapIteratorNext);
// Setup MapIterator constructor.
- Handle<JSFunction> map_iterator_function =
- InstallFunction(container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
- JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> map_iterator_function = InstallFunction(
+ isolate, container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
+ JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
map_iterator_function->shared()->set_native(false);
Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
isolate);
native_context->set_map_key_iterator_map(*map_key_iterator_map);
- Handle<Map> map_key_value_iterator_map =
- Map::Copy(map_key_iterator_map, "JS_MAP_KEY_VALUE_ITERATOR_TYPE");
+ Handle<Map> map_key_value_iterator_map = Map::Copy(
+ isolate, map_key_iterator_map, "JS_MAP_KEY_VALUE_ITERATOR_TYPE");
map_key_value_iterator_map->set_instance_type(
JS_MAP_KEY_VALUE_ITERATOR_TYPE);
native_context->set_map_key_value_iterator_map(*map_key_value_iterator_map);
Handle<Map> map_value_iterator_map =
- Map::Copy(map_key_iterator_map, "JS_MAP_VALUE_ITERATOR_TYPE");
+ Map::Copy(isolate, map_key_iterator_map, "JS_MAP_VALUE_ITERATOR_TYPE");
map_value_iterator_map->set_instance_type(JS_MAP_VALUE_ITERATOR_TYPE);
native_context->set_map_value_iterator_map(*map_value_iterator_map);
}
- { // -- S c r i p t
- Handle<String> name = factory->Script_string();
- Handle<JSFunction> script_fun = InstallFunction(
- container, name, JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kUnsupportedThrower, DONT_ENUM);
- native_context->set_script_function(*script_fun);
-
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- Map::EnsureDescriptorSlack(script_map, 15);
-
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- { // column_offset
- Handle<AccessorInfo> info = factory->script_column_offset_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // id
- Handle<AccessorInfo> info = factory->script_id_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // name
- Handle<AccessorInfo> info = factory->script_name_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // line_offset
- Handle<AccessorInfo> info = factory->script_line_offset_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // source
- Handle<AccessorInfo> info = factory->script_source_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // type
- Handle<AccessorInfo> info = factory->script_type_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // compilation_type
- Handle<AccessorInfo> info = factory->script_compilation_type_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // context_data
- Handle<AccessorInfo> info = factory->script_context_data_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // eval_from_script
- Handle<AccessorInfo> info = factory->script_eval_from_script_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // eval_from_script_position
- Handle<AccessorInfo> info =
- factory->script_eval_from_script_position_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // eval_from_function_name
- Handle<AccessorInfo> info =
- factory->script_eval_from_function_name_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // source_url
- Handle<AccessorInfo> info = factory->script_source_url_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- { // source_mapping_url
- Handle<AccessorInfo> info = factory->script_source_mapping_url_accessor();
- Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
- info, attribs);
- script_map->AppendDescriptor(&d);
- }
- }
-
{ // -- A s y n c F u n c t i o n
// Builtin functions for AsyncFunction.
- PrototypeIterator iter(native_context->async_function_map());
- Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+ PrototypeIterator iter(isolate, native_context->async_function_map());
+ Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>(),
+ isolate);
Handle<JSFunction> async_function_constructor = InstallFunction(
- container, "AsyncFunction", JS_FUNCTION_TYPE,
+ isolate, container, "AsyncFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, async_function_prototype,
Builtins::kAsyncFunctionConstructor);
async_function_constructor->set_prototype_or_initial_map(
@@ -4065,7 +4095,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
isolate->function_function());
JSObject::AddProperty(
- async_function_prototype, factory->constructor_string(),
+ isolate, async_function_prototype, factory->constructor_string(),
async_function_constructor,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -4110,7 +4140,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(),
- Builtins::kAsyncFunctionPromiseRelease, 1, false);
+ Builtins::kAsyncFunctionPromiseRelease, 2, false);
native_context->set_async_function_promise_release(*function);
}
}
@@ -4123,15 +4153,15 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// CallSiteUtils::Construct to create CallSite objects.
Handle<JSFunction> callsite_fun = InstallFunction(
- container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
- factory->the_hole_value(), Builtins::kUnsupportedThrower);
+ isolate, container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ 0, factory->the_hole_value(), Builtins::kUnsupportedThrower);
callsite_fun->shared()->DontAdaptArguments();
isolate->native_context()->set_callsite_function(*callsite_fun);
{
// Setup CallSite.prototype.
Handle<JSObject> prototype(
- JSObject::cast(callsite_fun->instance_prototype()));
+ JSObject::cast(callsite_fun->instance_prototype()), isolate);
struct FunctionInfo {
const char* name;
@@ -4162,7 +4192,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> fun;
for (const FunctionInfo& info : infos) {
- SimpleInstallFunction(prototype, info.name, info.id, 0, true, attrs);
+ SimpleInstallFunction(isolate, prototype, info.name, info.id, 0, true,
+ attrs);
}
}
}
@@ -4173,8 +4204,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
@@ -4182,65 +4211,50 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
-void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
- const char* name, Handle<Symbol> value) {
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context->global_object()));
- Handle<String> symbol_string = factory->InternalizeUtf8String("Symbol");
- Handle<JSObject> symbol = Handle<JSObject>::cast(
- JSObject::GetProperty(global, symbol_string).ToHandleChecked());
- Handle<String> name_string = factory->InternalizeUtf8String(name);
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- JSObject::AddProperty(symbol, name_string, value, attributes);
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
- Handle<JSGlobalObject> global(native_context()->global_object());
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Factory* factory = isolate()->factory();
{
Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
- JSObject::AddProperty(global, name, isolate->shared_array_buffer_fun(),
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, name,
+ isolate()->shared_array_buffer_fun(), DONT_ENUM);
}
{
Handle<String> name = factory->InternalizeUtf8String("Atomics");
- JSObject::AddProperty(global, name, isolate->atomics_object(), DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, name, isolate()->atomics_object(),
+ DONT_ENUM);
JSObject::AddProperty(
- isolate->atomics_object(), factory->to_string_tag_symbol(), name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ isolate_, isolate()->atomics_object(), factory->to_string_tag_symbol(),
+ name, static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
}
void Genesis::InitializeGlobal_harmony_string_trimming() {
if (!FLAG_harmony_string_trimming) return;
- Handle<JSGlobalObject> global(native_context()->global_object());
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Factory* factory = isolate()->factory();
Handle<JSObject> string_prototype(
- native_context()->initial_string_prototype());
+ native_context()->initial_string_prototype(), isolate());
{
Handle<String> trim_left_name = factory->InternalizeUtf8String("trimLeft");
Handle<String> trim_start_name =
factory->InternalizeUtf8String("trimStart");
Handle<JSFunction> trim_left_fun = Handle<JSFunction>::cast(
- JSObject::GetProperty(string_prototype, trim_left_name)
+ JSObject::GetProperty(isolate_, string_prototype, trim_left_name)
.ToHandleChecked());
- JSObject::AddProperty(string_prototype, trim_start_name, trim_left_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, string_prototype, trim_start_name,
+ trim_left_fun, DONT_ENUM);
trim_left_fun->shared()->SetName(*trim_start_name);
}
@@ -4249,73 +4263,91 @@ void Genesis::InitializeGlobal_harmony_string_trimming() {
factory->InternalizeUtf8String("trimRight");
Handle<String> trim_end_name = factory->InternalizeUtf8String("trimEnd");
Handle<JSFunction> trim_right_fun = Handle<JSFunction>::cast(
- JSObject::GetProperty(string_prototype, trim_right_name)
+ JSObject::GetProperty(isolate_, string_prototype, trim_right_name)
.ToHandleChecked());
- JSObject::AddProperty(string_prototype, trim_end_name, trim_right_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate_, string_prototype, trim_end_name,
+ trim_right_fun, DONT_ENUM);
trim_right_fun->shared()->SetName(*trim_end_name);
}
}
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
- Handle<JSFunction> array_constructor(native_context()->array_function());
+ Handle<JSFunction> array_constructor(native_context()->array_function(),
+ isolate());
Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()));
+ JSObject::cast(array_constructor->instance_prototype()), isolate());
Handle<Object> values_iterator =
- JSObject::GetProperty(array_prototype, factory()->iterator_symbol())
+ JSObject::GetProperty(isolate(), array_prototype,
+ factory()->iterator_symbol())
.ToHandleChecked();
DCHECK(values_iterator->IsJSFunction());
- JSObject::AddProperty(array_prototype, factory()->values_string(),
+ JSObject::AddProperty(isolate(), array_prototype, factory()->values_string(),
values_iterator, DONT_ENUM);
Handle<Object> unscopables =
- JSObject::GetProperty(array_prototype, factory()->unscopables_symbol())
+ JSObject::GetProperty(isolate(), array_prototype,
+ factory()->unscopables_symbol())
.ToHandleChecked();
DCHECK(unscopables->IsJSObject());
- JSObject::AddProperty(Handle<JSObject>::cast(unscopables),
+ JSObject::AddProperty(isolate(), Handle<JSObject>::cast(unscopables),
factory()->values_string(), factory()->true_value(),
NONE);
}
-void Genesis::InitializeGlobal_harmony_array_flatten() {
- if (!FLAG_harmony_array_flatten) return;
- Handle<JSFunction> array_constructor(native_context()->array_function());
+void Genesis::InitializeGlobal_harmony_array_flat() {
+ if (!FLAG_harmony_array_flat) return;
+ Handle<JSFunction> array_constructor(native_context()->array_function(),
+ isolate());
Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()));
- SimpleInstallFunction(array_prototype, "flatten",
- Builtins::kArrayPrototypeFlatten, 0, false, DONT_ENUM);
- SimpleInstallFunction(array_prototype, "flatMap",
+ JSObject::cast(array_constructor->instance_prototype()), isolate());
+ SimpleInstallFunction(isolate(), array_prototype, "flat",
+ Builtins::kArrayPrototypeFlat, 0, false, DONT_ENUM);
+ SimpleInstallFunction(isolate(), array_prototype, "flatMap",
Builtins::kArrayPrototypeFlatMap, 1, false, DONT_ENUM);
}
+void Genesis::InitializeGlobal_harmony_symbol_description() {
+ if (!FLAG_harmony_symbol_description) return;
+
+ // Symbol.prototype.description
+ Handle<JSFunction> symbol_fun(native_context()->symbol_function(), isolate());
+ Handle<JSObject> symbol_prototype(
+ JSObject::cast(symbol_fun->instance_prototype()), isolate());
+ SimpleInstallGetter(isolate(), symbol_prototype,
+ factory()->InternalizeUtf8String("description"),
+ Builtins::kSymbolPrototypeDescriptionGetter, true);
+}
+
void Genesis::InitializeGlobal_harmony_string_matchall() {
if (!FLAG_harmony_string_matchall) return;
{ // String.prototype.matchAll
- Handle<JSFunction> string_fun(native_context()->string_function());
+ Handle<JSFunction> string_fun(native_context()->string_function(),
+ isolate());
Handle<JSObject> string_prototype(
- JSObject::cast(string_fun->instance_prototype()));
+ JSObject::cast(string_fun->instance_prototype()), isolate());
- SimpleInstallFunction(string_prototype, "matchAll",
+ SimpleInstallFunction(isolate(), string_prototype, "matchAll",
Builtins::kStringPrototypeMatchAll, 1, true);
}
{ // RegExp.prototype[@@matchAll]
- Handle<JSFunction> regexp_fun(native_context()->regexp_function());
+ Handle<JSFunction> regexp_fun(native_context()->regexp_function(),
+ isolate());
Handle<JSObject> regexp_prototype(
- JSObject::cast(regexp_fun->instance_prototype()));
- SimpleInstallFunction(regexp_prototype, factory()->match_all_symbol(),
- "[Symbol.matchAll]",
+ JSObject::cast(regexp_fun->instance_prototype()), isolate());
+ SimpleInstallFunction(isolate(), regexp_prototype,
+ factory()->match_all_symbol(), "[Symbol.matchAll]",
Builtins::kRegExpPrototypeMatchAll, 1, true);
- Handle<Map> regexp_prototype_map(regexp_prototype->map());
+ Handle<Map> regexp_prototype_map(regexp_prototype->map(), isolate());
Map::SetShouldBeFastPrototypeMap(regexp_prototype_map, true, isolate());
native_context()->set_regexp_prototype_map(*regexp_prototype_map);
}
{ // --- R e g E x p S t r i n g I t e r a t o r ---
Handle<JSObject> iterator_prototype(
- native_context()->initial_iterator_prototype());
+ native_context()->initial_iterator_prototype(), isolate());
Handle<JSObject> regexp_string_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
@@ -4323,11 +4355,12 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
iterator_prototype);
JSObject::AddProperty(
- regexp_string_iterator_prototype, factory()->to_string_tag_symbol(),
+ isolate(), regexp_string_iterator_prototype,
+ factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("RegExp String Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(regexp_string_iterator_prototype, "next",
+ SimpleInstallFunction(isolate(), regexp_string_iterator_prototype, "next",
Builtins::kRegExpStringIteratorPrototypeNext, 0,
true);
@@ -4341,58 +4374,16 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
}
{ // @@matchAll Symbol
- Handle<JSFunction> symbol_fun(native_context()->symbol_function());
+ Handle<JSFunction> symbol_fun(native_context()->symbol_function(),
+ isolate());
InstallConstant(isolate(), symbol_fun, "matchAll",
factory()->match_all_symbol());
}
}
-void Genesis::InitializeGlobal_harmony_promise_finally() {
- if (!FLAG_harmony_promise_finally) return;
-
- Handle<JSFunction> constructor(native_context()->promise_function());
- Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
- SimpleInstallFunction(prototype, "finally",
- Builtins::kPromisePrototypeFinally, 1, true, DONT_ENUM);
-
- // The promise prototype map has changed because we added a property
- // to prototype, so we update the saved map.
- Handle<Map> prototype_map(prototype->map());
- Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseThenFinally, factory()->empty_string(), 1);
- info->set_native(true);
- native_context()->set_promise_then_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseCatchFinally, factory()->empty_string(),
- 1);
- info->set_native(true);
- native_context()->set_promise_catch_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseValueThunkFinally,
- factory()->empty_string(), 0);
- native_context()->set_promise_value_thunk_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseThrowerFinally, factory()->empty_string(),
- 0);
- native_context()->set_promise_thrower_finally_shared_fun(*info);
- }
-}
-
void Genesis::InitializeGlobal_harmony_bigint() {
Factory* factory = isolate()->factory();
- Handle<JSGlobalObject> global(native_context()->global_object());
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
if (!FLAG_harmony_bigint) {
// Typed arrays are installed by default; remove them if the flag is off.
CHECK(JSObject::DeleteProperty(
@@ -4404,9 +4395,9 @@ void Genesis::InitializeGlobal_harmony_bigint() {
return;
}
- Handle<JSFunction> bigint_fun =
- InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kBigIntConstructor);
+ Handle<JSFunction> bigint_fun = InstallFunction(
+ isolate(), global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
+ factory->the_hole_value(), Builtins::kBigIntConstructor);
bigint_fun->shared()->set_builtin_function_id(kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
bigint_fun->shared()->set_length(1);
@@ -4415,29 +4406,30 @@ void Genesis::InitializeGlobal_harmony_bigint() {
// Install the properties of the BigInt constructor.
// asUintN(bits, bigint)
- SimpleInstallFunction(bigint_fun, "asUintN", Builtins::kBigIntAsUintN, 2,
- false);
+ SimpleInstallFunction(isolate(), bigint_fun, "asUintN",
+ Builtins::kBigIntAsUintN, 2, false);
// asIntN(bits, bigint)
- SimpleInstallFunction(bigint_fun, "asIntN", Builtins::kBigIntAsIntN, 2,
- false);
+ SimpleInstallFunction(isolate(), bigint_fun, "asIntN",
+ Builtins::kBigIntAsIntN, 2, false);
// Set up the %BigIntPrototype%.
- Handle<JSObject> prototype(JSObject::cast(bigint_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(bigint_fun->instance_prototype()),
+ isolate());
JSFunction::SetPrototype(bigint_fun, prototype);
// Install the properties of the BigInt.prototype.
// "constructor" is created implicitly by InstallFunction() above.
// toLocaleString([reserved1 [, reserved2]])
- SimpleInstallFunction(prototype, "toLocaleString",
+ SimpleInstallFunction(isolate(), prototype, "toLocaleString",
Builtins::kBigIntPrototypeToLocaleString, 0, false);
// toString([radix])
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate(), prototype, "toString",
Builtins::kBigIntPrototypeToString, 0, false);
// valueOf()
- SimpleInstallFunction(prototype, "valueOf", Builtins::kBigIntPrototypeValueOf,
- 0, false);
+ SimpleInstallFunction(isolate(), prototype, "valueOf",
+ Builtins::kBigIntPrototypeValueOf, 0, false);
// @@toStringTag
- JSObject::AddProperty(prototype, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate(), prototype, factory->to_string_tag_symbol(),
factory->BigInt_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
@@ -4445,17 +4437,69 @@ void Genesis::InitializeGlobal_harmony_bigint() {
// TODO(jkummerow): Move these to the "DataView" section when dropping the
// FLAG_harmony_bigint.
Handle<JSObject> dataview_prototype(
- JSObject::cast(native_context()->data_view_fun()->instance_prototype()));
- SimpleInstallFunction(dataview_prototype, "getBigInt64",
+ JSObject::cast(native_context()->data_view_fun()->instance_prototype()),
+ isolate());
+ SimpleInstallFunction(isolate(), dataview_prototype, "getBigInt64",
Builtins::kDataViewPrototypeGetBigInt64, 1, false);
- SimpleInstallFunction(dataview_prototype, "setBigInt64",
+ SimpleInstallFunction(isolate(), dataview_prototype, "setBigInt64",
Builtins::kDataViewPrototypeSetBigInt64, 2, false);
- SimpleInstallFunction(dataview_prototype, "getBigUint64",
+ SimpleInstallFunction(isolate(), dataview_prototype, "getBigUint64",
Builtins::kDataViewPrototypeGetBigUint64, 1, false);
- SimpleInstallFunction(dataview_prototype, "setBigUint64",
+ SimpleInstallFunction(isolate(), dataview_prototype, "setBigUint64",
Builtins::kDataViewPrototypeSetBigUint64, 2, false);
}
+void Genesis::InitializeGlobal_harmony_await_optimization() {
+ if (!FLAG_harmony_await_optimization) return;
+
+ // async/await
+ Handle<JSFunction> await_caught_function = SimpleCreateFunction(
+ isolate(), factory()->empty_string(),
+ Builtins::kAsyncFunctionAwaitCaughtOptimized, 2, false);
+ native_context()->set_async_function_await_caught(*await_caught_function);
+
+ Handle<JSFunction> await_uncaught_function = SimpleCreateFunction(
+ isolate(), factory()->empty_string(),
+ Builtins::kAsyncFunctionAwaitUncaughtOptimized, 2, false);
+ native_context()->set_async_function_await_uncaught(*await_uncaught_function);
+
+ // async generators
+ Handle<JSObject> async_iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+
+ SimpleInstallFunction(
+ isolate(), async_iterator_prototype, factory()->async_iterator_symbol(),
+ "[Symbol.asyncIterator]", Builtins::kReturnReceiver, 0, true);
+
+ Handle<JSObject> async_from_sync_iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SimpleInstallFunction(
+ isolate(), async_from_sync_iterator_prototype, factory()->next_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeNextOptimized, 1, true);
+ SimpleInstallFunction(
+ isolate(), async_from_sync_iterator_prototype, factory()->return_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeReturnOptimized, 1, true);
+ SimpleInstallFunction(
+ isolate(), async_from_sync_iterator_prototype, factory()->throw_string(),
+ Builtins::kAsyncFromSyncIteratorPrototypeThrowOptimized, 1, true);
+
+ JSObject::AddProperty(
+ isolate(), async_from_sync_iterator_prototype,
+ factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
+ async_iterator_prototype);
+
+ Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
+ Map::SetPrototype(isolate(), async_from_sync_iterator_map,
+ async_from_sync_iterator_prototype);
+ native_context()->set_async_from_sync_iterator_map(
+ *async_from_sync_iterator_map);
+}
+
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_locale() {
@@ -4463,12 +4507,13 @@ void Genesis::InitializeGlobal_harmony_locale() {
Handle<JSObject> intl = Handle<JSObject>::cast(
JSReceiver::GetProperty(
- Handle<JSReceiver>(native_context()->global_object()),
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
factory()->InternalizeUtf8String("Intl"))
.ToHandleChecked());
Handle<JSFunction> locale_fun = InstallFunction(
- intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0,
+ isolate(), intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0,
factory()->the_hole_value(), Builtins::kLocaleConstructor);
InstallWithIntrinsicDefaultProto(isolate(), locale_fun,
Context::INTL_LOCALE_FUNCTION_INDEX);
@@ -4476,40 +4521,81 @@ void Genesis::InitializeGlobal_harmony_locale() {
locale_fun->shared()->DontAdaptArguments();
// Setup %LocalePrototype%.
- Handle<JSObject> prototype(JSObject::cast(locale_fun->instance_prototype()));
+ Handle<JSObject> prototype(JSObject::cast(locale_fun->instance_prototype()),
+ isolate());
// Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("Locale"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- SimpleInstallFunction(prototype, "toString",
+ SimpleInstallFunction(isolate(), prototype, "toString",
Builtins::kLocalePrototypeToString, 0, false);
// Base locale getters.
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("language"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("language"),
Builtins::kLocalePrototypeLanguage, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("script"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("script"),
Builtins::kLocalePrototypeScript, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("region"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("region"),
Builtins::kLocalePrototypeRegion, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("baseName"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("baseName"),
Builtins::kLocalePrototypeBaseName, true);
// Unicode extension getters.
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("calendar"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("calendar"),
Builtins::kLocalePrototypeCalendar, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("caseFirst"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("caseFirst"),
Builtins::kLocalePrototypeCaseFirst, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("collation"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("collation"),
Builtins::kLocalePrototypeCollation, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("hourCycle"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("hourCycle"),
Builtins::kLocalePrototypeHourCycle, true);
- SimpleInstallGetter(prototype, factory()->InternalizeUtf8String("numeric"),
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("numeric"),
Builtins::kLocalePrototypeNumeric, true);
- SimpleInstallGetter(prototype,
+ SimpleInstallGetter(isolate(), prototype,
factory()->InternalizeUtf8String("numberingSystem"),
Builtins::kLocalePrototypeNumberingSystem, true);
}
+void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
+ if (!FLAG_harmony_intl_relative_time_format) return;
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ Handle<JSFunction> relative_time_format_fun = InstallFunction(
+ isolate(), intl, "RelativeTimeFormat", JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
+ JSRelativeTimeFormat::kSize, 0, factory()->the_hole_value(),
+ Builtins::kRelativeTimeFormatConstructor);
+ relative_time_format_fun->shared()->set_length(0);
+ relative_time_format_fun->shared()->DontAdaptArguments();
+
+ // Setup %RelativeTimeFormatPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(relative_time_format_fun->instance_prototype()),
+ isolate());
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
+ factory()->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kRelativeTimeFormatPrototypeResolvedOptions,
+ 0, false);
+}
+
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
@@ -4518,7 +4604,8 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
// Setup the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(), name,
+ JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
+ name,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Allocate the constructor with the given {prototype}.
@@ -4530,32 +4617,32 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
array_buffer_fun->shared()->set_length(1);
// Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory()->constructor_string(),
+ JSObject::AddProperty(isolate(), prototype, factory()->constructor_string(),
array_buffer_fun, DONT_ENUM);
switch (array_buffer_kind) {
case ARRAY_BUFFER:
- SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
- kArrayBufferIsView);
+ SimpleInstallFunction(
+ isolate(), array_buffer_fun, factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true, DONT_ENUM, kArrayBufferIsView);
// Install the "byteLength" getter on the {prototype}.
- SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
Builtins::kArrayBufferPrototypeGetByteLength, false,
BuiltinFunctionId::kArrayBufferByteLength);
- SimpleInstallFunction(prototype, "slice",
+ SimpleInstallFunction(isolate(), prototype, "slice",
Builtins::kArrayBufferPrototypeSlice, 2, true);
break;
case SHARED_ARRAY_BUFFER:
// Install the "byteLength" getter on the {prototype}.
- SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
Builtins::kSharedArrayBufferPrototypeGetByteLength,
false,
BuiltinFunctionId::kSharedArrayBufferByteLength);
- SimpleInstallFunction(prototype, "slice",
+ SimpleInstallFunction(isolate(), prototype, "slice",
Builtins::kSharedArrayBufferPrototypeSlice, 2,
true);
break;
@@ -4577,18 +4664,18 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> array_function =
- InstallFunction(target, name, JS_ARRAY_TYPE, JSArray::kSize, 0, prototype,
- Builtins::kInternalArrayConstructor);
+ InstallFunction(isolate(), target, name, JS_ARRAY_TYPE, JSArray::kSize, 0,
+ prototype, Builtins::kInternalArrayConstructor);
array_function->shared()->DontAdaptArguments();
- Handle<Map> original_map(array_function->initial_map());
- Handle<Map> initial_map = Map::Copy(original_map, "InternalArray");
+ Handle<Map> original_map(array_function->initial_map(), isolate());
+ Handle<Map> initial_map = Map::Copy(isolate(), original_map, "InternalArray");
initial_map->set_elements_kind(elements_kind);
JSFunction::SetInitialMap(array_function, initial_map, prototype);
// Make "length" magic on instances.
- Map::EnsureDescriptorSlack(initial_map, 1);
+ Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
@@ -4625,7 +4712,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
promise_internal_constructor->shared()->set_native(false);
- InstallFunction(extras_utils, promise_internal_constructor,
+ InstallFunction(isolate(), extras_utils, promise_internal_constructor,
factory()->NewStringFromAsciiChecked("createPromise"));
// v8.rejectPromise(promise, reason)
@@ -4633,7 +4720,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalReject, 2, true);
promise_internal_reject->shared()->set_native(false);
- InstallFunction(extras_utils, promise_internal_reject,
+ InstallFunction(isolate(), extras_utils, promise_internal_reject,
factory()->NewStringFromAsciiChecked("rejectPromise"));
// v8.resolvePromise(promise, resolution)
@@ -4641,10 +4728,10 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalResolve, 2, true);
promise_internal_resolve->shared()->set_native(false);
- InstallFunction(extras_utils, promise_internal_resolve,
+ InstallFunction(isolate(), extras_utils, promise_internal_resolve,
factory()->NewStringFromAsciiChecked("resolvePromise"));
- InstallFunction(extras_utils, isolate()->is_promise(),
+ InstallFunction(isolate(), extras_utils, isolate()->is_promise(),
factory()->NewStringFromAsciiChecked("isPromise"));
int builtin_index = Natives::GetDebuggerCount();
@@ -4697,7 +4784,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Store the map for the %ObjectPrototype% after the natives has been compiled
// and the Object function has been set up.
{
- Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<JSFunction> object_function(native_context()->object_function(),
+ isolate());
DCHECK(JSObject::cast(object_function->initial_map()->prototype())
->HasFastProperties());
native_context()->set_object_function_prototype_map(
@@ -4706,7 +4794,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Store the map for the %StringPrototype% after the natives has been compiled
// and the String function has been set up.
- Handle<JSFunction> string_function(native_context()->string_function());
+ Handle<JSFunction> string_function(native_context()->string_function(),
+ isolate());
JSObject* string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
@@ -4714,54 +4803,56 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
string_function_prototype->map());
Handle<JSGlobalObject> global_object =
- handle(native_context()->global_object());
+ handle(native_context()->global_object(), isolate());
// Install Global.decodeURI.
- SimpleInstallFunction(global_object, "decodeURI", Builtins::kGlobalDecodeURI,
- 1, false, kGlobalDecodeURI);
+ SimpleInstallFunction(isolate(), global_object, "decodeURI",
+ Builtins::kGlobalDecodeURI, 1, false, kGlobalDecodeURI);
// Install Global.decodeURIComponent.
- SimpleInstallFunction(global_object, "decodeURIComponent",
+ SimpleInstallFunction(isolate(), global_object, "decodeURIComponent",
Builtins::kGlobalDecodeURIComponent, 1, false,
kGlobalDecodeURIComponent);
// Install Global.encodeURI.
- SimpleInstallFunction(global_object, "encodeURI", Builtins::kGlobalEncodeURI,
- 1, false, kGlobalEncodeURI);
+ SimpleInstallFunction(isolate(), global_object, "encodeURI",
+ Builtins::kGlobalEncodeURI, 1, false, kGlobalEncodeURI);
// Install Global.encodeURIComponent.
- SimpleInstallFunction(global_object, "encodeURIComponent",
+ SimpleInstallFunction(isolate(), global_object, "encodeURIComponent",
Builtins::kGlobalEncodeURIComponent, 1, false,
kGlobalEncodeURIComponent);
// Install Global.escape.
- SimpleInstallFunction(global_object, "escape", Builtins::kGlobalEscape, 1,
- false, kGlobalEscape);
+ SimpleInstallFunction(isolate(), global_object, "escape",
+ Builtins::kGlobalEscape, 1, false, kGlobalEscape);
// Install Global.unescape.
- SimpleInstallFunction(global_object, "unescape", Builtins::kGlobalUnescape, 1,
- false, kGlobalUnescape);
+ SimpleInstallFunction(isolate(), global_object, "unescape",
+ Builtins::kGlobalUnescape, 1, false, kGlobalUnescape);
// Install Global.eval.
{
- Handle<JSFunction> eval =
- SimpleInstallFunction(global_object, factory()->eval_string(),
- Builtins::kGlobalEval, 1, false);
+ Handle<JSFunction> eval = SimpleInstallFunction(
+ isolate(), global_object, factory()->eval_string(),
+ Builtins::kGlobalEval, 1, false);
native_context()->set_global_eval_fun(*eval);
}
// Install Global.isFinite
- SimpleInstallFunction(global_object, "isFinite", Builtins::kGlobalIsFinite, 1,
- true, kGlobalIsFinite);
+ SimpleInstallFunction(isolate(), global_object, "isFinite",
+ Builtins::kGlobalIsFinite, 1, true, kGlobalIsFinite);
// Install Global.isNaN
- SimpleInstallFunction(global_object, "isNaN", Builtins::kGlobalIsNaN, 1, true,
- kGlobalIsNaN);
+ SimpleInstallFunction(isolate(), global_object, "isNaN",
+ Builtins::kGlobalIsNaN, 1, true, kGlobalIsNaN);
// Install Array builtin functions.
{
- Handle<JSFunction> array_constructor(native_context()->array_function());
- Handle<JSArray> proto(JSArray::cast(array_constructor->prototype()));
+ Handle<JSFunction> array_constructor(native_context()->array_function(),
+ isolate());
+ Handle<JSArray> proto(JSArray::cast(array_constructor->prototype()),
+ isolate());
// Verification of important array prototype properties.
Object* length = proto->length();
@@ -4770,15 +4861,17 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
CHECK(proto->HasSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
- proto->set_elements(heap()->empty_fixed_array());
+ proto->set_elements(ReadOnlyRoots(heap()).empty_fixed_array());
}
// Install InternalArray.prototype.concat
{
Handle<JSFunction> array_constructor(
- native_context()->internal_array_function());
- Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()));
- SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
+ native_context()->internal_array_function(), isolate());
+ Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()),
+ isolate());
+ SimpleInstallFunction(isolate(), proto, "concat", Builtins::kArrayConcat, 1,
+ false);
}
InstallBuiltinFunctionIds();
@@ -4791,7 +4884,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
factory()->NewMap(JS_OBJECT_TYPE, JSAccessorPropertyDescriptor::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 4);
// Create the descriptor array for the property descriptor object.
- Map::EnsureDescriptorSlack(map, 4);
+ Map::EnsureDescriptorSlack(isolate(), map, 4);
{ // get
Descriptor d = Descriptor::DataField(
@@ -4820,7 +4913,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
map->AppendDescriptor(&d);
}
- Map::SetPrototype(map, isolate()->initial_object_prototype());
+ Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
map->SetConstructor(native_context()->object_function());
native_context()->set_accessor_property_descriptor_map(*map);
@@ -4835,7 +4928,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
factory()->NewMap(JS_OBJECT_TYPE, JSDataPropertyDescriptor::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 4);
// Create the descriptor array for the property descriptor object.
- Map::EnsureDescriptorSlack(map, 4);
+ Map::EnsureDescriptorSlack(isolate(), map, 4);
{ // value
Descriptor d = Descriptor::DataField(
@@ -4865,7 +4958,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
map->AppendDescriptor(&d);
}
- Map::SetPrototype(map, isolate()->initial_object_prototype());
+ Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
map->SetConstructor(native_context()->object_function());
native_context()->set_data_property_descriptor_map(*map);
@@ -4877,9 +4970,10 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// JSRegExpResult initial map.
// Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(native_context()->array_function());
+ Handle<JSFunction> array_constructor(native_context()->array_function(),
+ isolate());
Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()));
+ JSObject::cast(array_constructor->instance_prototype()), isolate());
// Add initial map.
Handle<Map> initial_map = factory()->NewMap(
@@ -4889,24 +4983,24 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Set prototype on map.
initial_map->set_has_non_instance_prototype(false);
- Map::SetPrototype(initial_map, array_prototype);
+ Map::SetPrototype(isolate(), initial_map, array_prototype);
// Update map with length accessor from Array and add "index", "input" and
// "groups".
- Map::EnsureDescriptorSlack(initial_map,
+ Map::EnsureDescriptorSlack(isolate(), initial_map,
JSRegExpResult::kInObjectPropertyCount + 1);
// length descriptor.
{
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors());
+ array_function->initial_map()->instance_descriptors(), isolate());
Handle<String> length = factory()->length_string();
int old = array_descriptors->SearchWithCache(
isolate(), *length, array_function->initial_map());
DCHECK_NE(old, DescriptorArray::kNotFound);
Descriptor d = Descriptor::AccessorConstant(
- length, handle(array_descriptors->GetValue(old), isolate()),
+ length, handle(array_descriptors->GetStrongValue(old), isolate()),
array_descriptors->GetDetails(old).attributes());
initial_map->AppendDescriptor(&d);
}
@@ -4946,29 +5040,31 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
- Handle<Map> map(native_context()->sloppy_arguments_map());
- Map::EnsureDescriptorSlack(map, 1);
+ Handle<Map> map(native_context()->sloppy_arguments_map(), isolate());
+ Map::EnsureDescriptorSlack(isolate(), map, 1);
map->AppendDescriptor(&d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
- Handle<Map> map(native_context()->fast_aliased_arguments_map());
- Map::EnsureDescriptorSlack(map, 1);
+ Handle<Map> map(native_context()->fast_aliased_arguments_map(),
+ isolate());
+ Map::EnsureDescriptorSlack(isolate(), map, 1);
map->AppendDescriptor(&d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
- Handle<Map> map(native_context()->slow_aliased_arguments_map());
- Map::EnsureDescriptorSlack(map, 1);
+ Handle<Map> map(native_context()->slow_aliased_arguments_map(),
+ isolate());
+ Map::EnsureDescriptorSlack(isolate(), map, 1);
map->AppendDescriptor(&d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
- Handle<Map> map(native_context()->strict_arguments_map());
- Map::EnsureDescriptorSlack(map, 1);
+ Handle<Map> map(native_context()->strict_arguments_map(), isolate());
+ Map::EnsureDescriptorSlack(isolate(), map, 1);
map->AppendDescriptor(&d);
}
}
@@ -4981,15 +5077,6 @@ bool Genesis::InstallExtraNatives() {
Handle<JSObject> extras_binding =
factory()->NewJSObject(isolate()->object_function());
-
- // binding.isTraceCategoryenabled(category)
- SimpleInstallFunction(extras_binding, "isTraceCategoryEnabled",
- Builtins::kIsTraceCategoryEnabled, 1, true);
-
- // binding.trace(phase, category, name, id, data)
- SimpleInstallFunction(extras_binding, "trace", Builtins::kTrace, 5,
- true);
-
native_context()->set_extras_binding_object(*extras_binding);
for (int i = ExtraNatives::GetDebuggerCount();
@@ -5019,11 +5106,9 @@ bool Genesis::InstallDebuggerNatives() {
return true;
}
-
-static void InstallBuiltinFunctionId(Handle<JSObject> holder,
+static void InstallBuiltinFunctionId(Isolate* isolate, Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
- Isolate* isolate = holder->GetIsolate();
Handle<Object> function_object =
JSReceiver::GetProperty(isolate, holder, function_name).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
@@ -5048,9 +5133,9 @@ void Genesis::InstallBuiltinFunctionIds() {
FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)};
for (const BuiltinFunctionIds& builtin : builtins) {
- Handle<JSObject> holder =
- ResolveBuiltinIdHolder(native_context(), builtin.holder_expr);
- InstallBuiltinFunctionId(holder, builtin.fun_name, builtin.id);
+ Handle<JSObject> holder = ResolveBuiltinIdHolder(
+ isolate(), native_context(), builtin.holder_expr);
+ InstallBuiltinFunctionId(isolate(), holder, builtin.fun_name, builtin.id);
}
}
@@ -5065,19 +5150,17 @@ void Genesis::InitializeNormalizedMapCaches() {
bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
+ // Don't install extensions into the snapshot.
+ if (isolate_->serializer_enabled()) return true;
BootstrapperActive active(this);
SaveContext saved_context(isolate_);
isolate_->set_context(*native_context);
- return Genesis::InstallExtensions(native_context, extensions) &&
- Genesis::InstallSpecialObjects(native_context);
+ return Genesis::InstallExtensions(isolate_, native_context, extensions) &&
+ Genesis::InstallSpecialObjects(isolate_, native_context);
}
-
-bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
- Isolate* isolate = native_context->GetIsolate();
- // Don't install extensions into the snapshot.
- if (isolate->serializer_enabled()) return true;
-
+bool Genesis::InstallSpecialObjects(Isolate* isolate,
+ Handle<Context> native_context) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -5085,7 +5168,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<String> name =
factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
- JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
+ JSObject::AddProperty(isolate, Error, name, stack_trace_limit, NONE);
if (FLAG_expose_wasm) {
// Install the internal data structures into the isolate and expose on
@@ -5123,10 +5206,9 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
reinterpret_cast<void*>(static_cast<intptr_t>(state));
}
-
-bool Genesis::InstallExtensions(Handle<Context> native_context,
+bool Genesis::InstallExtensions(Isolate* isolate,
+ Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
return InstallAutoExtensions(isolate, &extension_states) &&
(!FLAG_expose_free_buffer ||
@@ -5230,9 +5312,9 @@ bool Genesis::InstallExtension(Isolate* isolate,
bool Genesis::ConfigureGlobalObjects(
v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
- JSObject::cast(native_context()->global_proxy()));
+ JSObject::cast(native_context()->global_proxy()), isolate());
Handle<JSObject> global_object(
- JSObject::cast(native_context()->global_object()));
+ JSObject::cast(native_context()->global_object()), isolate());
if (!global_proxy_template.IsEmpty()) {
// Configure the global proxy object.
@@ -5242,10 +5324,12 @@ bool Genesis::ConfigureGlobalObjects(
// Configure the global object.
Handle<FunctionTemplateInfo> proxy_constructor(
- FunctionTemplateInfo::cast(global_proxy_data->constructor()));
+ FunctionTemplateInfo::cast(global_proxy_data->constructor()),
+ isolate());
if (!proxy_constructor->prototype_template()->IsUndefined(isolate())) {
Handle<ObjectTemplateInfo> global_object_data(
- ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
+ ObjectTemplateInfo::cast(proxy_constructor->prototype_template()),
+ isolate());
if (!ConfigureApiObject(global_object, global_object_data)) return false;
}
}
@@ -5255,8 +5339,8 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_array_buffer_map(
native_context()->array_buffer_fun()->initial_map());
- Handle<JSFunction> js_map_fun(native_context()->js_map_fun());
- Handle<JSFunction> js_set_fun(native_context()->js_set_fun());
+ Handle<JSFunction> js_map_fun(native_context()->js_map_fun(), isolate());
+ Handle<JSFunction> js_set_fun(native_context()->js_set_fun(), isolate());
// Force the Map/Set constructor to fast properties, so that we can use the
// fast paths for various things like
//
@@ -5282,7 +5366,7 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
->IsTemplateFor(object->map()));;
MaybeHandle<JSObject> maybe_obj =
- ApiNatives::InstantiateObject(object_template);
+ ApiNatives::InstantiateObject(object->GetIsolate(), object_template);
Handle<JSObject> obj;
if (!maybe_obj.ToHandle(&obj)) {
DCHECK(isolate()->has_pending_exception());
@@ -5303,17 +5387,18 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// in the snapshotted global object.
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map()->instance_descriptors());
+ Handle<DescriptorArray>(from->map()->instance_descriptors(), isolate());
for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
if (details.kind() == kData) {
HandleScope inner(isolate());
- Handle<Name> key = Handle<Name>(descs->GetKey(i));
+ Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
Handle<Object> value =
JSObject::FastPropertyAt(from, details.representation(), index);
- JSObject::AddProperty(to, key, value, details.attributes());
+ JSObject::AddProperty(isolate(), to, key, value,
+ details.attributes());
} else {
DCHECK_EQ(kAccessor, details.kind());
UNREACHABLE();
@@ -5324,21 +5409,22 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (details.kind() == kData) {
DCHECK(!FLAG_track_constant_fields);
HandleScope inner(isolate());
- Handle<Name> key = Handle<Name>(descs->GetKey(i));
- Handle<Object> value(descs->GetValue(i), isolate());
- JSObject::AddProperty(to, key, value, details.attributes());
-
+ Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
+ Handle<Object> value(descs->GetStrongValue(i), isolate());
+ JSObject::AddProperty(isolate(), to, key, value,
+ details.attributes());
} else {
DCHECK_EQ(kAccessor, details.kind());
- Handle<Name> key(descs->GetKey(i));
- LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Name> key(descs->GetKey(i), isolate());
+ LookupIterator it(isolate(), to, key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
// If the property is already there we skip it
if (it.IsFound()) continue;
HandleScope inner(isolate());
DCHECK(!to->HasFastProperties());
// Add to dictionary.
- Handle<Object> value(descs->GetValue(i), isolate());
+ Handle<Object> value(descs->GetStrongValue(i), isolate());
PropertyDetails d(kAccessor, details.attributes(),
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(to, key, value, d);
@@ -5348,14 +5434,16 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
} else if (from->IsJSGlobalObject()) {
// Copy all keys and values in enumeration order.
Handle<GlobalDictionary> properties(
- JSGlobalObject::cast(*from)->global_dictionary());
- Handle<FixedArray> indices = GlobalDictionary::IterationIndices(properties);
+ JSGlobalObject::cast(*from)->global_dictionary(), isolate());
+ Handle<FixedArray> indices =
+ GlobalDictionary::IterationIndices(isolate(), properties);
for (int i = 0; i < indices->length(); i++) {
int index = Smi::ToInt(indices->get(i));
// If the property is already there we skip it.
- Handle<PropertyCell> cell(properties->CellAt(index));
+ Handle<PropertyCell> cell(properties->CellAt(index), isolate());
Handle<Name> key(cell->name(), isolate());
- LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate(), to, key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
if (it.IsFound()) continue;
// Set the property.
@@ -5363,22 +5451,24 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (value->IsTheHole(isolate())) continue;
PropertyDetails details = cell->property_details();
if (details.kind() != kData) continue;
- JSObject::AddProperty(to, key, value, details.attributes());
+ JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
} else {
// Copy all keys and values in enumeration order.
Handle<NameDictionary> properties =
- Handle<NameDictionary>(from->property_dictionary());
+ Handle<NameDictionary>(from->property_dictionary(), isolate());
Handle<FixedArray> key_indices =
- NameDictionary::IterationIndices(properties);
+ NameDictionary::IterationIndices(isolate(), properties);
+ ReadOnlyRoots roots(isolate());
for (int i = 0; i < key_indices->length(); i++) {
int key_index = Smi::ToInt(key_indices->get(i));
Object* raw_key = properties->KeyAt(key_index);
- DCHECK(properties->IsKey(isolate(), raw_key));
+ DCHECK(properties->IsKey(roots, raw_key));
DCHECK(raw_key->IsName());
// If the property is already there we skip it.
Handle<Name> key(Name::cast(raw_key), isolate());
- LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate(), to, key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
if (it.IsFound()) continue;
// Set the property.
@@ -5388,7 +5478,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!value->IsTheHole(isolate()));
PropertyDetails details = properties->DetailsAt(key_index);
DCHECK_EQ(kData, details.kind());
- JSObject::AddProperty(to, key, value, details.attributes());
+ JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
}
}
@@ -5398,7 +5488,7 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
Handle<JSObject> to) {
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
- Handle<FixedArray>(FixedArray::cast(from->elements()));
+ Handle<FixedArray>(FixedArray::cast(from->elements()), isolate());
Handle<FixedArray> to_elements = factory()->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
@@ -5468,7 +5558,7 @@ Genesis::Genesis(
}
if (!native_context().is_null()) {
- AddToWeakNativeContextList(*native_context());
+ AddToWeakNativeContextList(isolate, *native_context());
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
@@ -5526,7 +5616,8 @@ Genesis::Genesis(
// Store String.prototype's map again in case it has been changed by
// experimental natives.
- Handle<JSFunction> string_function(native_context()->string_function());
+ Handle<JSFunction> string_function(native_context()->string_function(),
+ isolate);
JSObject* string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
@@ -5541,7 +5632,7 @@ Genesis::Genesis(
if (FLAG_disallow_code_generation_from_strings) {
native_context()->set_allow_code_gen_from_strings(
- isolate->heap()->false_value());
+ ReadOnlyRoots(isolate).false_value());
}
ConfigureUtilsObject(context_type);
@@ -5578,10 +5669,11 @@ Genesis::Genesis(Isolate* isolate,
Handle<ObjectTemplateInfo> global_proxy_data =
Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor(
- FunctionTemplateInfo::cast(global_proxy_data->constructor()));
+ FunctionTemplateInfo::cast(global_proxy_data->constructor()), isolate);
Handle<ObjectTemplateInfo> global_object_template(
- ObjectTemplateInfo::cast(global_constructor->prototype_template()));
+ ObjectTemplateInfo::cast(global_constructor->prototype_template()),
+ isolate);
Handle<JSObject> global_object =
ApiNatives::InstantiateRemoteObject(
global_object_template).ToHandleChecked();
@@ -5596,7 +5688,7 @@ Genesis::Genesis(Isolate* isolate,
global_proxy_map->set_may_have_interesting_symbols(true);
// A remote global proxy has no native context.
- global_proxy->set_native_context(heap()->null_value());
+ global_proxy->set_native_context(ReadOnlyRoots(heap()).null_value());
// Configure the hidden prototype chain of the global proxy.
JSObject::ForceSetPrototype(global_proxy, global_object);
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 8902ce2529..e3ba8c06f2 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -29,9 +29,11 @@ class SourceCodeCache final BASE_EMBEDDED {
bit_cast<Object**, FixedArray**>(&cache_));
}
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle);
+ bool Lookup(Isolate* isolate, Vector<const char> name,
+ Handle<SharedFunctionInfo>* handle);
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared);
+ void Add(Isolate* isolate, Vector<const char> name,
+ Handle<SharedFunctionInfo> shared);
private:
Script::Type type_;
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index ec1b37e94f..a5219bf070 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -28,7 +28,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExternalReference::Create(address).address()) &
1);
#endif
- __ Move(r5, ExternalReference::Create(address));
+ __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -39,57 +39,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments excluding receiver
- // -- r1 : target
- // -- r3 : new.target
- // -- r5 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * argc] : receiver
- // -----------------------------------
- __ AssertFunction(r1);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // CEntry expects r0 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ add(r0, r0, Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(r0);
- __ Push(r0, r1, r3);
- __ SmiUntag(r0);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to r5
- // in Generate_adaptor.
- __ mov(r1, r5);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -111,39 +60,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// function.
// tail call a stub
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : array function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(r7);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r7, r8, r9, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // r2 is the AllocationSite - here undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- // If r3 (new target) is undefined, then this is the 'Call' case, so move
- // r1 (the constructor) to r3.
- __ cmp(r3, r2);
- __ mov(r3, r1, LeaveCC, eq);
-
- // Run the native code for the Array function called as a normal function.
- // tail call a stub
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -494,20 +412,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldr(r2,
+ FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
+ __ mov(r6, Operand(0));
+
__ bind(&loop);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(mi, &done_loop);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ cmp(r6, r3);
+ __ b(ge, &done_loop);
+ __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Push(scratch);
+ __ add(r6, r6, Operand(1));
__ b(&loop);
+
__ bind(&done_loop);
}
@@ -523,8 +446,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(
- r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r0, FieldMemOperand(
+ r0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -906,17 +829,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
- __ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiTst(r4);
- __ b(ne, &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1027,37 +945,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
- __ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mov(kInterpreterBytecodeArrayRegister, r9);
- __ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
- __ SmiUntag(r9);
- __ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ mov(r4, Operand(debug_execution_mode));
- __ ldrsb(r4, MemOperand(r4));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ cmp(r4, r9);
- __ b(eq, &bytecode_array_loaded);
-
- __ push(closure);
- __ push(feedback_vector);
- __ push(kInterpreterBytecodeArrayRegister);
- __ push(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ pop(feedback_vector);
- __ pop(closure);
- __ b(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1160,8 +1047,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1278,208 +1165,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
- __ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
- __ b(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
- __ b(ne, &check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ b(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ b(ne, &check_is_fixed_array);
- __ ldr(sfi_data, FieldMemOperand(
- sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ b(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
- __ b(ne, &check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ b(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ cmp(data_type, Operand(TUPLE2_TYPE));
- __ b(ne, &check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ b(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ b(ne, &check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ b(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ ldr(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = r1;
- Register feedback_vector = r2;
-
- // Do we have a valid feedback vector?
- __ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = r4;
- __ ldr(entry,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(entry,
- FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, r5);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(r5, masm->CodeObject());
- __ cmp(entry, r5);
- __ b(eq, &gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ mov(r9, entry); // Write barrier clobbers r9 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = r1; // Must be preserved
- Register scratch0 = r2;
- Register scratch1 = r4;
-
- CHECK(scratch0 != r0 && scratch0 != r3 && scratch0 != r1);
- CHECK(scratch1 != r0 && scratch1 != r3 && scratch1 != r1);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ ldr(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ ldr(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ cmp(scratch1, scratch0);
- __ b(eq, &deserialize_in_runtime);
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ add(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1892,10 +1577,27 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(r2);
-
Register scratch = r8;
+ if (masm->emit_debug_code()) {
+ // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(r2);
+ __ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(r6, Operand(FIXED_ARRAY_TYPE));
+ __ b(eq, &ok);
+ __ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ b(ne, &fail);
+ __ cmp(r4, Operand(0));
+ __ b(eq, &ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -1977,8 +1679,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r5, FieldMemOperand(
- r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r5, FieldMemOperand(
+ r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r4, fp);
}
__ b(&arguments_done);
@@ -2108,8 +1810,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ ldr(r2,
- FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2378,42 +2080,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r1);
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r1);
- __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(r1, r2);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : message_id as Smi
- // -- lr : return address
- // -----------------------------------
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -2425,10 +2091,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ cmp(r0, r2);
- __ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
+ __ cmp(r0, r2);
+ __ b(lt, &too_few);
Register scratch = r5;
@@ -2547,29 +2213,36 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in r4 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r4, r4);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
+ constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ // Pass instance and function index as explicit arguments to the runtime
+ // function.
__ push(kWasmInstanceRegister);
+ __ push(r4);
+ // Load the correct CEntry builtin from the instance object.
+ __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r2);
+ // The entrypoint address is the return value.
__ mov(r8, kReturnRegister0);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
@@ -2741,6 +2414,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label negate, done;
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register result_reg = r7;
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
@@ -2832,20 +2506,20 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == r2);
const LowDwVfpRegister double_base = d0;
const LowDwVfpRegister double_exponent = d1;
const LowDwVfpRegister double_result = d2;
const LowDwVfpRegister double_scratch = d3;
const SwVfpRegister single_scratch = s6;
- const Register scratch = r9;
- const Register scratch2 = r4;
+ // Avoid using Registers r0-r3 as they may be needed when calling to C if the
+ // ABI is softfloat.
+ const Register integer_exponent = r4;
+ const Register scratch = r5;
Label call_runtime, done, int_exponent;
// Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
+ __ TryDoubleToInt32Exact(integer_exponent, double_exponent, double_scratch);
__ b(eq, &int_exponent);
__ push(lr);
@@ -2862,16 +2536,13 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
// Calculate power with integer exponent.
__ bind(&int_exponent);
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
-
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, Double(1.0), scratch2);
+ __ vmov(double_result, Double(1.0), scratch);
// Get absolute value of exponent.
- __ cmp(scratch, Operand::Zero());
- __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
+ __ cmp(integer_exponent, Operand::Zero());
+ __ mov(scratch, integer_exponent);
+ __ rsb(scratch, integer_exponent, Operand::Zero(), LeaveCC, mi);
Label while_true;
__ bind(&while_true);
@@ -2880,7 +2551,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
- __ cmp(exponent, Operand::Zero());
+ __ cmp(integer_exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
@@ -2890,7 +2561,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ b(ne, &done);
// double_exponent may not containe the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
- __ vmov(single_scratch, exponent);
+ __ vmov(single_scratch, integer_exponent);
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
@@ -2908,6 +2579,88 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ cmp(r0, Operand(1));
+
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lo);
+
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
+ __ Jump(code, RelocInfo::CODE_TARGET, hi);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ ldr(r3, MemOperand(sp, 0));
+ __ cmp(r3, Operand::Zero());
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne);
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- r1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(r3, Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
+ __ b(eq, &fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index c397d03f28..2254f010c1 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -22,7 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ Mov(x5, ExternalReference::Create(address));
+ __ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -33,57 +33,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments excluding receiver
- // -- x1 : target
- // -- x3 : new target
- // -- x5 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * argc] : receiver
- // -----------------------------------
- __ AssertFunction(x1);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // CEntry expects x0 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ Add(x0, x0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
-
- // Insert extra arguments.
- Register padding = x10;
- __ LoadRoot(padding, Heap::kTheHoleValueRootIndex);
- __ SmiTag(x11, x0);
- __ Push(padding, x11, x1, x3);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to x5
- // in Generate_adaptor.
- __ Mov(x1, x5);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -105,39 +54,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : array function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_ArrayConstructor");
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
- __ Tst(x10, kSmiTagMask);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // x2 is the AllocationSite - here undefined.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- // If x3 (new target) is undefined, then this is the 'Call' case, so move
- // x1 (the constructor) to x3.
- __ Cmp(x3, x2);
- __ CmovX(x3, x1, eq);
-
- // Run the native code for the Array function called as a normal function.
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -266,8 +184,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore smi-tagged arguments count from the frame. Use fp relative
// addressing to avoid the circular dependency between padding existence and
// argc parity.
- __ Ldrsw(x1,
- UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -351,8 +268,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ldrsw(x12,
- UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Copy arguments to the expression stack. The called function pops the
// receiver along with its arguments, so we need an extra receiver on the
@@ -451,8 +367,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
- __ Ldrsw(x1,
- UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
@@ -513,8 +428,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Get number of arguments for generator function.
__ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w10,
- FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldrh(w10, FieldMemOperand(
+ x10, SharedFunctionInfo::kFormalParameterCountOffset));
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
@@ -539,18 +454,21 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0 .. arg count - 1] : claimed for args
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
+
+ __ Ldr(x5,
+ FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
- __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
+ __ Mov(x12, 0);
__ Bind(&loop);
__ Sub(x10, x10, 1);
+ __ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
+ __ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
__ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
+ __ Add(x12, x12, 1);
__ Cbnz(x10, &loop);
__ Bind(&done);
}
@@ -571,8 +489,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w0, FieldMemOperand(
- x0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldrh(w0, FieldMemOperand(
+ x0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -983,10 +901,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
- has_bytecode_array;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ Label has_bytecode_array;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
@@ -997,9 +914,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(kInterpreterBytecodeArrayRegister,
InterpreterData::kBytecodeArrayOffset));
__ Bind(&has_bytecode_array);
- __ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
- __ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1112,31 +1026,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ Bind(&maybe_load_debug_bytecode_array);
- __ Ldr(x10, FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ Mov(kInterpreterBytecodeArrayRegister, x10);
- __ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
- __ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
-
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ Mov(x11, Operand(debug_execution_mode));
- __ Ldrsb(x11, MemOperand(x11));
- __ CompareAndBranch(x10, x11, eq, &bytecode_array_loaded);
-
- __ Push(closure, feedback_vector);
- __ PushArgument(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ Pop(feedback_vector, closure);
- __ jmp(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1274,8 +1163,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1383,209 +1272,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(x2, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Mov(scratch1, ExternalReference::builtins_address(masm->isolate()));
- __ Mov(sfi_data, Operand::UntagSmiAndScale(sfi_data, kPointerSizeLog2));
- __ Ldr(sfi_data, MemOperand(scratch1, sfi_data));
- __ B(&done);
-
- // Get map for subsequent checks.
- __ Bind(&check_is_bytecode_array);
- __ Ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ Ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ Cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
- __ B(ne, &check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ B(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ Bind(&check_is_exported_function_data);
- __ Cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ B(ne, &check_is_fixed_array);
- __ Ldr(sfi_data, FieldMemOperand(
- sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ B(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ Bind(&check_is_fixed_array);
- __ Cmp(data_type, Operand(FIXED_ARRAY_TYPE));
- __ B(ne, &check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ B(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ Bind(&check_is_pre_parsed_scope_data);
- __ Cmp(data_type, Operand(TUPLE2_TYPE));
- __ B(ne, &check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ B(&done);
-
- // IsFunctionTemplateInfo: API call
- __ Bind(&check_is_function_template_info);
- __ Cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ B(ne, &check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ B(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ Bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ Cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ Ldr(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ Bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argument count (preserved for callee)
- // -- x3 : new target (preserved for callee)
- // -- x1 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = x1;
- Register feedback_vector = x2;
-
- // Do we have a valid feedback vector?
- __ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = x7;
- __ Ldr(entry,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(entry,
- FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, x5);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(x5, masm->CodeObject());
- __ Cmp(entry, x5);
- __ B(eq, &gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ Mov(x10, entry); // Write barrier clobbers x10 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, x10, x5,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(entry);
-
- __ Bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argument count (preserved for callee)
- // -- x3 : new target (preserved for callee)
- // -- x1 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = x1; // Must be preserved
- Register scratch0 = x2;
- Register scratch1 = x4;
-
- CHECK(!scratch0.is(x0) && !scratch0.is(x3) && !scratch0.is(x1));
- CHECK(!scratch1.is(x0) && !scratch1.is(x3) && !scratch1.is(x1));
- CHECK(!scratch0.is(scratch1));
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ Ldr(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ Ldr(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ Mov(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ Ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ Cmp(scratch1, scratch0);
- __ B(eq, &deserialize_in_runtime);
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ Str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, x9, x5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ Add(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1656,7 +1342,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
+ __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1800,9 +1486,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ Ldrsw(w1, UntagSmiFieldMemOperand(
- x1, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
+ __ SmiUntag(x1,
+ FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -2222,7 +1908,24 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(x2);
+ if (masm->emit_debug_code()) {
+ // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
+ __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Cmp(x13, FIXED_ARRAY_TYPE);
+ __ B(eq, &ok);
+ __ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
+ __ B(ne, &fail);
+ __ Cmp(x4, 0);
+ __ B(eq, &ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
Register arguments_list = x2;
Register argc = x0;
@@ -2328,18 +2031,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(scratch,
FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(len,
- FieldMemOperand(
- scratch, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldrh(len,
+ FieldMemOperand(scratch,
+ SharedFunctionInfo::kFormalParameterCountOffset));
__ Mov(args_fp, fp);
}
__ B(&arguments_done);
__ Bind(&arguments_adaptor);
{
// Just load the length from ArgumentsAdaptorFrame.
- __ Ldrsw(len,
- UntagSmiMemOperand(
- args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(
+ len,
+ MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ Bind(&arguments_done);
}
@@ -2455,8 +2158,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ Ldrsw(
- x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldrh(x2,
+ FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(x0);
ParameterCount expected(x2);
__ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2486,8 +2189,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Label no_bound_arguments;
__ Ldr(bound_argv,
FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ Ldrsw(bound_argc,
- UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ SmiUntag(bound_argc,
+ FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
@@ -2774,46 +2477,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
- // ----------- S t a t e -------------
- // -- x1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(x1);
- __ PushArgument(x1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
- // ----------- S t a t e -------------
- // -- x1 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(x1);
- __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(x1, x2);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_Abort");
- // ----------- S t a t e -------------
- // -- x1 : message_id as Smi
- // -- lr : return address
- // -----------------------------------
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ PushArgument(x1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -3007,32 +2670,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in w8 by the jump table trampoline.
+ // Sign extend and convert to Smi for the runtime call.
+ __ sxtw(x8, w8);
+ __ SmiTag(x8, x8);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<x0, x1, x2, x3, x4, x5>();
+ constexpr RegList gp_regs =
+ Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
constexpr RegList fp_regs =
Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
- __ Push(x5, x6); // note: pushed twice because alignment required
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
- __ PushArgument(kWasmInstanceRegister);
+ // Pass instance and function index as explicit arguments to the runtime
+ // function.
+ __ Push(kWasmInstanceRegister, x8);
+ // Load the correct CEntry builtin from the instance object.
+ __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
+ // The entrypoint address is the return value.
__ mov(x8, kReturnRegister0);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
- __ Pop(x6, x5); // note: pushed twice because alignment required
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
@@ -3268,6 +2937,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DCHECK(result.Is64Bits());
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
Register scratch2 = temps.AcquireX();
@@ -3329,16 +2999,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- // Stack on entry:
- // sp[0]: Exponent (as a tagged value).
- // sp[1]: Base (as a tagged value).
- //
- // The (tagged) result will be returned in x0, as a heap number.
-
- Register exponent_tagged = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent_tagged.is(x11));
- Register exponent_integer = MathPowIntegerDescriptor::exponent();
- DCHECK(exponent_integer.is(x12));
+ Register exponent_integer = x12;
Register saved_lr = x19;
VRegister result_double = d0;
VRegister base_double = d0;
@@ -3348,7 +3009,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
VRegister scratch0_double = d7;
// A fast-path for integer exponents.
- Label exponent_is_smi, exponent_is_integer;
+ Label exponent_is_integer;
// Allocate a heap number for the result, and return it.
Label done;
@@ -3368,24 +3029,12 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ B(&done);
}
- // Handle SMI exponents.
- __ Bind(&exponent_is_smi);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
- // d1 base_double The base as a double.
- __ SmiUntag(exponent_integer, exponent_tagged);
-
__ Bind(&exponent_is_integer);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
- // x12 exponent_integer The exponent as an integer.
- // d1 base_double The base as a double.
// Find abs(exponent). For negative exponents, we can find the inverse later.
Register exponent_abs = x13;
__ Cmp(exponent_integer, 0);
__ Cneg(exponent_abs, exponent_integer, mi);
- // x13 exponent_abs The value of abs(exponent_integer).
// Repeatedly multiply to calculate the power.
// result = 1.0;
@@ -3441,6 +3090,102 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&packed_case);
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&n_case);
+ // N arguments.
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+ // Figure out the right elements kind
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Retrieve elements_kind from map.
+ __ LoadElementsKindFromMap(kind, x10);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Cmp(x3, PACKED_ELEMENTS);
+ __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
new file mode 100644
index 0000000000..9919f9e395
--- /dev/null
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -0,0 +1,173 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ macro ArrayForEachTorqueContinuation(
+ context: Context, o: Object, len: Number, callbackfn: Callable,
+ thisArg: Object, initial_k: Smi): Object {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Smi = initial_k; k < len; k = k + 1) {
+ // 6a. Let Pk be ! ToString(k).
+ let pK: String = ToString_Inline(context, k);
+
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ let kPresent: Oddball = HasPropertyObject(o, pK, context, kHasProperty);
+
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ let kValue: Object = GetProperty(context, o, pK);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ Call(context, callbackfn, thisArg, kValue, k, o);
+ }
+
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return Undefined;
+ }
+
+ javascript builtin ArrayForEachLoopEagerDeoptContinuation(
+ context: Context, receiver: Object, callback: Object, thisArg: Object,
+ initialK: Object, length: Object): Object {
+ return ArrayForEachLoopContinuation(
+ context, receiver, callback, thisArg, Undefined, receiver, initialK,
+ length, Undefined);
+ }
+
+ javascript builtin ArrayForEachLoopLazyDeoptContinuation(
+ context: Context, receiver: Object, callback: Object, thisArg: Object,
+ initialK: Object, length: Object, result: Object): Object {
+ return ArrayForEachLoopContinuation(
+ context, receiver, callback, thisArg, Undefined, receiver, initialK,
+ length, Undefined);
+ }
+
+ builtin ArrayForEachLoopContinuation(
+ context: Context, receiver: Object, callback: Object, thisArg: Object,
+ array: Object, object: Object, initialK: Object, length: Object,
+ to: Object): Object {
+ try {
+ let callbackfn: Callable = cast<Callable>(callback) otherwise Unexpected;
+ let k: Smi = cast<Smi>(initialK) otherwise Unexpected;
+ let number_length: Number = cast<Number>(length) otherwise Unexpected;
+
+ return ArrayForEachTorqueContinuation(
+ context, object, number_length, callbackfn, thisArg, k);
+ }
+ label Unexpected {
+ unreachable;
+ }
+ }
+
+ macro VisitAllElements<FixedArrayType : type>(
+ context: Context, a: JSArray, len: Smi, callbackfn: Callable,
+ thisArg: Object): void labels
+ Bailout(Smi) {
+ let k: Smi = 0;
+ let map: Map = a.map;
+
+ try {
+ // Build a fast loop over the smi array.
+ for (; k < len; k = k + 1) {
+ // Ensure that the map didn't change.
+ if (map != a.map) goto Slow;
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= a.length) goto Slow;
+
+ try {
+ let value: Object =
+ LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
+ Call(context, callbackfn, thisArg, value, k, a);
+ }
+ label FoundHole {
+ // If we found the hole, we need to bail out if the initial
+ // array prototype has had elements inserted. This is preferable
+ // to walking the prototype chain looking for elements.
+
+ if (IsNoElementsProtectorCellInvalid()) goto Bailout(k);
+ }
+ }
+ }
+ label Slow {
+ goto Bailout(k);
+ }
+ }
+
+ macro FastArrayForEach(
+ context: Context, o: Object, len: Number, callbackfn: Callable,
+ thisArg: Object): Object labels
+ Bailout(Smi) {
+ let k: Smi = 0;
+ try {
+ let smi_len: Smi = cast<Smi>(len) otherwise Slow;
+ let a: JSArray = cast<JSArray>(o) otherwise Slow;
+ let map: Map = a.map;
+
+ if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
+ let elementsKind: ElementsKind = map.elements_kind;
+ if (!IsFastElementsKind(elementsKind)) goto Slow;
+
+ if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
+ VisitAllElements<FixedDoubleArray>(
+ context, a, smi_len, callbackfn, thisArg)
+ otherwise Bailout;
+ } else {
+ VisitAllElements<FixedArray>(context, a, smi_len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ }
+ label Slow {
+ goto Bailout(k);
+ }
+ return Undefined;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.foreach
+ javascript builtin ArrayForEach(
+ context: Context, receiver: Object, ...arguments): Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ let o: Object = ToObject(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ let len: Number = GetLengthProperty(context, o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
+ }
+ let callbackfn: Callable =
+ cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ let thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ // Special cases.
+ let k: Smi = 0;
+ try {
+ return FastArrayForEach(context, o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label Bailout(k_value: Smi) {
+ k = k_value;
+ }
+
+ return ArrayForEachTorqueContinuation(
+ context, o, len, callbackfn, thisArg, k);
+ }
+ label TypeError {
+ ThrowTypeError(context, kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError {
+ ThrowTypeError(
+ context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/array-sort.tq b/deps/v8/src/builtins/array-sort.tq
new file mode 100644
index 0000000000..30bbf5ef74
--- /dev/null
+++ b/deps/v8/src/builtins/array-sort.tq
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ // TODO(szuend): TimSort implementation will go here. Keeping the file around
+ // after removing the QuickSort Torque implementation.
+}
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 69aea57f5a..edfe342ae3 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -3,8 +3,18 @@
// found in the LICENSE file.
module array {
+ macro GetLengthProperty(context: Context, o: Object): Number {
+ if (BranchIfFastJSArray(o, context)) {
+ let a: JSArray = unsafe_cast<JSArray>(o);
+ return a.length_fast;
+ } else
+ deferred {
+ return ToLength_Inline(context, GetProperty(context, o, 'length'));
+ }
+ }
+
macro FastArraySplice(
- context: Context, args: Arguments, o: Object,
+ context: Context, args: constexpr Arguments, o: Object,
originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
actualDeleteCountNumber: Number): Object
labels Bailout {
@@ -103,8 +113,7 @@ module array {
let o: Object = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- let len: Number =
- ToLength_Inline(context, GetProperty(context, o, 'length'));
+ let len: Number = GetLengthProperty(context, o);
// 3. Let relativeStart be ? ToInteger(start).
let start: Object = arguments[0];
@@ -145,7 +154,7 @@ module array {
// 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
// Bailout exception.
if (len + insertCount - actualDeleteCount > kMaxSafeInteger) {
- ThrowRangeError(context, kInvalidArrayLengthMessage);
+ ThrowRangeError(context, kInvalidArrayLength);
}
try {
@@ -186,7 +195,7 @@ module array {
}
// 12. Perform ? Set(A, "length", actualDeleteCount, true).
- SetProperty(context, a, 'length', actualDeleteCount, strict);
+ SetProperty(context, a, 'length', actualDeleteCount, kStrict);
// 13. Let items be a List whose elements are, in left-to-right order,
// the portion of the actual argument list starting with the third
@@ -217,12 +226,12 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue, strict);
+ SetProperty(context, o, to, fromValue, kStrict);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, strict);
+ DeleteProperty(context, o, to, kStrict);
}
// vi. Increase k by 1.
k = k + 1;
@@ -233,7 +242,7 @@ module array {
// d. Repeat, while k > (len - actualDeleteCount + itemCount)
while (k > (len - actualDeleteCount + itemCount)) {
// i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
- DeleteProperty(context, o, ToString_Inline(context, k - 1), strict);
+ DeleteProperty(context, o, ToString_Inline(context, k - 1), kStrict);
// ii. Decrease k by 1.
k = k - 1;
@@ -261,12 +270,12 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue, strict);
+ SetProperty(context, o, to, fromValue, kStrict);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, strict);
+ DeleteProperty(context, o, to, kStrict);
}
// vi. Decrease k by 1.
@@ -283,7 +292,7 @@ module array {
if (arguments.length > 2) {
for (let e: Object of arguments [2: ]) {
// b. Perform ? Set(O, ! ToString(k), E, true).
- SetProperty(context, o, ToString_Inline(context, k), e, strict);
+ SetProperty(context, o, ToString_Inline(context, k), e, kStrict);
// c. Increase k by 1.
k = k + 1;
@@ -293,177 +302,8 @@ module array {
// 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
// true).
SetProperty(
- context, o, 'length', len - actualDeleteCount + itemCount, strict);
+ context, o, 'length', len - actualDeleteCount + itemCount, kStrict);
return a;
}
-
- macro ArrayForEachTorqueContinuation(
- context: Context, o: Object, len: Number, callbackfn: Callable,
- thisArg: Object, initial_k: Smi): Object {
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Smi = initial_k; k < len; k = k + 1) {
- // 6a. Let Pk be ! ToString(k).
- let pK: String = ToString_Inline(context, k);
-
- // 6b. Let kPresent be ? HasProperty(O, Pk).
- let kPresent: Oddball = HasPropertyObject(o, pK, context, kHasProperty);
-
- // 6c. If kPresent is true, then
- if (kPresent == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- let kValue: Object = GetProperty(context, o, pK);
-
- // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
- Call(context, callbackfn, thisArg, kValue, k, o);
- }
-
- // 6d. Increase k by 1. (done by the loop).
- }
- return Undefined;
- }
-
- javascript builtin ArrayForEachLoopEagerDeoptContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
- initialK: Object, length: Object): Object {
- return ArrayForEachLoopContinuation(
- context, receiver, callback, thisArg, Undefined, receiver, initialK,
- length, Undefined);
- }
-
- javascript builtin ArrayForEachLoopLazyDeoptContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
- initialK: Object, length: Object, result: Object): Object {
- return ArrayForEachLoopContinuation(
- context, receiver, callback, thisArg, Undefined, receiver, initialK,
- length, Undefined);
- }
-
- builtin ArrayForEachLoopContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
- array: Object, object: Object, initialK: Object, length: Object,
- to: Object): Object {
- try {
- let callbackfn: Callable = cast<Callable>(callback) otherwise Unexpected;
- let k: Smi = cast<Smi>(initialK) otherwise Unexpected;
- let number_length: Number = cast<Number>(length) otherwise Unexpected;
-
- return ArrayForEachTorqueContinuation(
- context, object, number_length, callbackfn, thisArg, k);
- }
- label Unexpected {
- unreachable;
- }
- }
-
- macro VisitAllElements<FixedArrayType : type>(
- context: Context, a: JSArray, len: Smi, callbackfn: Callable,
- thisArg: Object): void labels
- Bailout(Smi) {
- let k: Smi = 0;
- let map: Map = a.map;
-
- try {
- // Build a fast loop over the smi array.
- for (; k < len; k = k + 1) {
- // Ensure that the map didn't change.
- if (map != a.map) goto Slow;
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= a.length) goto Slow;
-
- try {
- let value: Object =
- LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
- Call(context, callbackfn, thisArg, value, k, a);
- }
- label FoundHole {
- // If we found the hole, we need to bail out if the initial
- // array prototype has had elements inserted. This is preferable
- // to walking the prototype chain looking for elements.
-
- if (IsNoElementsProtectorCellInvalid()) goto Bailout(k);
- }
- }
- }
- label Slow {
- goto Bailout(k);
- }
- }
-
- macro FastArrayForEach(
- context: Context, o: Object, len: Number, callbackfn: Callable,
- thisArg: Object): Object labels
- Bailout(Smi) {
- let k: Smi = 0;
- try {
- let smi_len: Smi = cast<Smi>(len) otherwise Slow;
- let a: JSArray = cast<JSArray>(o) otherwise Slow;
- let map: Map = a.map;
-
- if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
- let elementsKind: ElementsKind = map.elements_kind;
- if (!IsFastElementsKind(elementsKind)) goto Slow;
-
- if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
- VisitAllElements<FixedDoubleArray>(
- context, a, smi_len, callbackfn, thisArg)
- otherwise Bailout;
- } else {
- VisitAllElements<FixedArray>(context, a, smi_len, callbackfn, thisArg)
- otherwise Bailout;
- }
- }
- label Slow {
- goto Bailout(k);
- }
- return Undefined;
- }
-
- // https://tc39.github.io/ecma262/#sec-array.prototype.foreach
- javascript builtin ArrayForEach(
- context: Context, receiver: Object, ...arguments): Object {
- try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
-
- // 1. Let O be ? ToObject(this value).
- let o: Object = ToObject(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- let len: Number =
- ToLength_Inline(context, GetProperty(context, o, 'length'));
-
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto TypeError;
- }
- let callbackfn: Callable =
- cast<Callable>(arguments[0]) otherwise TypeError;
-
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- let thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
-
- // Special cases.
- let k: Smi = 0;
- try {
- return FastArrayForEach(context, o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(k_value: Smi) {
- k = k_value;
- }
-
- return ArrayForEachTorqueContinuation(
- context, o, len, callbackfn, thisArg, k);
- }
- label TypeError {
- ThrowTypeError(context, kCalledNonCallable, arguments[0]);
- }
- label NullOrUndefinedError {
- ThrowTypeError(
- context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
- }
- }
}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 9b4d02d0ce..1c9acdd5c6 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -2,21 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-type Arguments generates 'CodeStubArguments*';
+type Arguments constexpr 'CodeStubArguments*';
type void generates 'void';
type never generates 'void';
-type Object generates 'TNode<Object>';
+type Tagged generates 'TNode<Object>';
+type Smi extends Tagged generates 'TNode<Smi>';
+type HeapObject extends Tagged generates 'TNode<HeapObject>';
+type Object = Smi|HeapObject;
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
+type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
+type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
+type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
+type float32 generates 'TNode<Float32T>' constexpr 'float';
type float64 generates 'TNode<Float64T>' constexpr 'double';
type bool generates 'TNode<BoolT>' constexpr 'bool';
+type string constexpr 'const char*';
-type int31 extends int32 generates 'TNode<Int32T>' constexpr 'int32_t';
+type int31 extends int32 generates 'TNode<Int32T>' constexpr 'int31_t';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
-type Number extends Object generates 'TNode<Number>';
-type Smi extends Number generates 'TNode<Smi>';
-type HeapObject extends Object generates 'TNode<HeapObject>';
type AbstractCode extends HeapObject generates 'TNode<AbstractCode>';
type Code extends AbstractCode generates 'TNode<Code>';
type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
@@ -24,108 +29,183 @@ type Context extends HeapObject generates 'TNode<Context>';
type String extends HeapObject generates 'TNode<String>';
type Oddball extends HeapObject generates 'TNode<Oddball>';
type HeapNumber extends HeapObject generates 'TNode<HeapNumber>';
+type Number = Smi|HeapNumber;
+type BigInt extends HeapObject generates 'TNode<BigInt>';
+type Numeric = Number|BigInt;
type Boolean extends Oddball generates 'TNode<Oddball>';
-type JSArray extends HeapObject generates 'TNode<JSArray>';
-type Callable extends JSReceiver generates 'TNode<JSReceiver>';
-type JSFunction extends Callable generates 'TNode<JSFunction>';
+type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
+type JSObject extends JSReceiver generates 'TNode<JSObject>';
+type JSArray extends JSObject generates 'TNode<JSArray>';
+type JSFunction extends JSObject generates 'TNode<JSFunction>';
+type JSBoundFunction extends JSObject generates 'TNode<JSBoundFunction>';
+type Callable = JSFunction|JSBoundFunction|JSProxy;
type Map extends HeapObject generates 'TNode<Map>';
type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
type FixedDoubleArray extends FixedArrayBase generates
'TNode<FixedDoubleArray>';
-
-type JSArrayBuffer extends Object generates 'TNode<JSArrayBuffer>';
-type JSArrayBufferView extends Object generates 'TNode<JSArrayBufferView>';
+type FixedTypedArrayBase extends FixedArrayBase generates
+'TNode<FixedTypedArrayBase>';
+type FixedTypedArray extends FixedTypedArrayBase generates
+'TNode<FixedTypedArray>';
+type NumberDictionary extends HeapObject generates 'TNode<NumberDictionary>';
+
+type JSArrayBuffer extends JSObject generates 'TNode<JSArrayBuffer>';
+type JSArrayBufferView extends JSObject generates 'TNode<JSArrayBufferView>';
type JSTypedArray extends JSArrayBufferView generates 'TNode<JSTypedArray>';
+type JSDataView extends JSArrayBufferView generates 'TNode<JSDataView>';
-type InstanceType extends int32 generates 'TNode<Int32T>';
+type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
type LanguageMode generates 'TNode<Smi>' constexpr 'LanguageMode';
type ExtractFixedArrayFlags generates
'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
-
-type MessageTemplate;
-type HasPropertyFlag generates 'HasPropertyLookupMode';
-
-const PACKED_SMI_ELEMENTS: constexpr ElementsKind = 'PACKED_SMI_ELEMENTS';
-const HOLEY_SMI_ELEMENTS: constexpr ElementsKind = 'HOLEY_SMI_ELEMENTS';
-const PACKED_ELEMENTS: constexpr ElementsKind = 'PACKED_ELEMENTS';
-const HOLEY_ELEMENTS: constexpr ElementsKind = 'HOLEY_ELEMENTS';
-const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind = 'PACKED_DOUBLE_ELEMENTS';
-const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind = 'HOLEY_DOUBLE_ELEMENTS';
-
-const UINT8_ELEMENTS: constexpr ElementsKind = 'UINT8_ELEMENTS';
-const INT8_ELEMENTS: constexpr ElementsKind = 'INT8_ELEMENTS';
-const UINT16_ELEMENTS: constexpr ElementsKind = 'UINT16_ELEMENTS';
-const INT16_ELEMENTS: constexpr ElementsKind = 'INT16_ELEMENTS';
-const UINT32_ELEMENTS: constexpr ElementsKind = 'UINT32_ELEMENTS';
-const INT32_ELEMENTS: constexpr ElementsKind = 'INT32_ELEMENTS';
-const FLOAT32_ELEMENTS: constexpr ElementsKind = 'FLOAT32_ELEMENTS';
-const FLOAT64_ELEMENTS: constexpr ElementsKind = 'FLOAT64_ELEMENTS';
-const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind = 'UINT8_CLAMPED_ELEMENTS';
-const BIGUINT64_ELEMENTS: constexpr ElementsKind = 'BIGUINT64_ELEMENTS';
-const BIGINT64_ELEMENTS: constexpr ElementsKind = 'BIGINT64_ELEMENTS';
-
-const kAllFixedArrays: constexpr ExtractFixedArrayFlags =
- 'ExtractFixedArrayFlag::kAllFixedArrays';
-
-const kCOWMap: Map = 'LoadRoot(Heap::kFixedCOWArrayMapRootIndex)';
-const kEmptyFixedArray: FixedArrayBase =
- 'UncheckedCast<FixedArrayBase>(LoadRoot(Heap::kEmptyFixedArrayRootIndex))';
-
-const kInvalidArrayLengthMessage: MessageTemplate =
- 'MessageTemplate::kInvalidArrayLength';
-const kCalledNonCallable: MessageTemplate =
- 'MessageTemplate::kCalledNonCallable';
-const kCalledOnNullOrUndefined: MessageTemplate =
- 'MessageTemplate::kCalledOnNullOrUndefined';
-
-const kHasProperty: HasPropertyFlag = 'kHasProperty';
-
-const kMaxSafeInteger: constexpr float64 = 'kMaxSafeInteger';
-
-const kNotTypedArray: MessageTemplate = 'MessageTemplate::kNotTypedArray';
-const kDetachedOperation: MessageTemplate =
- 'MessageTemplate::kDetachedOperation';
-const kBadSortComparisonFunction: MessageTemplate =
- 'MessageTemplate::kBadSortComparisonFunction';
-
-const Hole: Oddball = 'TheHoleConstant()';
-const Null: Oddball = 'NullConstant()';
-const Undefined: Oddball = 'UndefinedConstant()';
-const True: Boolean = 'TrueConstant()';
-const False: Boolean = 'FalseConstant()';
-const true: constexpr bool = 'true';
-const false: constexpr bool = 'false';
-
-const strict: constexpr LanguageMode = 'LanguageMode::kStrict';
-const sloppy: constexpr LanguageMode = 'LanguageMode::kSloppy';
-
+type ParameterMode generates 'TNode<Int32T>' constexpr 'ParameterMode';
+type RootListIndex generates 'TNode<Int32T>' constexpr 'Heap::RootListIndex';
+
+type MessageTemplate constexpr 'MessageTemplate';
+type HasPropertyLookupMode constexpr 'HasPropertyLookupMode';
+
+type ToIntegerTruncationMode constexpr 'ToIntegerTruncationMode';
+
+const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
+
+const PACKED_SMI_ELEMENTS: constexpr ElementsKind generates
+ 'PACKED_SMI_ELEMENTS';
+const HOLEY_SMI_ELEMENTS: constexpr ElementsKind generates 'HOLEY_SMI_ELEMENTS';
+const PACKED_ELEMENTS: constexpr ElementsKind generates 'PACKED_ELEMENTS';
+const HOLEY_ELEMENTS: constexpr ElementsKind generates 'HOLEY_ELEMENTS';
+const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind generates
+ 'PACKED_DOUBLE_ELEMENTS';
+const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind generates
+ 'HOLEY_DOUBLE_ELEMENTS';
+const DICTIONARY_ELEMENTS: constexpr ElementsKind generates
+ 'DICTIONARY_ELEMENTS';
+
+const UINT8_ELEMENTS: constexpr ElementsKind generates 'UINT8_ELEMENTS';
+const INT8_ELEMENTS: constexpr ElementsKind generates 'INT8_ELEMENTS';
+const UINT16_ELEMENTS: constexpr ElementsKind generates 'UINT16_ELEMENTS';
+const INT16_ELEMENTS: constexpr ElementsKind generates 'INT16_ELEMENTS';
+const UINT32_ELEMENTS: constexpr ElementsKind generates 'UINT32_ELEMENTS';
+const INT32_ELEMENTS: constexpr ElementsKind generates 'INT32_ELEMENTS';
+const FLOAT32_ELEMENTS: constexpr ElementsKind generates 'FLOAT32_ELEMENTS';
+const FLOAT64_ELEMENTS: constexpr ElementsKind generates 'FLOAT64_ELEMENTS';
+const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind generates
+ 'UINT8_CLAMPED_ELEMENTS';
+const BIGUINT64_ELEMENTS: constexpr ElementsKind generates 'BIGUINT64_ELEMENTS';
+const BIGINT64_ELEMENTS: constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
+
+type FixedUint8Array extends FixedTypedArray;
+type FixedInt8Array extends FixedTypedArray;
+type FixedUint16Array extends FixedTypedArray;
+type FixedInt16Array extends FixedTypedArray;
+type FixedUint32Array extends FixedTypedArray;
+type FixedInt32Array extends FixedTypedArray;
+type FixedFloat32Array extends FixedTypedArray;
+type FixedFloat64Array extends FixedTypedArray;
+type FixedUint8ClampedArray extends FixedTypedArray;
+type FixedBigUint64Array extends FixedTypedArray;
+type FixedBigInt64Array extends FixedTypedArray;
+
+const kAllFixedArrays: constexpr ExtractFixedArrayFlags generates
+'ExtractFixedArrayFlag::kAllFixedArrays';
+
+const kFixedCOWArrayMapRootIndex: constexpr RootListIndex generates
+'Heap::kFixedCOWArrayMapRootIndex';
+const kEmptyFixedArrayRootIndex: constexpr RootListIndex generates
+'Heap::kEmptyFixedArrayRootIndex';
+
+const kInvalidArrayLength: constexpr MessageTemplate generates
+'MessageTemplate::kInvalidArrayLength';
+const kCalledNonCallable: constexpr MessageTemplate generates
+'MessageTemplate::kCalledNonCallable';
+const kCalledOnNullOrUndefined: constexpr MessageTemplate generates
+'MessageTemplate::kCalledOnNullOrUndefined';
+
+const kHasProperty: constexpr HasPropertyLookupMode generates 'kHasProperty';
+
+const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
+
+const kTruncateMinusZero: constexpr ToIntegerTruncationMode generates
+'ToIntegerTruncationMode::kTruncateMinusZero';
+
+const kNotTypedArray: constexpr MessageTemplate generates
+'MessageTemplate::kNotTypedArray';
+const kDetachedOperation: constexpr MessageTemplate generates
+'MessageTemplate::kDetachedOperation';
+const kBadSortComparisonFunction: constexpr MessageTemplate generates
+'MessageTemplate::kBadSortComparisonFunction';
+const kIncompatibleMethodReceiver: constexpr MessageTemplate generates
+'MessageTemplate::kIncompatibleMethodReceiver';
+const kInvalidDataViewAccessorOffset: constexpr MessageTemplate generates
+'MessageTemplate::kInvalidDataViewAccessorOffset';
+const kStrictReadOnlyProperty: constexpr MessageTemplate generates
+'MessageTemplate::kStrictReadOnlyProperty';
+
+extern macro TheHoleConstant(): Oddball;
+extern macro NullConstant(): Oddball;
+extern macro UndefinedConstant(): Oddball;
+extern macro TrueConstant(): Boolean;
+extern macro FalseConstant(): Boolean;
+
+const Hole: Oddball = TheHoleConstant();
+const Null: Oddball = NullConstant();
+const Undefined: Oddball = UndefinedConstant();
+const True: Boolean = TrueConstant();
+const False: Boolean = FalseConstant();
+
+const true: constexpr bool generates 'true';
+const false: constexpr bool generates 'false';
+
+const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
+const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
+
+const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS';
+const INTPTR_PARAMETERS: constexpr ParameterMode generates 'INTPTR_PARAMETERS';
+
+extern macro Is64(): constexpr bool;
+
+extern macro Print(constexpr string);
+extern macro Print(constexpr string, Object);
extern macro Print(Object);
extern macro DebugBreak();
extern macro ToInteger_Inline(Context, Object): Number;
+extern macro ToInteger_Inline(
+ Context, Object, constexpr ToIntegerTruncationMode): Number;
extern macro ToLength_Inline(Context, Object): Number;
extern macro ToNumber_Inline(Context, Object): Number;
extern macro ToString_Inline(Context, Object): String;
extern macro GetProperty(Context, Object, Object): Object;
-extern macro HasProperty(HeapObject, Object, Context, HasPropertyFlag): Oddball;
-extern macro ThrowRangeError(Context, MessageTemplate): never;
-extern macro ThrowTypeError(Context, MessageTemplate): never;
-extern macro ThrowTypeError(Context, MessageTemplate, Object): never;
+extern macro HasProperty(
+ HeapObject, Object, Context, constexpr HasPropertyLookupMode): Oddball;
+extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
+extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
+extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
+extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object, Object,
+ Object): never;
extern macro ArraySpeciesCreate(Context, Object, Number): Object;
extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout;
-extern builtin ToObject(Context, Object): Object;
+extern builtin ToObject(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
+extern macro IsTheHole(Object): bool;
+extern macro IsString(HeapObject): bool;
+extern builtin ToString(Context, Object): String;
extern runtime CreateDataProperty(Context, Object, Object, Object);
extern runtime SetProperty(Context, Object, Object, Object, LanguageMode);
extern runtime DeleteProperty(Context, Object, Object, LanguageMode);
+extern macro LoadRoot(constexpr RootListIndex): Object;
+extern macro StoreRoot(constexpr RootListIndex, Object): Object;
+extern macro LoadAndUntagToWord32Root(constexpr RootListIndex): int32;
+
extern runtime StringEqual(Context, String, String): Oddball;
+extern builtin StringLessThan(Context, String, String): Boolean;
+
+extern macro StrictEqual(Object, Object): Boolean;
+extern runtime SmiLexicographicCompare(Context, Object, Object): Number;
-extern operator '==' macro Word32Equal(int32, int32): bool;
-extern operator '!=' macro Word32NotEqual(int32, int32): bool;
extern operator '<' macro Int32LessThan(int32, int32): bool;
extern operator '>' macro Int32GreaterThan(int32, int32): bool;
extern operator '<=' macro Int32LessThanOrEqual(int32, int32): bool;
@@ -140,18 +220,24 @@ extern operator '>=' macro SmiGreaterThanOrEqual(Smi, Smi): bool;
extern operator '==' macro ElementsKindEqual(
constexpr ElementsKind, constexpr ElementsKind): constexpr bool;
+extern operator '==' macro ElementsKindEqual(ElementsKind, ElementsKind): bool;
extern macro IsFastElementsKind(constexpr ElementsKind): constexpr bool;
+extern macro IsDoubleElementsKind(constexpr ElementsKind): constexpr bool;
extern macro SmiAbove(Smi, Smi): bool;
extern operator '==' macro WordEqual(intptr, intptr): bool;
+extern operator '==' macro WordEqual(uintptr, uintptr): bool;
extern operator '!=' macro WordNotEqual(intptr, intptr): bool;
+extern operator '!=' macro WordNotEqual(uintptr, uintptr): bool;
extern operator '<' macro IntPtrLessThan(intptr, intptr): bool;
extern operator '>' macro IntPtrGreaterThan(intptr, intptr): bool;
extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
+extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '==' macro Float64Equal(float64, float64): bool;
+extern operator '!=' macro Float64NotEqual(float64, float64): bool;
extern operator
'<' macro BranchIfNumberLessThan(Number, Number): never labels Taken, NotTaken;
@@ -170,11 +256,34 @@ extern operator '!=' macro WordNotEqual(Object, Object): bool;
extern operator '+' macro SmiAdd(Smi, Smi): Smi;
extern operator '-' macro SmiSub(Smi, Smi): Smi;
+extern operator '&' macro SmiAnd(Smi, Smi): Smi;
extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
-extern operator '>>>' macro WordShr(intptr, intptr): intptr;
+extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
+extern operator '<<' macro WordShl(intptr, intptr): intptr;
+extern operator '&' macro WordAnd(intptr, intptr): intptr;
+extern operator '&' macro WordAnd(uintptr, uintptr): uintptr;
+
+extern operator '+' macro Int32Add(int32, int32): int32;
+extern operator '-' macro Int32Sub(int32, int32): int32;
+extern operator '*' macro Int32Mul(int32, int32): int32;
+extern operator '%' macro Int32Mod(int32, int32): int32;
+extern operator '&' macro Word32And(int32, int32): int32;
+extern operator '&' macro Word32And(uint32, uint32): uint32;
+extern operator '==' macro
+ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
+
+extern operator '==' macro Word32Equal(int32, int32): bool;
+extern operator '==' macro Word32Equal(uint32, uint32): bool;
+extern operator '!=' macro Word32NotEqual(int32, int32): bool;
+extern operator '!=' macro Word32NotEqual(uint32, uint32): bool;
+extern operator '>>>' macro Word32Shr(uint32, uint32): uint32;
+extern operator '<<' macro Word32Shl(int32, int32): int32;
+extern operator '<<' macro Word32Shl(uint32, uint32): uint32;
+extern operator '|' macro Word32Or(int32, int32): int32;
+extern operator '|' macro Word32Or(uint32, uint32): uint32;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
@@ -186,51 +295,286 @@ extern operator '!' macro Word32BinaryNot(bool): bool;
extern operator '.map' macro LoadMap(HeapObject): Map;
extern operator '.map=' macro StoreMap(HeapObject, Map);
-extern operator '.instanceType' macro LoadInstanceType(Object): InstanceType;
+extern operator
+'.instanceType' macro LoadInstanceType(HeapObject): InstanceType;
extern operator '.length' macro LoadStringLengthAsWord(String): intptr;
-extern operator '.length' macro GetArgumentsLength(Arguments): intptr;
-extern operator '[]' macro GetArgumentValue(Arguments, intptr): Object;
-extern operator '[]' macro GetArgumentValueSmiIndex(Arguments, Smi): Object;
+extern operator '.length' macro GetArgumentsLength(constexpr Arguments): intptr;
+extern operator
+'[]' macro GetArgumentValue(constexpr Arguments, intptr): Object;
extern operator 'is<Smi>' macro TaggedIsSmi(Object): bool;
extern operator 'isnt<Smi>' macro TaggedIsNotSmi(Object): bool;
-
-extern operator
-'cast<>' macro TaggedToHeapObject(Object): HeapObject labels CastError;
-extern operator 'cast<>' macro TaggedToSmi(Object): Smi labels CastError;
-extern operator
-'cast<>' macro TaggedToJSArray(Object): JSArray labels CastError;
-extern operator
-'cast<>' macro TaggedToCallable(Object): Callable labels CastError;
-extern operator 'cast<>' macro ConvertFixedArrayBaseToFixedArray(
- FixedArrayBase): FixedArray labels CastError;
-extern operator 'cast<>' macro ConvertFixedArrayBaseToFixedDoubleArray(
- FixedArrayBase): FixedDoubleArray labels CastError;
+extern macro TaggedIsPositiveSmi(Object): bool;
+
+extern macro TaggedToJSDataView(Object): JSDataView labels CastError;
+extern macro TaggedToHeapObject(Object): HeapObject labels CastError;
+extern macro TaggedToSmi(Object): Smi labels CastError;
+extern macro TaggedToJSArray(Object): JSArray labels CastError;
+extern macro TaggedToCallable(Object): Callable labels CastError;
+extern macro ConvertFixedArrayBaseToFixedArray(FixedArrayBase):
+ FixedArray labels CastError;
+extern macro ConvertFixedArrayBaseToFixedDoubleArray(FixedArrayBase):
+ FixedDoubleArray labels CastError;
+extern macro TaggedToNumber(Object): Number labels CastError;
+
+macro cast<A : type>(o: Object): A labels CastError;
+cast<Number>(o: Object): Number labels CastError {
+ return TaggedToNumber(o) otherwise CastError;
+}
+cast<HeapObject>(o: Object): HeapObject labels CastError {
+ return TaggedToHeapObject(o) otherwise CastError;
+}
+cast<Smi>(o: Object): Smi labels CastError {
+ return TaggedToSmi(o) otherwise CastError;
+}
+cast<JSDataView>(o: Object): JSDataView labels CastError {
+ return TaggedToJSDataView(o) otherwise CastError;
+}
+cast<Callable>(o: Object): Callable labels CastError {
+ return TaggedToCallable(o) otherwise CastError;
+}
+cast<JSArray>(o: Object): JSArray labels CastError {
+ return TaggedToJSArray(o) otherwise CastError;
+}
+macro cast<A : type>(o: FixedArrayBase): A labels CastError;
+cast<FixedArray>(o: FixedArrayBase): FixedArray labels CastError {
+ return ConvertFixedArrayBaseToFixedArray(o) otherwise CastError;
+}
+cast<FixedDoubleArray>(o: FixedArrayBase): FixedDoubleArray labels CastError {
+ return ConvertFixedArrayBaseToFixedDoubleArray(o) otherwise CastError;
+}
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
+extern macro ChangeInt32ToTagged(int32): Number;
+extern macro ChangeUint32ToTagged(uint32): Number;
+extern macro Unsigned(int32): uint32;
+extern macro Unsigned(intptr): uintptr;
+extern macro Unsigned(RawPtr): uintptr;
+extern macro Signed(uint32): int32;
+extern macro Signed(uintptr): intptr;
+extern macro Signed(RawPtr): intptr;
+extern macro TruncateIntPtrToInt32(intptr): int32;
+extern macro SmiTag(intptr): Smi;
+extern macro SmiFromInt32(int32): Smi;
+extern macro SmiUntag(Smi): intptr;
+extern macro SmiToInt32(Smi): int32;
+extern macro RoundIntPtrToFloat64(intptr): float64;
+extern macro LoadHeapNumberValue(HeapNumber): float64;
+extern macro ChangeFloat32ToFloat64(float32): float64;
+extern macro ChangeNumberToFloat64(Number): float64;
+extern macro ChangeFloat64ToUintPtr(float64): uintptr;
+extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
+extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
+
+extern macro NumberConstant(constexpr float64): Number;
+extern macro NumberConstant(constexpr int32): Number;
+extern macro IntPtrConstant(constexpr int31): intptr;
+extern macro IntPtrConstant(constexpr int32): intptr;
+extern macro Int32Constant(constexpr int31): int31;
+extern macro Int32Constant(constexpr int32): int32;
+extern macro Float64Constant(constexpr int31): float64;
+extern macro SmiConstant(constexpr int31): Smi;
+extern macro BoolConstant(constexpr bool): bool;
+extern macro StringConstant(constexpr string): String;
+extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
+extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
+
+macro from_constexpr<A : type>(o: constexpr int31): A;
+from_constexpr<intptr>(i: constexpr int31): intptr {
+ return IntPtrConstant(i);
+}
+from_constexpr<int31>(i: constexpr int31): int31 {
+ return Int32Constant(i);
+}
+from_constexpr<int32>(i: constexpr int31): int32 {
+ return Int32Constant(i);
+}
+from_constexpr<uint32>(i: constexpr int31): uint32 {
+ return Unsigned(Int32Constant(i));
+}
+from_constexpr<uintptr>(i: constexpr int31): uintptr {
+ return ChangeUint32ToWord(i);
+}
+from_constexpr<Smi>(i: constexpr int31): Smi {
+ return SmiConstant(i);
+}
+from_constexpr<Number>(i: constexpr int31): Number {
+ return SmiConstant(i);
+}
+from_constexpr<float64>(i: constexpr int31): float64 {
+ return Float64Constant(i);
+}
+macro from_constexpr<A : type>(o: constexpr int32): A;
+from_constexpr<intptr>(i: constexpr int32): intptr {
+ return IntPtrConstant(i);
+}
+from_constexpr<int32>(i: constexpr int32): int32 {
+ return Int32Constant(i);
+}
+from_constexpr<Number>(i: constexpr int32): Number {
+ return NumberConstant(i);
+}
+macro from_constexpr<A : type>(o: constexpr float64): A;
+from_constexpr<Number>(f: constexpr float64): Number {
+ return NumberConstant(f);
+}
+macro from_constexpr<A : type>(b: constexpr bool): A;
+from_constexpr<bool>(b: constexpr bool): bool {
+ return BoolConstant(b);
+}
+macro from_constexpr<A : type>(l: constexpr LanguageMode): A;
+from_constexpr<LanguageMode>(b: constexpr LanguageMode): LanguageMode {
+ return LanguageModeConstant(b);
+}
+macro from_constexpr<A : type>(e: constexpr ElementsKind): A;
+from_constexpr<ElementsKind>(e: constexpr ElementsKind): ElementsKind {
+ return Int32Constant(e);
+}
+macro from_constexpr<A : type>(s: constexpr string): A;
+from_constexpr<String>(s: constexpr string): String {
+ return StringConstant(s);
+}
+from_constexpr<Object>(s: constexpr string): Object {
+ return StringConstant(s);
+}
-extern implicit operator
-'convert<>' macro AllocateHeapNumberWithValue(constexpr float64): Number;
-extern implicit operator
-'convert<>' macro IntPtrConstant(constexpr int31): intptr;
-extern implicit operator
-'convert<>' macro Int32Constant(constexpr int31): int32;
-extern implicit operator 'convert<>' macro SmiConstant(constexpr int31): Smi;
-extern implicit operator
-'convert<>' macro NumberConstant(constexpr int31): Number;
-extern implicit operator 'convert<>' macro BoolConstant(constexpr bool): bool;
-extern implicit operator 'convert<>' macro LanguageModeConstant(
- constexpr LanguageMode): LanguageMode;
-
-extern implicit operator 'convert<>' macro SmiFromInt32(ElementsKind): Smi;
-
-extern operator 'convert<>' macro ChangeInt32ToTagged(int32): Number;
-extern operator 'convert<>' macro TruncateWordToWord32(intptr): int32;
-extern operator 'convert<>' macro SmiTag(intptr): Smi;
-extern operator 'convert<>' macro SmiFromInt32(int32): Smi;
-extern operator 'convert<>' macro SmiUntag(Smi): intptr;
+macro convert<A : type>(i: constexpr int31): A {
+ return i;
+}
+macro convert<A : type>(i: int32): A;
+convert<Number>(i: int32): Number {
+ return ChangeInt32ToTagged(i);
+}
+convert<intptr>(i: int32): intptr {
+ return ChangeInt32ToIntPtr(i);
+}
+convert<Smi>(i: int32): Smi {
+ return SmiFromInt32(i);
+}
+macro convert<A : type>(ui: uint32): A;
+convert<Number>(ui: uint32): Number {
+ return ChangeUint32ToTagged(ui);
+}
+convert<Smi>(ui: uint32): Smi {
+ return SmiFromInt32(Signed(ui));
+}
+convert<uintptr>(ui: uint32): uintptr {
+ return ChangeUint32ToWord(ui);
+}
+macro convert<A : type>(i: intptr): A;
+convert<int32>(i: intptr): int32 {
+ return TruncateIntPtrToInt32(i);
+}
+convert<Smi>(i: intptr): Smi {
+ return SmiTag(i);
+}
+macro convert<A : type>(ui: uintptr): A;
+convert<uint32>(ui: uintptr): uint32 {
+ return Unsigned(TruncateIntPtrToInt32(Signed(ui)));
+}
+macro convert<A : type>(s: Smi): A;
+convert<intptr>(s: Smi): intptr {
+ return SmiUntag(s);
+}
+convert<int32>(s: Smi): int32 {
+ return SmiToInt32(s);
+}
+macro convert<A : type>(h: HeapNumber): A;
+convert<float64>(h: HeapNumber): float64 {
+ return LoadHeapNumberValue(h);
+}
+macro convert<A : type>(n: Number): A;
+convert<float64>(n: Number): float64 {
+ return ChangeNumberToFloat64(n);
+}
+macro convert<A : type>(f: float32): A;
+convert<float64>(f: float32): float64 {
+ return ChangeFloat32ToFloat64(f);
+}
+macro convert<A : type>(d: float64): A;
+convert<Number>(d: float64): Number {
+ return AllocateHeapNumberWithValue(d);
+}
+convert<uintptr>(d: float64): uintptr {
+ return ChangeFloat64ToUintPtr(d);
+}
+macro convert<A : type>(r: RawPtr): A;
+convert<uintptr>(r: RawPtr): uintptr {
+ return Unsigned(r);
+}
+convert<intptr>(r: RawPtr): intptr {
+ return Signed(r);
+}
+
+extern macro UnsafeCastNumberToHeapNumber(Number): HeapNumber;
+extern macro UnsafeCastObjectToFixedArrayBase(Object): FixedArrayBase;
+extern macro UnsafeCastObjectToFixedArray(Object): FixedArray;
+extern macro UnsafeCastObjectToFixedDoubleArray(Object): FixedDoubleArray;
+extern macro UnsafeCastObjectToHeapNumber(Object): HeapNumber;
+extern macro UnsafeCastObjectToCallable(Object): Callable;
+extern macro UnsafeCastObjectToSmi(Object): Smi;
+extern macro UnsafeCastObjectToNumber(Object): Number;
+extern macro UnsafeCastObjectToHeapObject(Object): HeapObject;
+extern macro UnsafeCastObjectToJSArray(Object): JSArray;
+extern macro UnsafeCastObjectToFixedTypedArrayBase(Object): FixedTypedArrayBase;
+extern macro UnsafeCastObjectToNumberDictionary(Object): NumberDictionary;
+extern macro UnsafeCastObjectToJSReceiver(Object): JSReceiver;
+extern macro UnsafeCastObjectToJSObject(Object): JSObject;
+extern macro UnsafeCastObjectToMap(Object): Map;
+
+macro unsafe_cast<A : type>(n: Number): A;
+unsafe_cast<HeapNumber>(n: Number): HeapNumber {
+ return UnsafeCastNumberToHeapNumber(n);
+}
+macro unsafe_cast<A : type>(o: Object): A;
+unsafe_cast<FixedArray>(o: Object): FixedArray {
+ return UnsafeCastObjectToFixedArray(o);
+}
+unsafe_cast<FixedDoubleArray>(o: Object): FixedDoubleArray {
+ return UnsafeCastObjectToFixedDoubleArray(o);
+}
+unsafe_cast<HeapNumber>(o: Object): HeapNumber {
+ return UnsafeCastObjectToHeapNumber(o);
+}
+unsafe_cast<Callable>(o: Object): Callable {
+ return UnsafeCastObjectToCallable(o);
+}
+unsafe_cast<Smi>(o: Object): Smi {
+ return UnsafeCastObjectToSmi(o);
+}
+unsafe_cast<Number>(o: Object): Number {
+ return UnsafeCastObjectToNumber(o);
+}
+unsafe_cast<HeapObject>(o: Object): HeapObject {
+ return UnsafeCastObjectToHeapObject(o);
+}
+unsafe_cast<JSArray>(o: Object): JSArray {
+ return UnsafeCastObjectToJSArray(o);
+}
+unsafe_cast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
+ return UnsafeCastObjectToFixedTypedArrayBase(o);
+}
+unsafe_cast<NumberDictionary>(o: Object): NumberDictionary {
+ return UnsafeCastObjectToNumberDictionary(o);
+}
+unsafe_cast<JSReceiver>(o: Object): JSReceiver {
+ return UnsafeCastObjectToJSReceiver(o);
+}
+unsafe_cast<JSObject>(o: Object): JSObject {
+ return UnsafeCastObjectToJSObject(o);
+}
+unsafe_cast<Map>(o: Object): Map {
+ return UnsafeCastObjectToMap(o);
+}
+unsafe_cast<FixedArrayBase>(o: Object): FixedArrayBase {
+ return UnsafeCastObjectToFixedArrayBase(o);
+}
+
+const kCOWMap: Map = unsafe_cast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kEmptyFixedArray: FixedArrayBase = unsafe_cast<FixedArrayBase>(
+ LoadRoot(kEmptyFixedArrayRootIndex));
extern macro BranchIfFastJSArray(Object, Context): never labels Taken, NotTaken;
extern macro BranchIfNotFastJSArray(Object, Context): never labels Taken,
@@ -251,28 +595,51 @@ extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator
'.elements_kind' macro LoadElementsKind(JSTypedArray): ElementsKind;
-extern operator '.elements' macro LoadElements(Object): FixedArrayBase;
-extern operator '.elements=' macro StoreElements(Object, FixedArrayBase);
+extern operator '.elements' macro LoadElements(JSObject): FixedArrayBase;
+extern operator '.elements=' macro StoreElements(JSObject, FixedArrayBase);
extern operator '.length' macro LoadTypedArrayLength(JSTypedArray): Smi;
extern operator '.length' macro LoadJSArrayLength(JSArray): Number;
+extern operator '.length_fast' macro LoadFastJSArrayLength(JSArray): Smi;
extern operator '.length=' macro StoreJSArrayLength(JSArray, Smi);
extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
extern operator
+'[]' macro LoadFixedArrayElementInt(FixedArray, constexpr int31): Object;
+extern operator
'[]=' macro StoreFixedArrayElement(FixedArray, intptr, Object): void;
extern operator
+'[]=' macro StoreFixedArrayElementInt(
+ FixedArray, constexpr int31, Object): void;
+extern operator
'[]=' macro StoreFixedArrayElementSmi(FixedArray, Smi, Object): void;
+extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
+
+extern macro LoadFixedDoubleArrayElement(FixedDoubleArray, Smi): float64;
+extern macro Float64SilenceNaN(float64): float64;
+
+extern macro StoreFixedDoubleArrayElement(
+ FixedDoubleArray, Object, float64, constexpr ParameterMode);
+macro StoreFixedDoubleArrayElementWithSmiIndex(
+ array: FixedDoubleArray, index: Smi, value: float64) {
+ StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
+}
+
+extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr):
+ Object labels NotData, IfHole;
+extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
+labels NotData, IfHole, ReadOnly;
+
extern macro IsFastElementsKind(ElementsKind): bool;
+extern macro IsDoubleElementsKind(ElementsKind): bool;
extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool;
extern macro IsFastSmiElementsKind(ElementsKind): bool;
extern macro IsHoleyFastElementsKind(ElementsKind): bool;
-extern macro AllocateFixedArray(constexpr ElementsKind, Smi): FixedArray;
-extern macro AllocateFixedArray(constexpr ElementsKind, Smi, Map): FixedArray;
+extern macro AllocateFixedArray(constexpr ElementsKind, intptr): FixedArray;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
@@ -289,17 +656,19 @@ extern macro IsElementsKindGreaterThan(
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
-extern macro Call(Context, Callable, Object, ...): Object;
+extern macro Call(Context, Callable, Object): Object;
+extern macro Call(Context, Callable, Object, Object): Object;
+extern macro Call(Context, Callable, Object, Object, Object): Object;
+extern macro Call(Context, Callable, Object, Object, Object, Object): Object;
+extern macro Call(Context, Callable, Object, Object, Object, Object, Object): Object;
+extern macro Call(Context, Callable, Object, Object, Object, Object, Object, Object): Object;
extern macro ExtractFixedArray(
FixedArray, Smi, Smi, Smi, constexpr ExtractFixedArrayFlags): FixedArray;
extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
-macro LoadElementNoHole<T : type>(a: JSArray, index: Smi): Object
-labels IfHole {
- unreachable;
-}
+macro LoadElementNoHole<T : type>(a: JSArray, index: Smi): Object labels IfHole;
LoadElementNoHole<FixedArray>(a: JSArray, index: Smi): Object
labels IfHole {
@@ -331,9 +700,10 @@ labels IfHole {
}
macro HasPropertyObject(
- o: Object, p: Object, c: Context, f: HasPropertyFlag): Oddball {
+ o: Object, p: Object, c: Context,
+ f: constexpr HasPropertyLookupMode): Oddball {
try {
- return HasProperty(cast<HeapObject>(o) otherwise CastError, p, c, f);
+ return HasProperty((cast<HeapObject>(o) otherwise CastError), p, c, f);
}
label CastError {
return False;
@@ -341,5 +711,41 @@ macro HasPropertyObject(
}
extern macro IsCallable(HeapObject): bool;
+extern macro IsJSArray(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
+extern macro IsHeapNumber(HeapObject): bool;
+extern macro IsExtensibleMap(Map): bool;
+extern macro IsCustomElementsReceiverInstanceType(int32): bool;
+extern macro Typeof(Object): Object;
+
+// Return true iff number is NaN.
+macro NumberIsNaN(number: Number): bool {
+ if (TaggedIsSmi(number)) return false;
+
+ let value: float64 = convert<float64>(unsafe_cast<HeapNumber>(number));
+ return value != value;
+}
+
+extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken;
+
+macro ToBoolean(obj: Object): bool {
+ if (BranchIfToBooleanIsTrue(obj)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+macro ToIndex(input: Object, context: Context): Number labels RangeError {
+ if (input == Undefined) {
+ return 0;
+ }
+
+ let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
+ if (value < 0 || value > kMaxSafeInteger) {
+ goto RangeError;
+ }
+
+ return value;
+}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index c26c1a9fd1..d77bc79238 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -64,7 +64,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, js_receiver,
- ApiNatives::InstantiateObject(instance_template,
+ ApiNatives::InstantiateObject(isolate, instance_template,
Handle<JSReceiver>::cast(new_target)),
Object);
args[0] = *js_receiver;
@@ -80,7 +80,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
// Proxies never need access checks.
DCHECK(js_receiver->IsJSObject());
Handle<JSObject> js_obj_receiver = Handle<JSObject>::cast(js_receiver);
- if (!isolate->MayAccess(handle(isolate->context()), js_obj_receiver)) {
+ if (!isolate->MayAccess(handle(isolate->context(), isolate),
+ js_obj_receiver)) {
isolate->ReportFailedAccessCheck(js_obj_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
@@ -222,7 +223,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
argv[cursor--] = *args[i];
}
DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
- argv[BuiltinArguments::kPaddingOffset] = isolate->heap()->the_hole_value();
+ argv[BuiltinArguments::kPaddingOffset] =
+ ReadOnlyRoots(isolate).the_hole_value();
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
argv[BuiltinArguments::kTargetOffset] = *function;
argv[BuiltinArguments::kNewTargetOffset] = *new_target;
@@ -260,7 +262,7 @@ V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// right answer.
new_target = obj;
} else {
- new_target = isolate->heap()->undefined_value();
+ new_target = ReadOnlyRoots(isolate).undefined_value();
}
// Get the invocation callback from the function descriptor that was
@@ -284,7 +286,7 @@ V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
- result = isolate->heap()->undefined_value();
+ result = ReadOnlyRoots(isolate).undefined_value();
} else {
result = *result_handle;
}
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index ba5ed13f32..09c725fe37 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -12,6 +12,7 @@
#include "src/frame-constants.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments.h"
namespace v8 {
namespace internal {
@@ -44,7 +45,7 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
CSA_SLOW_ASSERT(this, HasInstanceType(shared, SHARED_FUNCTION_INFO_TYPE));
Node* formal_parameter_count =
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
- MachineType::Int32());
+ MachineType::Uint16());
formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
argument_count.Bind(formal_parameter_count);
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 0bfbf7b417..fd08639c72 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-array-gen.h"
+
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-typed-array-gen.h"
@@ -10,8 +12,7 @@
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
#include "src/heap/factory-inl.h"
-
-#include "src/builtins/builtins-array-gen.h"
+#include "src/objects/arguments-inl.h"
namespace v8 {
namespace internal {
@@ -211,10 +212,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
- CSA_ASSERT(
- this,
- SmiLessThanOrEqual(
- CAST(len_), CAST(LoadObjectField(a, JSTypedArray::kLengthOffset))));
+ CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadInstanceType(LoadElements(original_array)),
LoadInstanceType(LoadElements(a)));
@@ -248,7 +246,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
transition_smi_double(this);
Label array_not_smi(this), array_fast(this), array_double(this);
- Node* kind = LoadMapElementsKind(LoadMap(a()));
+ TNode<Int32T> kind = LoadElementsKind(a());
Node* elements = LoadElements(a());
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
@@ -296,9 +294,18 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Node* const native_context = LoadNativeContext(context());
Node* const double_map = LoadContextElement(
native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
- CallStub(CodeFactory::TransitionElementsKind(
- isolate(), HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, true),
- context(), a(), double_map);
+
+ const ElementsKind kFromKind = HOLEY_SMI_ELEMENTS;
+ const ElementsKind kToKind = HOLEY_DOUBLE_ELEMENTS;
+ const bool kIsJSArray = true;
+
+ Label transition_in_runtime(this, Label::kDeferred);
+ TransitionElementsKind(a(), double_map, kFromKind, kToKind, kIsJSArray,
+ &transition_in_runtime);
+ Goto(&array_double);
+
+ BIND(&transition_in_runtime);
+ CallRuntime(Runtime::kTransitionElementsKind, context(), a(), double_map);
Goto(&array_double);
}
@@ -419,28 +426,34 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
merged_length = LoadJSArrayLength(CAST(o()));
Goto(&has_length);
+
BIND(&not_js_array);
- Node* len_property =
- GetProperty(context(), o(), isolate()->factory()->length_string());
- merged_length = ToLength_Inline(context(), len_property);
- Goto(&has_length);
+ {
+ Node* len_property =
+ GetProperty(context(), o(), isolate()->factory()->length_string());
+ merged_length = ToLength_Inline(context(), len_property);
+ Goto(&has_length);
+ }
BIND(&has_length);
- len_ = merged_length.value();
+ {
+ len_ = merged_length.value();
- // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
- Label type_exception(this, Label::kDeferred);
- Label done(this);
- GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
- Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
+ // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ Label type_exception(this, Label::kDeferred);
+ Label done(this);
+ GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
+ Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
- BIND(&throw_null_undefined_exception);
- ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined, name);
+ BIND(&throw_null_undefined_exception);
+ ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined,
+ name);
- BIND(&type_exception);
- ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
- callbackfn());
+ BIND(&type_exception);
+ ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
+ callbackfn());
- BIND(&done);
+ BIND(&done);
+ }
// 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
// [Already done by the arguments adapter]
@@ -501,7 +514,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);
- len_ = LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
+ len_ = LoadTypedArrayLength(typed_array);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -545,6 +558,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
} else {
k_.Bind(NumberDec(len()));
}
+ CSA_ASSERT(this, IsSafeInteger(k()));
Node* instance_type = LoadInstanceType(LoadElements(typed_array));
Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
@@ -586,10 +600,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Label done_element(this, &to_);
// a. Let Pk be ToString(k).
- // We never have to perform a ToString conversion as the above guards
- // guarantee that we have a positive {k} which also is a valid array
- // index in the range [0, 2^32-1).
- CSA_ASSERT(this, IsNumberArrayIndex(k()));
+ // k() is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+ CSA_ASSERT(this, IsSafeInteger(k()));
if (missing_property_mode == MissingPropertyMode::kSkip) {
// b. Let kPresent be HasProperty(O, Pk).
@@ -902,9 +915,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@@ -953,7 +966,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
GotoIf(Int32LessThanOrEqual(elements_kind,
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
@@ -994,10 +1007,12 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&runtime);
{
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- TailCallStub(CodeFactory::ArrayPop(isolate()), context, target,
- UndefinedConstant(), argc);
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
+ TailCallBuiltin(Builtins::kArrayPop, context, target, UndefinedConstant(),
+ argc);
}
}
@@ -1014,9 +1029,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@@ -1126,10 +1141,12 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&runtime);
{
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- TailCallStub(CodeFactory::ArrayPush(isolate()), context, target,
- UndefinedConstant(), argc);
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
+ TailCallBuiltin(Builtins::kArrayPush, context, target, UndefinedConstant(),
+ argc);
}
}
@@ -1170,8 +1187,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
CSA_ASSERT(this, SmiGreaterThanOrEqual(CAST(from), SmiConstant(0)));
- result.Bind(CallStub(CodeFactory::ExtractFastJSArray(isolate()), context,
- array, from, count));
+ result.Bind(CallBuiltin(Builtins::kExtractFastJSArray, context, array, from,
+ count));
Goto(&done);
BIND(&try_fast_arguments);
@@ -1297,8 +1314,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
@@ -1326,7 +1343,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&clone);
args.PopAndReturn(
- CallStub(CodeFactory::CloneFastJSArray(isolate()), context, receiver));
+ CallBuiltin(Builtins::kCloneFastJSArray, context, receiver));
BIND(&check_arguments_length);
@@ -1472,9 +1489,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@@ -1531,7 +1548,7 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
&fast_elements_smi);
@@ -1623,10 +1640,12 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&runtime);
{
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- TailCallStub(CodeFactory::ArrayShift(isolate()), context, target,
- UndefinedConstant(), argc);
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
+ TailCallBuiltin(Builtins::kArrayShift, context, target, UndefinedConstant(),
+ argc);
}
}
@@ -1736,9 +1755,9 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1828,9 +1847,9 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
// ES #sec-get-%typedarray%.prototype.findIndex
TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1988,9 +2007,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
// ES #sec-array.from
TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
@@ -2182,10 +2201,10 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
// ES #sec-array.of
TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Smi> length = SmiFromInt32(argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
@@ -2208,9 +2227,9 @@ TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2227,9 +2246,9 @@ TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
// ES #sec-get-%typedarray%.prototype.findIndex
TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2245,9 +2264,9 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2325,9 +2344,9 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2344,9 +2363,9 @@ TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2424,9 +2443,9 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2443,9 +2462,9 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2522,9 +2541,9 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2542,9 +2561,9 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2624,9 +2643,9 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2646,9 +2665,9 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2742,9 +2761,9 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2820,9 +2839,9 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2839,9 +2858,9 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2887,7 +2906,8 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
enum SearchVariant { kIncludes, kIndexOf };
- void Generate(SearchVariant variant);
+ void Generate(SearchVariant variant, TNode<IntPtrT> argc,
+ TNode<Context> context);
void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
Node* search_element, Node* array_length,
Node* from_index);
@@ -2899,18 +2919,17 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
Node* from_index);
};
-void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
+void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
+ TNode<IntPtrT> argc,
+ TNode<Context> context) {
const int kSearchElementArg = 0;
const int kFromIndexArg = 1;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> search_element =
args.GetOptionalArgumentValue(kSearchElementArg);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* intptr_zero = IntPtrConstant(0);
@@ -2970,7 +2989,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
- Node* elements_kind = LoadMapElementsKind(LoadMap(array));
+ TNode<Int32T> elements_kind = LoadElementsKind(array);
Node* elements = LoadElements(array);
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
@@ -3375,7 +3394,11 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
}
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
- Generate(kIncludes);
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(kIncludes, argc, context);
}
TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
@@ -3409,7 +3432,13 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
from_index);
}
-TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
+TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(kIndexOf, argc, context);
+}
TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -3627,8 +3656,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), &if_detached);
- TNode<Smi> length =
- CAST(LoadObjectField(array, JSTypedArray::kLengthOffset));
+ TNode<Smi> length = LoadTypedArrayLength(CAST(array));
GotoIfNot(SmiBelow(CAST(index), length), &set_done);
@@ -3921,12 +3949,12 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
mapper_function, this_arg));
}
-// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten
-TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
+// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
+TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const context = Parameter(Descriptor::kContext);
Node* const receiver = args.GetReceiver();
Node* const depth = args.GetOptionalArgumentValue(0);
@@ -3967,9 +3995,9 @@ TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const context = Parameter(Descriptor::kContext);
Node* const receiver = args.GetReceiver();
Node* const mapper_function = args.GetOptionalArgumentValue(0);
@@ -4005,5 +4033,405 @@ TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
{ ThrowTypeError(context, MessageTemplate::kMapperFunctionNonCallable); }
}
+TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
+ // This is a trampoline to ArrayConstructorImpl which just adds
+ // allocation_site parameter value and sets new_target if necessary.
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+
+ // If new_target is undefined, then this is the 'Call' case, so set new_target
+ // to function.
+ new_target =
+ SelectConstant<Object>(IsUndefined(new_target), function, new_target);
+
+ // Run the native code for the Array function called as a normal function.
+ TNode<Object> no_allocation_site = UndefinedConstant();
+ TailCallBuiltin(Builtins::kArrayConstructorImpl, context, function,
+ new_target, argc, no_allocation_site);
+}
+
+void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
+ const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
+ TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
+ TNode<Code> code = HeapConstant(callable.code());
+
+ // We are going to call here ArrayNoArgumentsConstructor or
+ // ArraySingleArgumentsConstructor which in addition to the register arguments
+ // also expect some number of arguments on the expression stack.
+ // Since
+ // 1) incoming JS arguments are still on the stack,
+ // 2) the ArrayNoArgumentsConstructor, ArraySingleArgumentsConstructor and
+ // ArrayNArgumentsConstructor are defined so that the register arguments
+ // are passed on the same registers,
+ // in order to be able to generate a tail call to those builtins we do the
+ // following trick here: we tail call to the constructor builtin using
+ // ArrayNArgumentsConstructorDescriptor, so the tail call instruction
+ // pops the current frame but leaves all the incoming JS arguments on the
+ // expression stack so that the target builtin can still find them where it
+ // expects.
+ TailCallStub(ArrayNArgumentsConstructorDescriptor{}, code, context, target,
+ allocation_site_or_undefined, argc);
+}
+
+void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ Callable callable = CodeFactory::ArrayNoArgumentConstructor(
+ isolate(), GetInitialFastElementsKind(), mode);
+
+ TailCallArrayConstructorStub(callable, context, target, UndefinedConstant(),
+ argc);
+ } else {
+ DCHECK_EQ(mode, DONT_OVERRIDE);
+ TNode<Int32T> elements_kind = LoadElementsKind(allocation_site);
+
+ // TODO(ishell): Compute the builtin index dynamically instead of
+ // iterating over all expected elements kinds.
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next(this);
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ GotoIfNot(Word32Equal(elements_kind, Int32Constant(kind)), &next);
+
+ Callable callable =
+ CodeFactory::ArrayNoArgumentConstructor(isolate(), kind, mode);
+
+ TailCallArrayConstructorStub(callable, context, target, allocation_site,
+ argc);
+
+ BIND(&next);
+ }
+
+ // If we reached this point there is a problem.
+ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
+ }
+}
+
+void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+ Callable callable = CodeFactory::ArraySingleArgumentConstructor(
+ isolate(), holey_initial, mode);
+
+ TailCallArrayConstructorStub(callable, context, target, UndefinedConstant(),
+ argc);
+ } else {
+ DCHECK_EQ(mode, DONT_OVERRIDE);
+ TNode<Smi> transition_info = LoadTransitionInfo(allocation_site);
+
+ // Least significant bit in fast array elements kind means holeyness.
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
+
+ Label normal_sequence(this);
+ TVARIABLE(Int32T, var_elements_kind,
+ Signed(DecodeWord32<AllocationSite::ElementsKindBits>(
+ SmiToInt32(transition_info))));
+ // Is the low bit set? If so, we are holey and that is good.
+ int fast_elements_kind_holey_mask =
+ AllocationSite::ElementsKindBits::encode(static_cast<ElementsKind>(1));
+ GotoIf(IsSetSmi(transition_info, fast_elements_kind_holey_mask),
+ &normal_sequence);
+ {
+ // Make elements kind holey and update elements kind in the type info.
+ var_elements_kind =
+ Signed(Word32Or(var_elements_kind.value(), Int32Constant(1)));
+ StoreObjectFieldNoWriteBarrier(
+ allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
+ SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
+ Goto(&normal_sequence);
+ }
+ BIND(&normal_sequence);
+
+ // TODO(ishell): Compute the builtin index dynamically instead of
+ // iterating over all expected elements kinds.
+ // TODO(ishell): Given that the code above ensures that the elements kind
+ // is holey we can skip checking with non-holey elements kinds.
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next(this);
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ GotoIfNot(Word32Equal(var_elements_kind.value(), Int32Constant(kind)),
+ &next);
+
+ Callable callable =
+ CodeFactory::ArraySingleArgumentConstructor(isolate(), kind, mode);
+
+ TailCallArrayConstructorStub(callable, context, target, allocation_site,
+ argc);
+
+ BIND(&next);
+ }
+
+ // If we reached this point there is a problem.
+ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
+ }
+}
+
+void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ Label check_one_case(this), fallthrough(this);
+ GotoIfNot(Word32Equal(argc, Int32Constant(0)), &check_one_case);
+ CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
+
+ BIND(&check_one_case);
+ GotoIfNot(Word32Equal(argc, Int32Constant(1)), &fallthrough);
+ CreateArrayDispatchSingleArgument(context, target, argc, mode,
+ allocation_site);
+
+ BIND(&fallthrough);
+}
+
+TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<HeapObject> maybe_allocation_site =
+ CAST(Parameter(Descriptor::kAllocationSite));
+
+ // Initial map for the builtin Array functions should be Map.
+ CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
+ target, JSFunction::kPrototypeOrInitialMapOffset))));
+
+ // We should either have undefined or a valid AllocationSite
+ CSA_ASSERT(this, Word32Or(IsUndefined(maybe_allocation_site),
+ IsAllocationSite(maybe_allocation_site)));
+
+ // "Enter" the context of the Array function.
+ TNode<Context> context =
+ CAST(LoadObjectField(target, JSFunction::kContextOffset));
+
+ Label runtime(this, Label::kDeferred);
+ GotoIf(WordNotEqual(target, new_target), &runtime);
+
+ Label no_info(this);
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ GotoIf(IsUndefined(maybe_allocation_site), &no_info);
+
+ GenerateDispatchToArrayStub(context, target, argc, DONT_OVERRIDE,
+ CAST(maybe_allocation_site));
+ Goto(&runtime);
+
+ BIND(&no_info);
+ GenerateDispatchToArrayStub(context, target, argc, DISABLE_ALLOCATION_SITES);
+ Goto(&runtime);
+
+ BIND(&runtime);
+ GenerateArrayNArgumentsConstructor(context, target, new_target, argc,
+ maybe_allocation_site);
+}
+
+void ArrayBuiltinsAssembler::GenerateConstructor(
+ Node* context, Node* array_function, Node* array_map, Node* array_size,
+ Node* allocation_site, ElementsKind elements_kind,
+ AllocationSiteMode mode) {
+ Label ok(this);
+ Label smi_size(this);
+ Label small_smi_size(this);
+ Label call_runtime(this, Label::kDeferred);
+
+ Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime);
+
+ BIND(&smi_size);
+
+ if (IsFastPackedElementsKind(elements_kind)) {
+ Label abort(this, Label::kDeferred);
+ Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort);
+
+ BIND(&abort);
+ Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
+ TailCallRuntime(Runtime::kAbort, context, reason);
+ } else {
+ int element_size =
+ IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+ int max_fast_elements =
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
+ AllocationMemento::kSize) /
+ element_size;
+ Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)),
+ &call_runtime, &small_smi_size);
+ }
+
+ BIND(&small_smi_size);
+ {
+ Node* array = AllocateJSArray(
+ elements_kind, array_map, array_size, array_size,
+ mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
+ CodeStubAssembler::SMI_PARAMETERS);
+ Return(array);
+ }
+
+ BIND(&call_runtime);
+ {
+ TailCallRuntime(Runtime::kNewArray, context, array_function, array_size,
+ array_function, allocation_site);
+ }
+}
+
+void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
+ ElementsKind kind, AllocationSiteOverrideMode mode) {
+ typedef ArrayNoArgumentConstructorDescriptor Descriptor;
+ Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
+ JSFunction::kContextOffset);
+ bool track_allocation_site =
+ AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
+ Node* allocation_site =
+ track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
+ Node* array_map = LoadJSArrayElementsMap(kind, native_context);
+ Node* array = AllocateJSArray(
+ kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ SmiConstant(0), allocation_site);
+ Return(array);
+}
+
+void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
+ ElementsKind kind, AllocationSiteOverrideMode mode) {
+ typedef ArraySingleArgumentConstructorDescriptor Descriptor;
+ Node* context = Parameter(Descriptor::kContext);
+ Node* function = Parameter(Descriptor::kFunction);
+ Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
+ Node* array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ AllocationSiteMode allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ if (mode == DONT_OVERRIDE) {
+ allocation_site_mode = AllocationSite::ShouldTrack(kind)
+ ? TRACK_ALLOCATION_SITE
+ : DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
+ Node* allocation_site = Parameter(Descriptor::kAllocationSite);
+
+ GenerateConstructor(context, function, array_map, array_size, allocation_site,
+ kind, allocation_site_mode);
+}
+
+void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Object> new_target,
+ TNode<Int32T> argc, TNode<HeapObject> maybe_allocation_site) {
+ // Replace incoming JS receiver argument with the target.
+ // TODO(ishell): Avoid replacing the target on the stack and just add it
+ // as another additional parameter for Runtime::kNewArray.
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ args.SetReceiver(target);
+
+ // Adjust arguments count for the runtime call: +1 for implicit receiver
+ // and +2 for new_target and maybe_allocation_site.
+ argc = Int32Add(argc, Int32Constant(3));
+ TailCallRuntime(Runtime::kNewArray, argc, context, new_target,
+ maybe_allocation_site);
+}
+
+TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kFunction));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<HeapObject> maybe_allocation_site =
+ CAST(Parameter(Descriptor::kAllocationSite));
+
+ GenerateArrayNArgumentsConstructor(context, target, target, argc,
+ maybe_allocation_site);
+}
+
+void ArrayBuiltinsAssembler::GenerateInternalArrayNoArgumentConstructor(
+ ElementsKind kind) {
+ typedef ArrayNoArgumentConstructorDescriptor Descriptor;
+ Node* array_map = LoadObjectField(Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array = AllocateJSArray(
+ kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ SmiConstant(0));
+ Return(array);
+}
+
+void ArrayBuiltinsAssembler::GenerateInternalArraySingleArgumentConstructor(
+ ElementsKind kind) {
+ typedef ArraySingleArgumentConstructorDescriptor Descriptor;
+ Node* context = Parameter(Descriptor::kContext);
+ Node* function = Parameter(Descriptor::kFunction);
+ Node* array_map =
+ LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
+ Node* allocation_site = UndefinedConstant();
+
+ GenerateConstructor(context, function, array_map, array_size, allocation_site,
+ kind, DONT_TRACK_ALLOCATION_SITE);
+}
+
+#define GENERATE_ARRAY_CTOR(name, kind_camel, kind_caps, mode_camel, \
+ mode_caps) \
+ TF_BUILTIN(Array##name##Constructor_##kind_camel##_##mode_camel, \
+ ArrayBuiltinsAssembler) { \
+ GenerateArray##name##Constructor(kind_caps, mode_caps); \
+ }
+
+// The ArrayNoArgumentConstructor builtin family.
+GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS, DontOverride,
+ DONT_OVERRIDE);
+GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
+ DONT_OVERRIDE);
+GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS, DisableAllocationSites,
+ DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS, DisableAllocationSites,
+ DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(NoArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(NoArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+
+// The ArraySingleArgumentConstructor builtin family.
+GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
+ DontOverride, DONT_OVERRIDE);
+GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
+ DONT_OVERRIDE);
+GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(SingleArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+GENERATE_ARRAY_CTOR(SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+
+#undef GENERATE_ARRAY_CTOR
+
+#define GENERATE_INTERNAL_ARRAY_CTOR(name, kind_camel, kind_caps) \
+ TF_BUILTIN(InternalArray##name##Constructor_##kind_camel, \
+ ArrayBuiltinsAssembler) { \
+ GenerateInternalArray##name##Constructor(kind_caps); \
+ }
+
+GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS);
+GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS);
+GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS);
+GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS);
+
+#undef GENERATE_INTERNAL_ARRAY_CTOR
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 1cb5de3146..92b32115a0 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -68,6 +68,15 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
void NullPostLoopAction();
+ // TODO(szuend): Remove once overload resolution is fixed in Torque.
+ TNode<Object> LoadFixedArrayElementInt(TNode<FixedArray> array, int index) {
+ return LoadFixedArrayElement(array, index);
+ }
+ Node* StoreFixedArrayElementInt(TNode<FixedArray> array, int index,
+ TNode<Object> value) {
+ return StoreFixedArrayElement(array, index, value);
+ }
+
protected:
TNode<Context> context() { return context_; }
TNode<Object> receiver() { return receiver_; }
@@ -76,7 +85,7 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
TNode<Number> len() { return len_; }
Node* callbackfn() { return callbackfn_; }
Node* this_arg() { return this_arg_; }
- Node* k() { return k_.value(); }
+ TNode<Number> k() { return CAST(k_.value()); }
Node* a() { return a_.value(); }
void ReturnFromBuiltin(Node* value);
@@ -106,6 +115,41 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
MissingPropertyMode missing_property_mode,
ForEachDirection direction = ForEachDirection::kForward);
+ void TailCallArrayConstructorStub(
+ const Callable& callable, TNode<Context> context,
+ TNode<JSFunction> target, TNode<HeapObject> allocation_site_or_undefined,
+ TNode<Int32T> argc);
+
+ void GenerateDispatchToArrayStub(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode,
+ TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+
+ void CreateArrayDispatchNoArgument(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode,
+ TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+
+ void CreateArrayDispatchSingleArgument(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode,
+ TNode<AllocationSite> allocation_site = TNode<AllocationSite>());
+
+ void GenerateConstructor(Node* context, Node* array_function, Node* array_map,
+ Node* array_size, Node* allocation_site,
+ ElementsKind elements_kind, AllocationSiteMode mode);
+ void GenerateArrayNoArgumentConstructor(ElementsKind kind,
+ AllocationSiteOverrideMode mode);
+ void GenerateArraySingleArgumentConstructor(ElementsKind kind,
+ AllocationSiteOverrideMode mode);
+ void GenerateArrayNArgumentsConstructor(
+ TNode<Context> context, TNode<JSFunction> target,
+ TNode<Object> new_target, TNode<Int32T> argc,
+ TNode<HeapObject> maybe_allocation_site);
+
+ void GenerateInternalArrayNoArgumentConstructor(ElementsKind kind);
+ void GenerateInternalArraySingleArgumentConstructor(ElementsKind kind);
+
private:
static ElementsKind ElementsKindForInstanceType(InstanceType type);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index f9b90f1b2f..5154b904f5 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -141,46 +141,168 @@ V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
isolate,
Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
}
+
+V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
+ BuiltinArguments* args) {
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args->receiver()));
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_length_number,
+ Object::GetLengthFromArrayLike(isolate, Handle<Object>::cast(receiver)));
+
+ // 3. Let args be a List whose elements are, in left to right order,
+ // the arguments that were passed to this function invocation.
+ // 4. Let arg_count be the number of elements in args.
+ int arg_count = args->length() - 1;
+
+ // 5. If len + arg_count > 2^53-1, throw a TypeError exception.
+ double length = raw_length_number->Number();
+ if (arg_count > kMaxSafeInteger - length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kPushPastSafeLength,
+ isolate->factory()->NewNumberFromInt(arg_count),
+ raw_length_number));
+ }
+
+ // 6. Repeat, while args is not empty.
+ for (int i = 0; i < arg_count; ++i) {
+ // a. Remove the first element from args and let E be the value of the
+ // element.
+ Handle<Object> element = args->at(i + 1);
+
+ // b. Perform ? Set(O, ! ToString(len), E, true).
+ if (length <= static_cast<double>(JSArray::kMaxArrayIndex)) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetElement(isolate, receiver, length, element,
+ LanguageMode::kStrict));
+ } else {
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, isolate->factory()->NewNumber(length), &success);
+ // Must succeed since we always pass a valid key.
+ DCHECK(success);
+ MAYBE_RETURN(Object::SetProperty(&it, element, LanguageMode::kStrict,
+ Object::MAY_BE_STORE_FROM_KEYED),
+ ReadOnlyRoots(isolate).exception());
+ }
+
+ // c. Let len be len+1.
+ ++length;
+ }
+
+ // 7. Perform ? Set(O, "length", len, true).
+ Handle<Object> final_length = isolate->factory()->NewNumber(length);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetProperty(isolate, receiver,
+ isolate->factory()->length_string(),
+ final_length, LanguageMode::kStrict));
+
+ // 8. Return len.
+ return *final_length;
+}
} // namespace
BUILTIN(ArrayPush) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
- return CallJsIntrinsic(isolate, isolate->array_push(), args);
+ return GenericArrayPush(isolate, &args);
}
+
// Fast Elements Path
int to_add = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::ToInt(array->length());
- if (to_add == 0) return Smi::FromInt(len);
+ uint32_t len = static_cast<uint32_t>(array->length()->Number());
+ if (to_add == 0) return *isolate->factory()->NewNumberFromUint(len);
// Currently fixed arrays cannot grow too big, so we should never hit this.
DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_push(), args);
+ return GenericArrayPush(isolate, &args);
}
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Push(array, &args, to_add);
- return Smi::FromInt(new_length);
+ uint32_t new_length = accessor->Push(array, &args, to_add);
+ return *isolate->factory()->NewNumberFromUint((new_length));
}
+namespace {
+
+V8_WARN_UNUSED_RESULT Object* GenericArrayPop(Isolate* isolate,
+ BuiltinArguments* args) {
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args->receiver()));
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_length_number,
+ Object::GetLengthFromArrayLike(isolate, receiver));
+ double length = raw_length_number->Number();
+
+ // 3. If len is zero, then.
+ if (length == 0) {
+ // a. Perform ? Set(O, "length", 0, true).
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetProperty(
+ isolate, receiver, isolate->factory()->length_string(),
+ Handle<Smi>(Smi::kZero, isolate), LanguageMode::kStrict));
+
+ // b. Return undefined.
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // 4. Else len > 0.
+ // a. Let new_len be len-1.
+ Handle<Object> new_length = isolate->factory()->NewNumber(length - 1);
+
+ // b. Let index be ! ToString(newLen).
+ Handle<String> index = isolate->factory()->NumberToString(new_length);
+
+ // c. Let element be ? Get(O, index).
+ Handle<Object> element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, element,
+ JSReceiver::GetPropertyOrElement(isolate, receiver, index));
+
+ // d. Perform ? DeletePropertyOrThrow(O, index).
+ MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, index,
+ LanguageMode::kStrict),
+ ReadOnlyRoots(isolate).exception());
+
+ // e. Perform ? Set(O, "length", newLen, true).
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetProperty(isolate, receiver,
+ isolate->factory()->length_string(),
+ new_length, LanguageMode::kStrict));
+
+ // f. Return element.
+ return *element;
+}
+
+} // namespace
+
BUILTIN(ArrayPop) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
- return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+ return GenericArrayPop(isolate, &args);
}
-
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- uint32_t len = static_cast<uint32_t>(Smi::ToInt(array->length()));
- if (len == 0) return isolate->heap()->undefined_value();
+ uint32_t len = static_cast<uint32_t>(array->length()->Number());
+ if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+ return GenericArrayPop(isolate, &args);
}
Handle<Object> result;
@@ -194,6 +316,7 @@ BUILTIN(ArrayPop) {
isolate, result, JSReceiver::GetElement(isolate, array, new_length));
JSArray::SetLength(array, new_length);
}
+
return *result;
}
@@ -208,7 +331,7 @@ BUILTIN(ArrayShift) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
int len = Smi::ToInt(array->length());
- if (len == 0) return heap->undefined_value();
+ if (len == 0) return ReadOnlyRoots(heap).undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_shift(), args);
@@ -364,12 +487,12 @@ class ArrayConcatVisitor {
// Fall-through to dictionary mode.
}
DCHECK(!fast_elements());
- Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
+ Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_), isolate_);
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
- Handle<NumberDictionary> result =
- NumberDictionary::Set(dict, index, elm, not_a_prototype_holder);
+ Handle<NumberDictionary> result = NumberDictionary::Set(
+ isolate_, dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -420,8 +543,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
RETURN_ON_EXCEPTION(
isolate_,
- JSReceiver::SetProperty(result, isolate_->factory()->length_string(),
- length, LanguageMode::kStrict),
+ JSReceiver::SetProperty(isolate_, result,
+ isolate_->factory()->length_string(), length,
+ LanguageMode::kStrict),
JSReceiver);
return result;
}
@@ -445,7 +569,7 @@ class ArrayConcatVisitor {
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
Handle<NumberDictionary> new_storage = NumberDictionary::Set(
- slow_storage, i, element, not_a_prototype_holder);
+ isolate_, slow_storage, i, element, not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -491,7 +615,7 @@ class ArrayConcatVisitor {
uint32_t bit_field_;
};
-uint32_t EstimateElementCount(Handle<JSArray> array) {
+uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
@@ -504,7 +628,6 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
// a 32-bit signed integer.
DCHECK_GE(static_cast<int32_t>(FixedArray::kMaxLength), 0);
int fast_length = static_cast<int>(length);
- Isolate* isolate = array->GetIsolate();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < fast_length; i++) {
if (!elements->get(i)->IsTheHole(isolate)) element_count++;
@@ -529,11 +652,11 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = NumberDictionary::cast(array->elements());
- Isolate* isolate = dictionary->GetIsolate();
int capacity = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
Object* key = dictionary->KeyAt(i);
- if (dictionary->IsKey(isolate, key)) {
+ if (dictionary->IsKey(roots, key)) {
element_count++;
}
}
@@ -558,9 +681,8 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
return element_count;
}
-void CollectElementIndices(Handle<JSObject> object, uint32_t range,
- std::vector<uint32_t>* indices) {
- Isolate* isolate = object->GetIsolate();
+void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
+ uint32_t range, std::vector<uint32_t>* indices) {
ElementsKind kind = object->GetElementsKind();
switch (kind) {
case PACKED_SMI_ELEMENTS:
@@ -585,7 +707,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
break;
}
Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()));
+ FixedDoubleArray::cast(object->elements()), isolate);
uint32_t length = static_cast<uint32_t>(elements->length());
if (range < length) length = range;
for (uint32_t i = 0; i < length; i++) {
@@ -599,9 +721,10 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
DisallowHeapAllocation no_gc;
NumberDictionary* dict = NumberDictionary::cast(object->elements());
uint32_t capacity = dict->Capacity();
+ ReadOnlyRoots roots(isolate);
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
Object* k = dict->KeyAt(j);
- if (!dict->IsKey(isolate, k)) continue;
+ if (!dict->IsKey(roots, k)) continue;
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
if (index < range) {
@@ -670,8 +793,8 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
if (!iter.IsAtEnd()) {
// The prototype will usually have no inherited element indices,
// but we have to check.
- CollectElementIndices(PrototypeIterator::GetCurrent<JSObject>(iter), range,
- indices);
+ CollectElementIndices(
+ isolate, PrototypeIterator::GetCurrent<JSObject>(iter), range, indices);
}
}
@@ -738,7 +861,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
case HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate);
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
@@ -771,7 +894,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
break;
}
Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
+ FixedDoubleArray::cast(array->elements()), isolate);
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
@@ -798,13 +921,13 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(array->element_dictionary());
+ Handle<NumberDictionary> dict(array->element_dictionary(), isolate);
std::vector<uint32_t> indices;
indices.reserve(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
// than length. This might introduce duplicates in the indices list.
- CollectElementIndices(array, length, &indices);
+ CollectElementIndices(isolate, array, length, &indices);
std::sort(indices.begin(), indices.end());
size_t n = indices.size();
FOR_WITH_HANDLE_SCOPE(isolate, size_t, j = 0, j, j < n, (void)0, {
@@ -859,7 +982,7 @@ static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
MaybeHandle<Object> maybeValue =
i::Runtime::GetObjectProperty(isolate, obj, key);
if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
- if (!value->IsUndefined(isolate)) return Just(value->BooleanValue());
+ if (!value->IsUndefined(isolate)) return Just(value->BooleanValue(isolate));
}
return Object::IsArray(obj);
}
@@ -891,7 +1014,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
GetPackedElementsKind(array->GetElementsKind());
kind = GetMoreGeneralElementsKind(kind, array_kind);
}
- element_estimate = EstimateElementCount(array);
+ element_estimate = EstimateElementCount(isolate, array);
} else {
if (obj->IsHeapObject()) {
kind = GetMoreGeneralElementsKind(
@@ -965,7 +1088,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
}
case HOLEY_SMI_ELEMENTS:
case PACKED_SMI_ELEMENTS: {
- Object* the_hole = isolate->heap()->the_hole_value();
+ Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
FixedArray* elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
Object* element = elements->get(i);
@@ -1021,14 +1144,14 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj((*args)[i], isolate);
Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
- MAYBE_RETURN(spreadable, isolate->heap()->exception());
+ MAYBE_RETURN(spreadable, ReadOnlyRoots(isolate).exception());
if (spreadable.FromJust()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
if (!IterateElements(isolate, object, &visitor)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
} else {
- if (!visitor.visit(0, obj)) return isolate->heap()->exception();
+ if (!visitor.visit(0, obj)) return ReadOnlyRoots(isolate).exception();
visitor.increase_index_offset(1);
}
}
@@ -1128,7 +1251,8 @@ BUILTIN(ArrayConcat) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (isolate->has_pending_exception())
+ return ReadOnlyRoots(isolate).exception();
}
// Reading @@species happens before anything else with a side effect, so
// we can do it here to determine whether to take the fast path.
@@ -1139,7 +1263,8 @@ BUILTIN(ArrayConcat) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (isolate->has_pending_exception())
+ return ReadOnlyRoots(isolate).exception();
}
return Slow_ArrayConcat(&args, species, isolate);
}
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 9c77a0047d..92b003a4e2 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -82,7 +82,8 @@ BUILTIN(ArrayBufferConstructor) {
// all cases, or we will expose uinitialized memory to user code.
BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
HandleScope scope(isolate);
- Handle<JSFunction> target(isolate->native_context()->array_buffer_fun());
+ Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
+ isolate);
Handle<Object> length = args.atOrUndefined(isolate, 1);
return ConstructBuffer(isolate, target, target, length, false);
}
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 1f29cbfefd..7fbfbdd494 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -20,6 +20,10 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
void AsyncFunctionAwait(Node* const context, Node* const generator,
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
+ void AsyncFunctionAwaitOptimized(Node* const context, Node* const generator,
+ Node* const awaited,
+ Node* const outer_promise,
+ const bool is_predicted_as_caught);
void AsyncFunctionAwaitResumeClosure(
Node* const context, Node* const sent_value,
@@ -120,6 +124,11 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// TODO(jgruber): Use a faster specialized version of
// InternalPerformPromiseThen.
+ Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
+ GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
+ Goto(&after_debug_hook);
+ BIND(&after_debug_hook);
+
Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
@@ -128,6 +137,47 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Return outer promise to avoid adding an load of the outer promise before
// suspending in BytecodeGenerator.
Return(outer_promise);
+
+ BIND(&call_debug_hook);
+ CallRuntime(Runtime::kDebugAsyncFunctionSuspended, context, outer_promise);
+ Goto(&after_debug_hook);
+}
+
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitOptimized(
+ Node* const context, Node* const generator, Node* const awaited,
+ Node* const outer_promise, const bool is_predicted_as_caught) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+ CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+ ContextInitializer init_closure_context = [&](Node* context) {
+ StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
+ generator);
+ };
+
+ // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
+ // the awaited promise if it is already a promise. Reuse is non-spec compliant
+ // but part of our old behavior gives us a couple of percent
+ // performance boost.
+ // TODO(jgruber): Use a faster specialized version of
+ // InternalPerformPromiseThen.
+
+ Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
+ GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
+ Goto(&after_debug_hook);
+ BIND(&after_debug_hook);
+
+ AwaitOptimized(
+ context, generator, awaited, outer_promise, AwaitContext::kLength,
+ init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+ Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
+
+ // Return outer promise to avoid adding an load of the outer promise before
+ // suspending in BytecodeGenerator.
+ Return(outer_promise);
+
+ BIND(&call_debug_hook);
+ CallRuntime(Runtime::kDebugAsyncFunctionSuspended, context, outer_promise);
+ Goto(&after_debug_hook);
}
// Called by the parser from the desugaring of 'await' when catch
@@ -145,6 +195,19 @@ TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
kIsPredictedAsCaught);
}
+TF_BUILTIN(AsyncFunctionAwaitCaughtOptimized, AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ static const bool kIsPredictedAsCaught = true;
+
+ AsyncFunctionAwaitOptimized(context, generator, awaited, outer_promise,
+ kIsPredictedAsCaught);
+}
+
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
@@ -160,6 +223,20 @@ TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
kIsPredictedAsCaught);
}
+TF_BUILTIN(AsyncFunctionAwaitUncaughtOptimized,
+ AsyncFunctionBuiltinsAssembler) {
+ CSA_ASSERT_JS_ARGC_EQ(this, 3);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ static const bool kIsPredictedAsCaught = false;
+
+ AsyncFunctionAwaitOptimized(context, generator, awaited, outer_promise,
+ kIsPredictedAsCaught);
+}
+
TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 0);
Node* const context = Parameter(Descriptor::kContext);
@@ -177,29 +254,29 @@ TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
// Push the Promise under construction in an async function on
// the catch prediction stack to handle exceptions thrown before
// the first await.
- // Assign ID and create a recurring task to save stack for future
- // resumptions from await.
- CallRuntime(Runtime::kDebugAsyncFunctionPromiseCreated, context, promise);
+ CallRuntime(Runtime::kDebugPushPromise, context, promise);
Return(promise);
}
}
TF_BUILTIN(AsyncFunctionPromiseRelease, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
Node* const promise = Parameter(Descriptor::kPromise);
Node* const context = Parameter(Descriptor::kContext);
- Label if_is_debug_active(this, Label::kDeferred);
- GotoIf(IsDebugActive(), &if_is_debug_active);
+ Label call_debug_instrumentation(this, Label::kDeferred);
+ GotoIf(HasAsyncEventDelegate(), &call_debug_instrumentation);
+ GotoIf(IsDebugActive(), &call_debug_instrumentation);
// Early exit if debug is not active.
Return(UndefinedConstant());
- BIND(&if_is_debug_active);
+ BIND(&call_debug_instrumentation);
{
// Pop the Promise under construction in an async function on
// from catch prediction stack.
- CallRuntime(Runtime::kDebugPopPromise, context);
+ CallRuntime(Runtime::kDebugAsyncFunctionFinished, context,
+ Parameter(Descriptor::kCanSuspend), promise);
Return(promise);
}
}
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index ba0226d7b3..c7e3c5cdeb 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/heap/factory-inl.h"
+#include "src/objects/js-promise.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
@@ -97,10 +98,9 @@ Node* AsyncBuiltinsAssembler::Await(
{
// Add PromiseHooks if needed
Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
- CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
- outer_promise);
- CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
+ GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &next);
+ CallRuntime(Runtime::kAwaitPromisesInit, context, wrapped_value,
+ outer_promise, throwaway);
Goto(&next);
BIND(&next);
}
@@ -147,6 +147,120 @@ Node* AsyncBuiltinsAssembler::Await(
on_resolve, on_reject, throwaway);
}
+Node* AsyncBuiltinsAssembler::AwaitOptimized(
+ Node* context, Node* generator, Node* value, Node* outer_promise,
+ int context_length, const ContextInitializer& init_closure_context,
+ Node* on_resolve_context_index, Node* on_reject_context_index,
+ Node* is_predicted_as_caught) {
+ DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
+ CSA_ASSERT(this, IsConstructor(promise_fun));
+
+ static const int kThrowawayPromiseOffset =
+ FixedArray::SizeFor(context_length);
+ static const int kResolveClosureOffset =
+ kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ static const int kRejectClosureOffset =
+ kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
+ static const int kTotalSize =
+ kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
+
+ // 2. Let promise be ? PromiseResolve(« promise »).
+ Node* const promise =
+ CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value);
+
+ Node* const base = AllocateInNewSpace(kTotalSize);
+ Node* const closure_context = base;
+ {
+ // Initialize closure context
+ InitializeFunctionContext(native_context, closure_context, context_length);
+ init_closure_context(closure_context);
+ }
+
+ Node* const promise_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ // Assert that the JSPromise map has an instance size is
+ // JSPromise::kSizeWithEmbedderFields.
+ CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
+ kPointerSize)));
+ Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
+ {
+ // Initialize throwawayPromise
+ StoreMapNoWriteBarrier(throwaway, promise_map);
+ InitializeJSObjectFromMap(
+ throwaway, promise_map,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
+ PromiseInit(throwaway);
+ }
+
+ Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ {
+ // Initialize resolve handler
+ InitializeNativeClosure(closure_context, native_context, on_resolve,
+ on_resolve_context_index);
+ }
+
+ Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
+ {
+ // Initialize reject handler
+ InitializeNativeClosure(closure_context, native_context, on_reject,
+ on_reject_context_index);
+ }
+
+ {
+ // Add PromiseHooks if needed
+ Label next(this);
+ GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &next);
+ CallRuntime(Runtime::kAwaitPromisesInit, context, promise, outer_promise,
+ throwaway);
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // The Promise will be thrown away and not handled, but it shouldn't trigger
+ // unhandled reject events as its work is done
+ PromiseSetHasHandler(throwaway);
+
+ Label do_perform_promise_then(this);
+ GotoIfNot(IsDebugActive(), &do_perform_promise_then);
+ {
+ Label common(this);
+ GotoIf(TaggedIsSmi(value), &common);
+ GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
+ {
+ // Mark the reject handler callback to be a forwarding edge, rather
+ // than a meaningful catch handler
+ Node* const key =
+ HeapConstant(factory()->promise_forwarding_handler_symbol());
+ CallRuntime(Runtime::kSetProperty, context, on_reject, key,
+ TrueConstant(), SmiConstant(LanguageMode::kStrict));
+
+ GotoIf(IsFalse(is_predicted_as_caught), &common);
+ PromiseSetHandledHint(value);
+ }
+
+ Goto(&common);
+ BIND(&common);
+ // Mark the dependency to outer Promise in case the throwaway Promise is
+ // found on the Promise stack
+ CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+ Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
+ CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
+ SmiConstant(LanguageMode::kStrict));
+ }
+
+ Goto(&do_perform_promise_then);
+ BIND(&do_perform_promise_then);
+ return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
+ on_resolve, on_reject, throwaway);
+}
+
void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* native_context,
Node* function,
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 45d7c8689a..e5f487d8cc 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -28,6 +28,12 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
const ContextInitializer& init_closure_context,
Node* on_resolve_context_index, Node* on_reject_context_index,
Node* is_predicted_as_caught);
+ Node* AwaitOptimized(Node* context, Node* generator, Node* value,
+ Node* outer_promise, int context_length,
+ const ContextInitializer& init_closure_context,
+ Node* on_resolve_context_index,
+ Node* on_reject_context_index,
+ Node* is_predicted_as_caught);
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
int context_length,
const ContextInitializer& init_closure_context,
@@ -38,6 +44,17 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
IntPtrConstant(on_reject_context_index),
is_predicted_as_caught);
}
+ Node* AwaitOptimized(Node* context, Node* generator, Node* value,
+ Node* outer_promise, int context_length,
+ const ContextInitializer& init_closure_context,
+ int on_resolve_context_index,
+ int on_reject_context_index,
+ Node* is_predicted_as_caught) {
+ return AwaitOptimized(
+ context, generator, value, outer_promise, context_length,
+ init_closure_context, IntPtrConstant(on_resolve_context_index),
+ IntPtrConstant(on_reject_context_index), is_predicted_as_caught);
+ }
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
int context_length,
const ContextInitializer& init_closure_context,
@@ -48,6 +65,17 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
on_reject_context_index,
BooleanConstant(is_predicted_as_caught));
}
+ Node* AwaitOptimized(Node* context, Node* generator, Node* value,
+ Node* outer_promise, int context_length,
+ const ContextInitializer& init_closure_context,
+ int on_resolve_context_index,
+ int on_reject_context_index,
+ bool is_predicted_as_caught) {
+ return AwaitOptimized(context, generator, value, outer_promise,
+ context_length, init_closure_context,
+ on_resolve_context_index, on_reject_context_index,
+ BooleanConstant(is_predicted_as_caught));
+ }
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 929522a83b..251ff1ee40 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -329,12 +329,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
@@ -347,12 +347,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
@@ -365,12 +365,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* generator = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
@@ -527,11 +527,38 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
done);
}
- // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ // We know that {iter_result} itself doesn't have any "then" property (a
+ // freshly allocated IterResultObject only has "value" and "done" properties)
+ // and we also know that the [[Prototype]] of {iter_result} is the intrinsic
+ // %ObjectPrototype%. So we can skip the [[Resolve]] logic here completely
+ // and directly call into the FulfillPromise operation if we can prove
+ // that the %ObjectPrototype% also doesn't have any "then" property. This
+ // is guarded by the Promise#then() protector.
+ // If the PromiseHooks are enabled, we cannot take the shortcut here, since
+ // the "promiseResolve" hook would not be fired otherwise.
+ Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
+ GotoIfForceSlowPath(&if_slow);
+ GotoIf(IsPromiseHookEnabled(), &if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
+
+ BIND(&if_fast);
+ {
+ // Skip the "then" on {iter_result} and directly fulfill the {promise}
+ // with the {iter_result}.
+ CallBuiltin(Builtins::kFulfillPromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
+
+ BIND(&if_slow);
+ {
+ // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
+ BIND(&return_promise);
Return(promise);
}
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 58691bd00e..82db5fbd5d 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -36,6 +36,13 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
Node* const initial_exception_value = nullptr);
+ void Generate_AsyncFromSyncIteratorMethodOptimized(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr);
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
@@ -50,6 +57,19 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
context, iterator, sent_value, get_method, if_method_undefined,
operation_name, reject_label_type, initial_exception_value);
}
+ void Generate_AsyncFromSyncIteratorMethodOptimized(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr) {
+ auto get_method = [=](Node* const sync_iterator) {
+ return GetProperty(context, sync_iterator, name);
+ };
+ return Generate_AsyncFromSyncIteratorMethodOptimized(
+ context, iterator, sent_value, get_method, if_method_undefined,
+ operation_name, reject_label_type, initial_exception_value);
+ }
// Load "value" and "done" from an iterator result object. If an exception
// is thrown at any point, jumps to te `if_exception` label with exception
@@ -157,6 +177,73 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
}
}
+void AsyncFromSyncBuiltinsAssembler::
+ Generate_AsyncFromSyncIteratorMethodOptimized(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name, Label::Type reject_label_type,
+ Node* const initial_exception_value) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise = AllocateAndInitJSPromise(context);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ initial_exception_value == nullptr ? UndefinedConstant()
+ : initial_exception_value);
+ Label reject_promise(this, reject_label_type);
+
+ ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
+ &var_exception, operation_name);
+
+ Node* const sync_iterator =
+ LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
+
+ Node* const method = get_method(sync_iterator);
+
+ if (if_method_undefined) {
+ Label if_isnotundefined(this);
+
+ GotoIfNot(IsUndefined(method), &if_isnotundefined);
+ if_method_undefined(native_context, promise, &reject_promise);
+
+ BIND(&if_isnotundefined);
+ }
+
+ Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context,
+ method, sync_iterator, sent_value);
+ GotoIfException(iter_result, &reject_promise, &var_exception);
+
+ Node* value;
+ Node* done;
+ std::tie(value, done) = LoadIteratorResult(
+ context, native_context, iter_result, &reject_promise, &var_exception);
+
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ CSA_ASSERT(this, IsConstructor(promise_fun));
+
+ // Let valueWrapper be ? PromiseResolve(« value »).
+ Node* const valueWrapper = CallBuiltin(Builtins::kPromiseResolve,
+ native_context, promise_fun, value);
+
+ // Let onFulfilled be a new built-in function object as defined in
+ // Async Iterator Value Unwrap Functions.
+ // Set onFulfilled.[[Done]] to throwDone.
+ Node* const on_fulfilled = CreateUnwrapClosure(native_context, done);
+
+ // Perform ! PerformPromiseThen(valueWrapper,
+ // onFulfilled, undefined, promiseCapability).
+ Return(CallBuiltin(Builtins::kPerformPromiseThen, context, valueWrapper,
+ on_fulfilled, UndefinedConstant(), promise));
+
+ BIND(&reject_promise);
+ {
+ Node* const exception = var_exception.value();
+ CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
+ TrueConstant());
+ Return(promise);
+ }
+}
std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
Node* const context, Node* const native_context, Node* const iter_result,
Label* if_exception, Variable* var_exception) {
@@ -246,6 +333,20 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
"[Async-from-Sync Iterator].prototype.next");
}
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeNextOptimized,
+ AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ auto get_method = [=](Node* const unused) {
+ return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
+ };
+ Generate_AsyncFromSyncIteratorMethodOptimized(
+ context, iterator, value, get_method, UndefinedMethodHandler(),
+ "[Async-from-Sync Iterator].prototype.next");
+}
+
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.return
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
@@ -273,6 +374,31 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
"[Async-from-Sync Iterator].prototype.return");
}
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturnOptimized,
+ AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(Descriptor::kReceiver);
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ auto if_return_undefined = [=](Node* const native_context,
+ Node* const promise, Label* if_exception) {
+ // If return is undefined, then
+ // Let iterResult be ! CreateIterResultObject(value, true)
+ Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
+ context, value, TrueConstant());
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
+ // IfAbruptRejectPromise(nextDone, promiseCapability).
+ // Return promiseCapability.[[Promise]].
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ Return(promise);
+ };
+
+ Generate_AsyncFromSyncIteratorMethodOptimized(
+ context, iterator, value, factory()->return_string(), if_return_undefined,
+ "[Async-from-Sync Iterator].prototype.return");
+}
+
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.throw
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
@@ -290,5 +416,20 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
reason);
}
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrowOptimized,
+ AsyncFromSyncBuiltinsAssembler) {
+ Node* const iterator = Parameter(Descriptor::kReceiver);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
+ Label* if_exception) { Goto(if_exception); };
+
+ Generate_AsyncFromSyncIteratorMethodOptimized(
+ context, iterator, reason, factory()->throw_string(), if_throw_undefined,
+ "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
+ reason);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6c04c9dcb7..6bc1a911f7 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -49,7 +49,8 @@ BUILTIN(BigIntAsUintN) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, bigint_obj));
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::AsUintN(bits->Number(), bigint));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ BigInt::AsUintN(isolate, bits->Number(), bigint));
}
BUILTIN(BigIntAsIntN) {
@@ -66,7 +67,7 @@ BUILTIN(BigIntAsIntN) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, bigint_obj));
- return *BigInt::AsIntN(bits->Number(), bigint);
+ return *BigInt::AsIntN(isolate, bits->Number(), bigint);
}
namespace {
@@ -115,7 +116,7 @@ Object* BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
}
// Return the String representation of this Number value using the radix
// specified by radixNumber.
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(x, radix_number));
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(isolate, x, radix_number));
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index 5dc42d506f..e0e1bb738c 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -18,7 +18,7 @@ BUILTIN(BooleanConstructor) {
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
Handle<Object> value = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(value->BooleanValue());
+ return isolate->heap()->ToBoolean(value->BooleanValue(isolate));
} else { // [[Construct]]
HandleScope scope(isolate);
Handle<Object> value = args.atOrUndefined(isolate, 1);
@@ -29,7 +29,7 @@ BUILTIN(BooleanConstructor) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSObject::New(target, new_target));
Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue()));
+ isolate->heap()->ToBoolean(value->BooleanValue(isolate)));
return *result;
}
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index cc6ff07e13..35aaee5ec2 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -62,9 +62,8 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
- Node* target, Node* new_target, Node* arguments_list, Node* context) {
- VARIABLE(var_elements, MachineRepresentation::kTagged);
- VARIABLE(var_length, MachineRepresentation::kWord32);
+ TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<Object> arguments_list, TNode<Context> context) {
Label if_done(this), if_arguments(this), if_array(this),
if_holey_array(this, Label::kDeferred),
if_runtime(this, Label::kDeferred);
@@ -75,7 +74,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Label if_target_callable(this),
if_target_not_callable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(target), &if_target_not_callable);
- Branch(IsCallable(target), &if_target_callable, &if_target_not_callable);
+ Branch(IsCallable(CAST(target)), &if_target_callable,
+ &if_target_not_callable);
BIND(&if_target_not_callable);
{
CallRuntime(Runtime::kThrowApplyNonFunction, context, target);
@@ -87,7 +87,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Label if_target_constructor(this),
if_target_not_constructor(this, Label::kDeferred);
GotoIf(TaggedIsSmi(target), &if_target_not_constructor);
- Branch(IsConstructor(target), &if_target_constructor,
+ Branch(IsConstructor(CAST(target)), &if_target_constructor,
&if_target_not_constructor);
BIND(&if_target_not_constructor);
{
@@ -100,7 +100,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Label if_new_target_constructor(this),
if_new_target_not_constructor(this, Label::kDeferred);
GotoIf(TaggedIsSmi(new_target), &if_new_target_not_constructor);
- Branch(IsConstructor(new_target), &if_new_target_constructor,
+ Branch(IsConstructor(CAST(new_target)), &if_new_target_constructor,
&if_new_target_not_constructor);
BIND(&if_new_target_not_constructor);
{
@@ -111,27 +111,29 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
}
GotoIf(TaggedIsSmi(arguments_list), &if_runtime);
- Node* arguments_list_map = LoadMap(arguments_list);
- Node* native_context = LoadNativeContext(context);
+
+ TNode<Map> arguments_list_map = LoadMap(CAST(arguments_list));
+ TNode<Context> native_context = LoadNativeContext(context);
// Check if {arguments_list} is an (unmodified) arguments object.
- Node* sloppy_arguments_map =
- LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ TNode<Map> sloppy_arguments_map = CAST(
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
GotoIf(WordEqual(arguments_list_map, sloppy_arguments_map), &if_arguments);
- Node* strict_arguments_map =
- LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ TNode<Map> strict_arguments_map = CAST(
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX));
GotoIf(WordEqual(arguments_list_map, strict_arguments_map), &if_arguments);
// Check if {arguments_list} is a fast JSArray.
Branch(IsJSArrayMap(arguments_list_map), &if_array, &if_runtime);
+ TVARIABLE(FixedArrayBase, var_elements);
+ TVARIABLE(Int32T, var_length);
BIND(&if_array);
{
// Try to extract the elements from a JSArray object.
- var_elements.Bind(
- LoadObjectField(arguments_list, JSArray::kElementsOffset));
- var_length.Bind(LoadAndUntagToWord32ObjectField(arguments_list,
- JSArray::kLengthOffset));
+ var_elements = LoadElements(CAST(arguments_list));
+ var_length =
+ LoadAndUntagToWord32ObjectField(arguments_list, JSArray::kLengthOffset);
// Holey arrays and double backing stores need special treatment.
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
@@ -142,7 +144,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
- Node* kind = LoadMapElementsKind(arguments_list_map);
+ TNode<Int32T> kind = LoadMapElementsKind(arguments_list_map);
GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
&if_runtime);
@@ -160,26 +162,25 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_arguments);
{
+ TNode<JSArgumentsObject> js_arguments = CAST(arguments_list);
// Try to extract the elements from an JSArgumentsObject.
- Node* length =
- LoadObjectField(arguments_list, JSArgumentsObject::kLengthOffset);
- Node* elements =
- LoadObjectField(arguments_list, JSArgumentsObject::kElementsOffset);
- Node* elements_length = LoadFixedArrayBaseLength(elements);
+ TNode<Object> length =
+ LoadObjectField(js_arguments, JSArgumentsObject::kLengthOffset);
+ TNode<FixedArrayBase> elements = LoadElements(js_arguments);
+ TNode<Smi> elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
- var_elements.Bind(elements);
- var_length.Bind(SmiToInt32(length));
+ var_elements = elements;
+ var_length = SmiToInt32(CAST(length));
Goto(&if_done);
}
BIND(&if_runtime);
{
// Ask the runtime to create the list (actually a FixedArray).
- Node* elements =
- CallRuntime(Runtime::kCreateListFromArrayLike, context, arguments_list);
- var_elements.Bind(elements);
- var_length.Bind(
- LoadAndUntagToWord32ObjectField(elements, FixedArray::kLengthOffset));
+ var_elements = CAST(CallRuntime(Runtime::kCreateListFromArrayLike, context,
+ arguments_list));
+ var_length = LoadAndUntagToWord32ObjectField(var_elements.value(),
+ FixedArray::kLengthOffset);
Goto(&if_done);
}
@@ -188,27 +189,41 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_done);
{
Label if_not_double(this), if_double(this);
- Node* elements = var_elements.value();
- Node* length = var_length.value();
- Node* args_count = Int32Constant(0); // args already on the stack
+ TNode<Int32T> args_count = Int32Constant(0); // args already on the stack
+ TNode<Int32T> length = var_length.value();
+ {
+ Label normalize_done(this);
+ GotoIfNot(Word32Equal(length, Int32Constant(0)), &normalize_done);
+ // Make sure we don't accidentally pass along the
+ // empty_fixed_double_array since the tailed-called stubs cannot handle
+ // the normalization yet.
+ var_elements = EmptyFixedArrayConstant();
+ Goto(&normalize_done);
+
+ BIND(&normalize_done);
+ }
+
+ TNode<FixedArrayBase> elements = var_elements.value();
Branch(IsFixedDoubleArray(elements), &if_double, &if_not_double);
BIND(&if_not_double);
- if (new_target == nullptr) {
- Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, elements, length);
- } else {
- Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, elements,
- length);
+ {
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count,
+ elements, length);
+ }
}
BIND(&if_double);
{
// Kind is hardcoded here because CreateListFromArrayLike will only
// produce holey double arrays.
- CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ CallOrConstructDoubleVarargs(target, new_target, CAST(elements), length,
args_count, context,
Int32Constant(HOLEY_DOUBLE_ELEMENTS));
}
@@ -219,19 +234,19 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// boxed as HeapNumbers, then tail calls CallVarargs/ConstructVarargs depending
// on whether {new_target} was passed.
void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
- Node* target, Node* new_target, Node* elements, Node* length,
- Node* args_count, Node* context, Node* kind) {
+ TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<FixedDoubleArray> elements, TNode<Int32T> length,
+ TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
Label if_holey_double(this), if_packed_double(this), if_done(this);
const ElementsKind new_kind = PACKED_ELEMENTS;
- const ParameterMode mode = INTPTR_PARAMETERS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- Node* intptr_length = ChangeInt32ToIntPtr(length);
+ TNode<IntPtrT> intptr_length = ChangeInt32ToIntPtr(length);
+ CSA_ASSERT(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
// Allocate a new FixedArray of Objects.
- Node* new_elements =
- AllocateFixedArray(new_kind, intptr_length, mode,
- CodeStubAssembler::kAllowLargeObjectAllocation);
+ TNode<FixedArray> new_elements = AllocateFixedArray(
+ new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation);
Branch(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&if_holey_double, &if_packed_double);
@@ -266,18 +281,19 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
- Node* target, Node* new_target, Node* spread, Node* args_count,
- Node* context) {
+ TNode<Object> target, TNode<Object> new_target, TNode<Object> spread,
+ TNode<Int32T> args_count, TNode<Context> context) {
Label if_smiorobject(this), if_double(this),
if_generic(this, Label::kDeferred);
- VARIABLE(var_length, MachineRepresentation::kWord32);
- VARIABLE(var_elements, MachineRepresentation::kTagged);
- VARIABLE(var_elements_kind, MachineRepresentation::kWord32);
+ TVARIABLE(Int32T, var_length);
+ TVARIABLE(FixedArrayBase, var_elements);
+ TVARIABLE(Int32T, var_elements_kind);
GotoIf(TaggedIsSmi(spread), &if_generic);
- Node* spread_map = LoadMap(spread);
+ TNode<Map> spread_map = LoadMap(CAST(spread));
GotoIfNot(IsJSArrayMap(spread_map), &if_generic);
+ TNode<JSArray> spread_array = CAST(spread);
// Check that we have the original Array.prototype.
GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_generic);
@@ -287,40 +303,41 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
// Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
- Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
- DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
+ TNode<PropertyCell> protector_cell =
+ CAST(LoadRoot(Heap::kArrayIteratorProtectorRootIndex));
GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorInvalid)),
&if_generic);
-
- // The fast-path accesses the {spread} elements directly.
- Node* spread_kind = LoadMapElementsKind(spread_map);
- var_elements_kind.Bind(spread_kind);
- var_length.Bind(
- LoadAndUntagToWord32ObjectField(spread, JSArray::kLengthOffset));
- var_elements.Bind(LoadObjectField(spread, JSArray::kElementsOffset));
-
- // Check elements kind of {spread}.
- GotoIf(Int32LessThan(spread_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
- &if_smiorobject);
- Branch(Int32GreaterThan(spread_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &if_generic, &if_double);
+ {
+ // The fast-path accesses the {spread} elements directly.
+ TNode<Int32T> spread_kind = LoadMapElementsKind(spread_map);
+ var_elements_kind = spread_kind;
+ var_length =
+ LoadAndUntagToWord32ObjectField(spread_array, JSArray::kLengthOffset);
+ var_elements = LoadElements(spread_array);
+
+ // Check elements kind of {spread}.
+ GotoIf(Int32LessThan(spread_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_smiorobject);
+ Branch(
+ Int32GreaterThan(spread_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_generic, &if_double);
+ }
BIND(&if_generic);
{
Label if_iterator_fn_not_callable(this, Label::kDeferred);
- Node* iterator_fn = GetProperty(context, spread, IteratorSymbolConstant());
- GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
- GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
- Node* list = CallBuiltin(Builtins::kIterableToList, context, spread,
- iterator_fn);
- CSA_ASSERT(this, IsJSArray(list));
- Node* list_kind = LoadMapElementsKind(LoadMap(list));
- var_length.Bind(
- LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset));
- var_elements.Bind(LoadObjectField(list, JSArray::kElementsOffset));
- var_elements_kind.Bind(list_kind);
- Branch(Int32LessThan(list_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ TNode<Object> iterator_fn =
+ GetProperty(context, spread, IteratorSymbolConstant());
+ GotoIfNot(TaggedIsCallable(iterator_fn), &if_iterator_fn_not_callable);
+ TNode<JSArray> list = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, spread, iterator_fn));
+ var_length = LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset);
+
+ var_elements = LoadElements(list);
+ var_elements_kind = LoadElementsKind(list);
+ Branch(Int32LessThan(var_elements_kind.value(),
+ Int32Constant(PACKED_DOUBLE_ELEMENTS)),
&if_smiorobject, &if_double);
BIND(&if_iterator_fn_not_callable);
@@ -329,8 +346,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_smiorobject);
{
- Node* const elements = var_elements.value();
- Node* const length = var_length.value();
+ TNode<FixedArrayBase> elements = var_elements.value();
+ TNode<Int32T> length = var_length.value();
if (new_target == nullptr) {
Callable callable = CodeFactory::CallVarargs(isolate());
@@ -344,29 +361,28 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_double);
{
- Node* const elements_kind = var_elements_kind.value();
- Node* const elements = var_elements.value();
- Node* const length = var_length.value();
-
- CallOrConstructDoubleVarargs(target, new_target, elements, length,
- args_count, context, elements_kind);
+ GotoIf(Word32Equal(var_length.value(), Int32Constant(0)), &if_smiorobject);
+ CallOrConstructDoubleVarargs(target, new_target, CAST(var_elements.value()),
+ var_length.value(), args_count, context,
+ var_elements_kind.value());
}
}
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
- Node* target = Parameter(CallWithArrayLikeDescriptor::kTarget);
- Node* new_target = nullptr;
- Node* arguments_list = Parameter(CallWithArrayLikeDescriptor::kArgumentsList);
- Node* context = Parameter(CallWithArrayLikeDescriptor::kContext);
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ SloppyTNode<Object> new_target = nullptr;
+ TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
- Node* target = Parameter(CallWithSpreadDescriptor::kTarget);
- Node* new_target = nullptr;
- Node* spread = Parameter(CallWithSpreadDescriptor::kSpread);
- Node* args_count = Parameter(CallWithSpreadDescriptor::kArgumentsCount);
- Node* context = Parameter(CallWithSpreadDescriptor::kContext);
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ SloppyTNode<Object> new_target = nullptr;
+ TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
+ TNode<Int32T> args_count =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index bbbdefc0c5..013093f38b 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -15,14 +15,19 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
explicit CallOrConstructBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void CallOrConstructWithArrayLike(Node* target, Node* new_target,
- Node* arguments_list, Node* context);
- void CallOrConstructDoubleVarargs(Node* target, Node* new_target,
- Node* elements, Node* length,
- Node* args_count, Node* context,
- Node* kind);
- void CallOrConstructWithSpread(Node* target, Node* new_target, Node* spread,
- Node* args_count, Node* context);
+ void CallOrConstructWithArrayLike(TNode<Object> target,
+ SloppyTNode<Object> new_target,
+ TNode<Object> arguments_list,
+ TNode<Context> context);
+ void CallOrConstructDoubleVarargs(TNode<Object> target,
+ SloppyTNode<Object> new_target,
+ TNode<FixedDoubleArray> elements,
+ TNode<Int32T> length,
+ TNode<Int32T> args_count,
+ TNode<Context> context, TNode<Int32T> kind);
+ void CallOrConstructWithSpread(TNode<Object> target, TNode<Object> new_target,
+ TNode<Object> spread, TNode<Int32T> args_count,
+ TNode<Context> context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 5ce0aa0155..ff04fa2dbe 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -28,7 +28,7 @@ namespace {
Object* PositiveNumberOrNull(int value, Isolate* isolate) {
if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
- return isolate->heap()->null_value();
+ return ReadOnlyRoots(isolate).null_value();
}
Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
@@ -76,7 +76,7 @@ BUILTIN(CallSitePrototypeGetFunction) {
GetFrameIndex(isolate, recv));
StackFrameBase* frame = it.Frame();
- if (frame->IsStrict()) return isolate->heap()->undefined_value();
+ if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
return *frame->GetFunction();
}
@@ -127,7 +127,7 @@ BUILTIN(CallSitePrototypeGetThis) {
GetFrameIndex(isolate, recv));
StackFrameBase* frame = it.Frame();
- if (frame->IsStrict()) return isolate->heap()->undefined_value();
+ if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
return *frame->GetReceiver();
}
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index edbfb26a71..6cab828e2e 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -8,6 +8,7 @@
#include "src/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-collection.h"
namespace v8 {
namespace internal {
@@ -81,7 +82,9 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Main entry point for a collection constructor builtin.
void GenerateConstructor(Variant variant,
- Handle<String> constructor_function_name);
+ Handle<String> constructor_function_name,
+ TNode<Object> new_target, TNode<IntPtrT> argc,
+ TNode<Context> context);
// Retrieves the collection function that adds an entry. `set` for Maps and
// `add` for Sets.
@@ -220,7 +223,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
TNode<Object> collection, TNode<JSArray> fast_jsarray,
Label* if_may_have_side_effects) {
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
- TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(fast_jsarray));
+ TNode<Int32T> elements_kind = LoadElementsKind(fast_jsarray);
TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
CSA_ASSERT(
this,
@@ -353,13 +356,11 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
}
void BaseCollectionsAssembler::GenerateConstructor(
- Variant variant, Handle<String> constructor_function_name) {
+ Variant variant, Handle<String> constructor_function_name,
+ TNode<Object> new_target, TNode<IntPtrT> argc, TNode<Context> context) {
const int kIterableArg = 0;
- CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ CodeStubArguments args(this, argc);
TNode<Object> iterable = args.GetOptionalArgumentValue(kIterableArg);
- TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label if_undefined(this, Label::kDeferred);
GotoIf(IsUndefined(new_target), &if_undefined);
@@ -500,8 +501,9 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
TNode<HeapObject> elements, TNode<IntPtrT> index) {
TVARIABLE(Object, entry);
Label if_hole(this, Label::kDeferred), next(this);
- TNode<Float64T> element = UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, INTPTR_PARAMETERS, &if_hole));
+ TNode<Float64T> element =
+ LoadFixedDoubleArrayElement(CAST(elements), index, MachineType::Float64(),
+ 0, INTPTR_PARAMETERS, &if_hole);
{ // not hole
entry = AllocateHeapNumberWithValue(element);
Goto(&next);
@@ -528,7 +530,7 @@ void BaseCollectionsAssembler::LoadKeyValue(
TNode<JSArray> array = CAST(maybe_array);
TNode<Smi> length = LoadFastJSArrayLength(array);
TNode<FixedArrayBase> elements = LoadElements(array);
- TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(array));
+ TNode<Int32T> elements_kind = LoadElementsKind(array);
Label if_smiorobjects(this), if_doubles(this);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
@@ -743,11 +745,23 @@ TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, isolate()->factory()->Map_string());
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ GenerateConstructor(kMap, isolate()->factory()->Map_string(), new_target,
+ argc, context);
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, isolate()->factory()->Set_string());
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ GenerateConstructor(kSet, isolate()->factory()->Set_string(), new_target,
+ argc, context);
}
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
@@ -1524,8 +1538,8 @@ TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map.prototype.forEach";
- Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ Node* const context = Parameter(Descriptor::kContext);
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* const receiver = args.GetReceiver();
Node* const callback = args.GetOptionalArgumentValue(0);
@@ -1755,8 +1769,8 @@ TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set.prototype.forEach";
- Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ Node* const context = Parameter(Descriptor::kContext);
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* const receiver = args.GetReceiver();
Node* const callback = args.GetOptionalArgumentValue(0);
@@ -1965,7 +1979,7 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
TNode<Smi> CreateIdentityHash(TNode<Object> receiver);
TNode<IntPtrT> EntryMask(TNode<IntPtrT> capacity);
- // Builds code that finds the ObjectHashTable entry for a {key} using the
+ // Builds code that finds the EphemeronHashTable entry for a {key} using the
// comparison code generated by {key_compare}. The key index is returned if
// the {key} is found.
typedef std::function<void(TNode<Object> entry_key, Label* if_same)>
@@ -1974,12 +1988,13 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
TNode<IntPtrT> entry_mask,
const KeyComparator& key_compare);
- // Builds code that finds an ObjectHashTable entry available for a new entry.
+ // Builds code that finds an EphemeronHashTable entry available for a new
+ // entry.
TNode<IntPtrT> FindKeyIndexForInsertion(TNode<HeapObject> table,
TNode<IntPtrT> key_hash,
TNode<IntPtrT> entry_mask);
- // Builds code that finds the ObjectHashTable entry with key that matches
+ // Builds code that finds the EphemeronHashTable entry with key that matches
// {key} and returns the entry's key index. If {key} cannot be found, jumps to
// {if_not_found}.
TNode<IntPtrT> FindKeyIndexForKey(TNode<HeapObject> table, TNode<Object> key,
@@ -2009,13 +2024,13 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
void WeakCollectionsBuiltinsAssembler::AddEntry(
TNode<HeapObject> table, TNode<IntPtrT> key_index, TNode<Object> key,
TNode<Object> value, TNode<IntPtrT> number_of_elements) {
- // See ObjectHashTable::AddEntry().
+ // See EphemeronHashTable::AddEntry().
TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
StoreFixedArrayElement(table, key_index, key);
StoreFixedArrayElement(table, value_index, value);
// See HashTableBase::ElementAdded().
- StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
@@ -2029,17 +2044,18 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
// See HashTable::NewInternal().
TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
- TNode<FixedArray> table = AllocateFixedArray(
- HOLEY_ELEMENTS, length, INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
+ TNode<FixedArray> table =
+ AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation);
- Heap::RootListIndex map_root_index =
- static_cast<Heap::RootListIndex>(ObjectHashTableShape::GetMapRootIndex());
+ Heap::RootListIndex map_root_index = static_cast<Heap::RootListIndex>(
+ EphemeronHashTableShape::GetMapRootIndex());
StoreMapNoWriteBarrier(table, map_root_index);
- StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
+ StoreFixedArrayElement(table,
+ EphemeronHashTable::kNumberOfDeletedElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(table, ObjectHashTable::kCapacityIndex,
+ StoreFixedArrayElement(table, EphemeronHashTable::kCapacityIndex,
SmiFromIntPtr(capacity), SKIP_WRITE_BARRIER);
TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
@@ -2124,22 +2140,22 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
// See HashTable::KeyAt().
// (entry * kEntrySize) + kElementsStartIndex + kEntryKeyIndex
return IntPtrAdd(
- IntPtrMul(entry, IntPtrConstant(ObjectHashTable::kEntrySize)),
- IntPtrConstant(ObjectHashTable::kElementsStartIndex +
- ObjectHashTable::kEntryKeyIndex));
+ IntPtrMul(entry, IntPtrConstant(EphemeronHashTable::kEntrySize)),
+ IntPtrConstant(EphemeronHashTable::kElementsStartIndex +
+ EphemeronHashTable::kEntryKeyIndex));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
TNode<HeapObject> table, int offset) {
- TNode<IntPtrT> number_of_elements = SmiUntag(CAST(
- LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex)));
+ TNode<IntPtrT> number_of_elements = SmiUntag(CAST(LoadFixedArrayElement(
+ table, EphemeronHashTable::kNumberOfElementsIndex)));
return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
TNode<HeapObject> table, int offset) {
TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(LoadFixedArrayElement(
- table, ObjectHashTable::kNumberOfDeletedElementsIndex)));
+ table, EphemeronHashTable::kNumberOfDeletedElementsIndex)));
return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
}
@@ -2151,7 +2167,7 @@ TNode<HeapObject> WeakCollectionsBuiltinsAssembler::LoadTable(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
TNode<HeapObject> table) {
return SmiUntag(
- CAST(LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex)));
+ CAST(LoadFixedArrayElement(table, EphemeronHashTable::kCapacityIndex)));
}
TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
@@ -2175,16 +2191,17 @@ TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
void WeakCollectionsBuiltinsAssembler::RemoveEntry(
TNode<HeapObject> table, TNode<IntPtrT> key_index,
TNode<IntPtrT> number_of_elements) {
- // See ObjectHashTable::RemoveEntry().
+ // See EphemeronHashTable::RemoveEntry().
TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
StoreFixedArrayElement(table, key_index, TheHoleConstant());
StoreFixedArrayElement(table, value_index, TheHoleConstant());
// See HashTableBase::ElementRemoved().
TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table, 1);
- StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
+ StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
+ StoreFixedArrayElement(table,
+ EphemeronHashTable::kNumberOfDeletedElementsIndex,
SmiFromIntPtr(number_of_deleted), SKIP_WRITE_BARRIER);
}
@@ -2214,16 +2231,28 @@ TNode<Word32T> WeakCollectionsBuiltinsAssembler::ShouldShrink(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
TNode<IntPtrT> key_index) {
return IntPtrAdd(key_index,
- IntPtrConstant(ObjectHashTableShape::kEntryValueIndex -
- ObjectHashTable::kEntryKeyIndex));
+ IntPtrConstant(EphemeronHashTableShape::kEntryValueIndex -
+ EphemeronHashTable::kEntryKeyIndex));
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string());
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string(),
+ new_target, argc, context);
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string());
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string(),
+ new_target, argc, context);
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
@@ -2289,7 +2318,7 @@ TF_BUILTIN(WeakMapHas, WeakCollectionsBuiltinsAssembler) {
}
// Helper that removes the entry with a given key from the backing store
-// (ObjectHashTable) of a WeakMap or WeakSet.
+// (EphemeronHashTable) of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<HeapObject> collection = CAST(Parameter(Descriptor::kCollection));
@@ -2318,8 +2347,8 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
SmiTag(hash)));
}
-// Helper that sets the key and value to the backing store (ObjectHashTable) of
-// a WeakMap or WeakSet.
+// Helper that sets the key and value to the backing store (EphemeronHashTable)
+// of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<HeapObject> collection = CAST(Parameter(Descriptor::kCollection));
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
index e3c97d3841..9a642e7d3b 100644
--- a/deps/v8/src/builtins/builtins-collections.cc
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/objects-inl.h"
+#include "src/objects/js-collection-inl.h"
namespace v8 {
namespace internal {
@@ -13,16 +14,16 @@ BUILTIN(MapPrototypeClear) {
HandleScope scope(isolate);
const char* const kMethodName = "Map.prototype.clear";
CHECK_RECEIVER(JSMap, map, kMethodName);
- JSMap::Clear(map);
- return isolate->heap()->undefined_value();
+ JSMap::Clear(isolate, map);
+ return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(SetPrototypeClear) {
HandleScope scope(isolate);
const char* const kMethodName = "Set.prototype.clear";
CHECK_RECEIVER(JSSet, set, kMethodName);
- JSSet::Clear(set);
- return isolate->heap()->undefined_value();
+ JSSet::Clear(isolate, set);
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
index 0ddd90e3f1..249ec10a28 100644
--- a/deps/v8/src/builtins/builtins-console-gen.cc
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -16,9 +16,9 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* new_target = Parameter(Descriptor::kJSNewTarget);
GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime);
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
@@ -28,8 +28,10 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) {
BIND(&runtime);
{
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
TailCallBuiltin(Builtins::kConsoleAssert, context, target, new_target,
argc);
}
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 936d22c492..d87183c716 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -32,11 +32,8 @@ namespace internal {
V(Count, count) \
V(CountReset, countReset) \
V(Assert, assert) \
- V(MarkTimeline, markTimeline) \
V(Profile, profile) \
- V(ProfileEnd, profileEnd) \
- V(Timeline, timeline) \
- V(TimelineEnd, timelineEnd)
+ V(ProfileEnd, profileEnd)
namespace {
void ConsoleCall(
@@ -81,7 +78,7 @@ void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
BUILTIN(Console##call) { \
ConsoleCall(isolate, args, &debug::ConsoleDelegate::call); \
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); \
- return isolate->heap()->undefined_value(); \
+ return ReadOnlyRoots(isolate).undefined_value(); \
}
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
@@ -90,31 +87,31 @@ BUILTIN(ConsoleTime) {
LogTimerEvent(isolate, args, Logger::START);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeEnd) {
LogTimerEvent(isolate, args, Logger::END);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeEnd);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
BUILTIN(ConsoleTimeStamp) {
LogTimerEvent(isolate, args, Logger::STAMP);
ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeStamp);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
namespace {
-void InstallContextFunction(Handle<JSObject> target, const char* name,
- Builtins::Name builtin_id, int context_id,
- Handle<Object> context_name) {
- Factory* const factory = target->GetIsolate()->factory();
+void InstallContextFunction(Isolate* isolate, Handle<JSObject> target,
+ const char* name, Builtins::Name builtin_id,
+ int context_id, Handle<Object> context_name) {
+ Factory* const factory = isolate->factory();
Handle<String> name_string =
- Name::ToFunctionName(factory->InternalizeUtf8String(name))
+ Name::ToFunctionName(isolate, factory->InternalizeUtf8String(name))
.ToHandleChecked();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
name_string, builtin_id, i::LanguageMode::kSloppy);
@@ -124,14 +121,13 @@ void InstallContextFunction(Handle<JSObject> target, const char* name,
fun->shared()->DontAdaptArguments();
fun->shared()->set_length(1);
- JSObject::AddProperty(fun, factory->console_context_id_symbol(),
- handle(Smi::FromInt(context_id), target->GetIsolate()),
- NONE);
+ JSObject::AddProperty(isolate, fun, factory->console_context_id_symbol(),
+ handle(Smi::FromInt(context_id), isolate), NONE);
if (context_name->IsString()) {
- JSObject::AddProperty(fun, factory->console_context_name_symbol(),
+ JSObject::AddProperty(isolate, fun, factory->console_context_name_symbol(),
context_name, NONE);
}
- JSObject::AddProperty(target, name_string, fun, NONE);
+ JSObject::AddProperty(isolate, target, name_string, fun, NONE);
}
} // namespace
@@ -152,17 +148,17 @@ BUILTIN(ConsoleContext) {
int id = isolate->last_console_context_id() + 1;
isolate->set_last_console_context_id(id);
-#define CONSOLE_BUILTIN_SETUP(call, name) \
- InstallContextFunction(context, #name, Builtins::kConsole##call, id, \
- args.at(1));
+#define CONSOLE_BUILTIN_SETUP(call, name) \
+ InstallContextFunction(isolate, context, #name, Builtins::kConsole##call, \
+ id, args.at(1));
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_SETUP)
#undef CONSOLE_BUILTIN_SETUP
- InstallContextFunction(context, "time", Builtins::kConsoleTime, id,
- args.at(1));
- InstallContextFunction(context, "timeEnd", Builtins::kConsoleTimeEnd, id,
- args.at(1));
- InstallContextFunction(context, "timeStamp", Builtins::kConsoleTimeStamp, id,
+ InstallContextFunction(isolate, context, "time", Builtins::kConsoleTime, id,
args.at(1));
+ InstallContextFunction(isolate, context, "timeEnd", Builtins::kConsoleTimeEnd,
+ id, args.at(1));
+ InstallContextFunction(isolate, context, "timeStamp",
+ Builtins::kConsoleTimeStamp, id, args.at(1));
return *context;
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 0933cc9a3c..76c28bc869 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -36,35 +36,25 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
}
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
- Node* target = Parameter(ConstructWithArrayLikeDescriptor::kTarget);
- Node* new_target = Parameter(ConstructWithArrayLikeDescriptor::kNewTarget);
- Node* arguments_list =
- Parameter(ConstructWithArrayLikeDescriptor::kArgumentsList);
- Node* context = Parameter(ConstructWithArrayLikeDescriptor::kContext);
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
- Node* target = Parameter(ConstructWithSpreadDescriptor::kTarget);
- Node* new_target = Parameter(ConstructWithSpreadDescriptor::kNewTarget);
- Node* spread = Parameter(ConstructWithSpreadDescriptor::kSpread);
- Node* args_count = Parameter(ConstructWithSpreadDescriptor::kArgumentsCount);
- Node* context = Parameter(ConstructWithSpreadDescriptor::kContext);
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
+ TNode<Int32T> args_count =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
typedef compiler::Node Node;
-Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
- return TaggedIsSmi(literal_site);
-}
-
-Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
- CSA_ASSERT(this, IsAllocationSite(site));
- return LoadObjectField(site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset);
-}
-
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
Node* feedback_cell = Parameter(Descriptor::kFeedbackCell);
@@ -149,7 +139,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
Handle<Code> lazy_builtin_handle(
- isolate()->builtins()->builtin(Builtins::kCompileLazy));
+ isolate()->builtins()->builtin(Builtins::kCompileLazy), isolate());
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
@@ -287,17 +277,17 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
}
TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
- Node* scope_info = Parameter(FastNewFunctionContextDescriptor::kScopeInfo);
- Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
- Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+ Node* scope_info = Parameter(Descriptor::kScopeInfo);
+ Node* slots = Parameter(Descriptor::kSlots);
+ Node* context = Parameter(Descriptor::kContext);
Return(EmitFastNewFunctionContext(scope_info, slots, context,
ScopeType::EVAL_SCOPE));
}
TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
- Node* scope_info = Parameter(FastNewFunctionContextDescriptor::kScopeInfo);
- Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
- Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+ Node* scope_info = Parameter(Descriptor::kScopeInfo);
+ Node* slots = Parameter(Descriptor::kSlots);
+ Node* context = Parameter(Descriptor::kContext);
Return(EmitFastNewFunctionContext(scope_info, slots, context,
ScopeType::FUNCTION_SCOPE));
}
@@ -308,8 +298,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
Label call_runtime(this, Label::kDeferred), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
- TNode<Object> literal_site = ToObject(
- LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ TNode<Object> literal_site =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
GotoIf(NotHasBoilerplate(literal_site), &call_runtime);
{
Node* boilerplate = literal_site;
@@ -353,13 +343,13 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
return_result(this);
VARIABLE(result, MachineRepresentation::kTagged);
- TNode<Object> allocation_site = ToObject(
- LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
- GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
+ TNode<Object> maybe_allocation_site =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime);
- Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
+ TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
+ TNode<JSArray> boilerplate = CAST(LoadBoilerplate(allocation_site));
- CSA_ASSERT(this, IsJSArrayMap(LoadMap(boilerplate)));
ParameterMode mode = OptimalParameterMode();
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
return CloneFastJSArray(context, boilerplate, mode, allocation_site);
@@ -392,15 +382,17 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
Node* feedback_vector, Node* slot, Node* context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
- TVARIABLE(Object, allocation_site,
- ToObject(LoadFeedbackVectorSlot(feedback_vector, slot, 0,
- INTPTR_PARAMETERS)));
+ TNode<Object> maybe_allocation_site =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ TVARIABLE(AllocationSite, allocation_site);
Label create_empty_array(this),
initialize_allocation_site(this, Label::kDeferred), done(this);
- Branch(TaggedIsSmi(allocation_site.value()), &initialize_allocation_site,
- &create_empty_array);
-
+ GotoIf(TaggedIsSmi(maybe_allocation_site), &initialize_allocation_site);
+ {
+ allocation_site = CAST(maybe_allocation_site);
+ Goto(&create_empty_array);
+ }
// TODO(cbruni): create the AllocationSite in CSA.
BIND(&initialize_allocation_site);
{
@@ -410,12 +402,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
}
BIND(&create_empty_array);
- CSA_ASSERT(this, IsAllocationSite(CAST(allocation_site.value())));
- Node* kind = SmiToInt32(CAST(
- LoadObjectField(CAST(allocation_site.value()),
- AllocationSite::kTransitionInfoOrBoilerplateOffset)));
- CSA_ASSERT(this, IsFastElementsKind(kind));
- Node* native_context = LoadNativeContext(context);
+ TNode<Int32T> kind = LoadElementsKind(allocation_site.value());
+ TNode<Context> native_context = LoadNativeContext(context);
Comment("LoadJSArrayElementsMap");
Node* array_map = LoadJSArrayElementsMap(kind, native_context);
Node* zero = SmiConstant(0);
@@ -440,12 +428,13 @@ TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
Node* feedback_vector, Node* slot, Label* call_runtime) {
- TNode<Object> allocation_site = ToObject(
- LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
- GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
+ TNode<Object> maybe_allocation_site =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime);
- Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
- Node* boilerplate_map = LoadMap(boilerplate);
+ TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
+ TNode<JSObject> boilerplate = LoadBoilerplate(allocation_site);
+ TNode<Map> boilerplate_map = LoadMap(boilerplate);
CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
VARIABLE(var_properties, MachineRepresentation::kTagged);
@@ -588,7 +577,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
{
Node* double_value = LoadHeapNumberValue(field);
Node* mutable_heap_number =
- AllocateHeapNumberWithValue(double_value, MUTABLE);
+ AllocateMutableHeapNumberWithValue(double_value);
StoreObjectField(copy, offset, mutable_heap_number);
Goto(&continue_loop);
}
@@ -611,12 +600,12 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Return(copy);
BIND(&call_runtime);
- Node* boilerplate_description =
- Parameter(Descriptor::kBoilerplateDescription);
+ Node* object_boilerplate_description =
+ Parameter(Descriptor::kObjectBoilerplateDescription);
Node* flags = Parameter(Descriptor::kFlags);
Node* context = Parameter(Descriptor::kContext);
TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(slot), boilerplate_description, flags);
+ SmiTag(slot), object_boilerplate_description, flags);
}
// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
@@ -642,17 +631,16 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
int const kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* new_target = Parameter(Descriptor::kJSNewTarget);
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_subclass(this, Label::kDeferred), if_notsubclass(this),
return_result(this);
GotoIf(IsUndefined(new_target), &if_notsubclass);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
Branch(WordEqual(new_target, target), &if_notsubclass, &if_subclass);
BIND(&if_subclass);
@@ -694,9 +682,9 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
// 1. If no arguments were passed to this function invocation, let n be +0.
@@ -716,7 +704,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
{
// 3. If NewTarget is undefined, return n.
Node* n_value = var_n.value();
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* new_target = Parameter(Descriptor::kJSNewTarget);
Label return_n(this), constructnumber(this, Label::kDeferred);
Branch(IsUndefined(new_target), &return_n, &constructnumber);
@@ -729,8 +717,11 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
// "%NumberPrototype%", « [[NumberData]] »).
// 5. Set O.[[NumberData]] to n.
// 6. Return O.
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
+
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
StoreObjectField(result, JSValue::kValueOffset, n_value);
@@ -741,14 +732,12 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
// https://tc39.github.io/ecma262/#sec-string-constructor
TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
// 1. If no arguments were passed to this function invocation, let s be "".
VARIABLE(var_s, MachineRepresentation::kTagged, EmptyStringConstant());
@@ -789,6 +778,11 @@ TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
BIND(&constructstring);
{
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
+
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
StoreObjectField(result, JSValue::kValueOffset, s_value);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 820970961b..3079d9a4f7 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -35,10 +35,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
Label* call_runtime);
-
- private:
- Node* NotHasBoilerplate(Node* literal_site);
- Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 63a6dc0a91..b1441adc37 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -392,7 +392,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
- Node* object = Parameter(TypeofDescriptor::kObject);
+ Node* object = Parameter(Descriptor::kObject);
Return(Typeof(object));
}
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
new file mode 100644
index 0000000000..6c755c4d08
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -0,0 +1,67 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
+#define V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
+
+#include "src/elements-kind.h"
+#include "src/objects/bigint.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
+
+namespace v8 {
+namespace internal {
+
+class DataViewBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
+ public:
+ explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : BaseBuiltinsFromDSLAssembler(state) {}
+
+ TNode<Number> LoadDataViewByteOffset(TNode<JSDataView> data_view) {
+ return CAST(LoadObjectField(data_view, JSDataView::kByteOffsetOffset));
+ }
+
+ TNode<Number> LoadDataViewByteLength(TNode<JSDataView> data_view) {
+ return CAST(LoadObjectField(data_view, JSDataView::kByteLengthOffset));
+ }
+
+ TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
+ return UncheckedCast<Int32T>(
+ Load(MachineType::Uint8(), data_pointer, offset));
+ }
+
+ TNode<Int32T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
+ return UncheckedCast<Int32T>(
+ Load(MachineType::Int8(), data_pointer, offset));
+ }
+
+ void StoreWord8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset,
+ TNode<Word32T> value) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, data_pointer, offset,
+ value);
+ }
+
+ int32_t DataViewElementSize(ElementsKind elements_kind) {
+ return ElementsKindToByteSize(elements_kind);
+ }
+
+ TNode<IntPtrT> DataViewEncodeBigIntBits(bool sign, int32_t digits) {
+ return IntPtrConstant(BigInt::SignBits::encode(sign) |
+ BigInt::LengthBits::encode(digits));
+ }
+
+ TNode<UintPtrT> DataViewDecodeBigIntLength(TNode<BigInt> value) {
+ TNode<WordT> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord<BigIntBase::LengthBits>(bitfield);
+ }
+
+ TNode<UintPtrT> DataViewDecodeBigIntSign(TNode<BigInt> value) {
+ TNode<WordT> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord<BigIntBase::SignBits>(bitfield);
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 49dcbe1e83..07aa4eb48b 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -104,299 +104,5 @@ BUILTIN(DataViewConstructor) {
}
}
-// ES6 section 24.2.4.1 get DataView.prototype.buffer
-BUILTIN(DataViewPrototypeGetBuffer) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.buffer");
- return data_view->buffer();
-}
-
-// ES6 section 24.2.4.2 get DataView.prototype.byteLength
-BUILTIN(DataViewPrototypeGetByteLength) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteLength");
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
- return data_view->byte_length();
-}
-
-// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
-BUILTIN(DataViewPrototypeGetByteOffset) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteOffset");
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
- return data_view->byte_offset();
-}
-
-namespace {
-
-bool NeedToFlipBytes(bool is_little_endian) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
- return !is_little_endian;
-#else
- return is_little_endian;
-#endif
-}
-
-template <size_t n>
-void CopyBytes(uint8_t* target, uint8_t const* source) {
- for (size_t i = 0; i < n; i++) {
- *(target++) = *(source++);
- }
-}
-
-template <size_t n>
-void FlipBytes(uint8_t* target, uint8_t const* source) {
- source = source + (n - 1);
- for (size_t i = 0; i < n; i++) {
- *(target++) = *(source--);
- }
-}
-
-template <typename T>
-MaybeHandle<Object> AllocateResult(Isolate* isolate, T value) {
- return isolate->factory()->NewNumber(value);
-}
-
-template <>
-MaybeHandle<Object> AllocateResult(Isolate* isolate, int64_t value) {
- return BigInt::FromInt64(isolate, value);
-}
-
-template <>
-MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
- return BigInt::FromUint64(isolate, value);
-}
-
-// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
-template <typename T>
-MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
- Handle<Object> request_index,
- bool is_little_endian, const char* method) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, request_index,
- Object::ToIndex(isolate, request_index,
- MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- size_t get_index = 0;
- if (!TryNumberToSize(*request_index, &get_index)) {
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
- isolate);
- if (buffer->was_neutered()) {
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method);
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
- Object);
- }
- size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
- size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
- if (get_index + sizeof(T) > data_view_byte_length ||
- get_index + sizeof(T) < get_index) { // overflow
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- }
- union {
- T data;
- uint8_t bytes[sizeof(T)];
- } v;
- size_t const buffer_offset = data_view_byte_offset + get_index;
- DCHECK_GE(NumberToSize(buffer->byte_length()), buffer_offset + sizeof(T));
- uint8_t const* const source =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(v.bytes, source);
- } else {
- CopyBytes<sizeof(T)>(v.bytes, source);
- }
- return AllocateResult<T>(isolate, v.data);
-}
-
-template <typename T>
-MaybeHandle<Object> DataViewConvertInput(Isolate* isolate,
- Handle<Object> input) {
- return Object::ToNumber(input);
-}
-
-template <>
-MaybeHandle<Object> DataViewConvertInput<int64_t>(Isolate* isolate,
- Handle<Object> input) {
- return BigInt::FromObject(isolate, input);
-}
-
-template <>
-MaybeHandle<Object> DataViewConvertInput<uint64_t>(Isolate* isolate,
- Handle<Object> input) {
- return BigInt::FromObject(isolate, input);
-}
-
-template <typename T>
-T DataViewConvertValue(Handle<Object> value);
-
-template <>
-int8_t DataViewConvertValue<int8_t>(Handle<Object> value) {
- return static_cast<int8_t>(DoubleToInt32(value->Number()));
-}
-
-template <>
-int16_t DataViewConvertValue<int16_t>(Handle<Object> value) {
- return static_cast<int16_t>(DoubleToInt32(value->Number()));
-}
-
-template <>
-int32_t DataViewConvertValue<int32_t>(Handle<Object> value) {
- return DoubleToInt32(value->Number());
-}
-
-template <>
-uint8_t DataViewConvertValue<uint8_t>(Handle<Object> value) {
- return static_cast<uint8_t>(DoubleToUint32(value->Number()));
-}
-
-template <>
-uint16_t DataViewConvertValue<uint16_t>(Handle<Object> value) {
- return static_cast<uint16_t>(DoubleToUint32(value->Number()));
-}
-
-template <>
-uint32_t DataViewConvertValue<uint32_t>(Handle<Object> value) {
- return DoubleToUint32(value->Number());
-}
-
-template <>
-float DataViewConvertValue<float>(Handle<Object> value) {
- return static_cast<float>(value->Number());
-}
-
-template <>
-double DataViewConvertValue<double>(Handle<Object> value) {
- return value->Number();
-}
-
-template <>
-int64_t DataViewConvertValue<int64_t>(Handle<Object> value) {
- return BigInt::cast(*value)->AsInt64();
-}
-
-template <>
-uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
- return BigInt::cast(*value)->AsUint64();
-}
-
-// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
-// value)
-template <typename T>
-MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
- Handle<Object> request_index,
- bool is_little_endian, Handle<Object> value,
- const char* method) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, request_index,
- Object::ToIndex(isolate, request_index,
- MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
- DataViewConvertInput<T>(isolate, value), Object);
- size_t get_index = 0;
- if (!TryNumberToSize(*request_index, &get_index)) {
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
- isolate);
- if (buffer->was_neutered()) {
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method);
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
- Object);
- }
- size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
- size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
- if (get_index + sizeof(T) > data_view_byte_length ||
- get_index + sizeof(T) < get_index) { // overflow
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
- Object);
- }
- union {
- T data;
- uint8_t bytes[sizeof(T)];
- } v;
- v.data = DataViewConvertValue<T>(value);
- size_t const buffer_offset = data_view_byte_offset + get_index;
- DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
- uint8_t* const target =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(target, v.bytes);
- } else {
- CopyBytes<sizeof(T)>(target, v.bytes);
- }
- return isolate->factory()->undefined_value();
-}
-
-} // namespace
-
-#define DATA_VIEW_PROTOTYPE_GET(Type, type) \
- BUILTIN(DataViewPrototypeGet##Type) { \
- HandleScope scope(isolate); \
- CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.get" #Type); \
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 1); \
- Handle<Object> is_little_endian = args.atOrUndefined(isolate, 2); \
- Handle<Object> result; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
- isolate, result, \
- GetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue(), \
- "DataView.prototype.get" #Type)); \
- return *result; \
- }
-DATA_VIEW_PROTOTYPE_GET(Int8, int8_t)
-DATA_VIEW_PROTOTYPE_GET(Uint8, uint8_t)
-DATA_VIEW_PROTOTYPE_GET(Int16, int16_t)
-DATA_VIEW_PROTOTYPE_GET(Uint16, uint16_t)
-DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
-DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
-DATA_VIEW_PROTOTYPE_GET(Float32, float)
-DATA_VIEW_PROTOTYPE_GET(Float64, double)
-DATA_VIEW_PROTOTYPE_GET(BigInt64, int64_t)
-DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
-#undef DATA_VIEW_PROTOTYPE_GET
-
-#define DATA_VIEW_PROTOTYPE_SET(Type, type) \
- BUILTIN(DataViewPrototypeSet##Type) { \
- HandleScope scope(isolate); \
- CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.set" #Type); \
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 1); \
- Handle<Object> value = args.atOrUndefined(isolate, 2); \
- Handle<Object> is_little_endian = args.atOrUndefined(isolate, 3); \
- Handle<Object> result; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
- isolate, result, \
- SetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue(), value, \
- "DataView.prototype.get" #Type)); \
- return *result; \
- }
-DATA_VIEW_PROTOTYPE_SET(Int8, int8_t)
-DATA_VIEW_PROTOTYPE_SET(Uint8, uint8_t)
-DATA_VIEW_PROTOTYPE_SET(Int16, int16_t)
-DATA_VIEW_PROTOTYPE_SET(Uint16, uint16_t)
-DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
-DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
-DATA_VIEW_PROTOTYPE_SET(Float32, float)
-DATA_VIEW_PROTOTYPE_SET(Float64, double)
-DATA_VIEW_PROTOTYPE_SET(BigInt64, int64_t)
-DATA_VIEW_PROTOTYPE_SET(BigUint64, uint64_t)
-#undef DATA_VIEW_PROTOTYPE_SET
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 6568469145..0669963a09 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -104,9 +104,8 @@ const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
// ES6 section 20.3.1.16 Date Time String Format
-double ParseDateTimeString(Handle<String> str) {
- Isolate* const isolate = str->GetIsolate();
- str = String::Flatten(str);
+double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
+ str = String::Flatten(isolate, str);
// TODO(bmeurer): Change DateParser to not use the FixedArray.
Handle<FixedArray> tmp =
isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
@@ -174,10 +173,10 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
UNREACHABLE();
}
-Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
+Object* SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
+ double time_val) {
if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- Isolate* const isolate = date->GetIsolate();
time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
} else {
time_val = std::numeric_limits<double>::quiet_NaN();
@@ -211,47 +210,48 @@ BUILTIN(DateConstructor) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToPrimitive(value));
if (value->IsString()) {
- time_val = ParseDateTimeString(Handle<String>::cast(value));
+ time_val = ParseDateTimeString(isolate, Handle<String>::cast(value));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToNumber(value));
+ Object::ToNumber(isolate, value));
time_val = value->Number();
}
}
} else {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at(1)));
+ Object::ToNumber(isolate, args.at(1)));
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at(2)));
+ Object::ToNumber(isolate, args.at(2)));
double year = year_object->Number();
double month = month_object->Number();
double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
if (argc >= 3) {
Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at(3)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_object, Object::ToNumber(isolate, args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
- Object::ToNumber(args.at(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(isolate, args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
- Object::ToNumber(args.at(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(isolate, args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
- Object::ToNumber(args.at(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object,
+ Object::ToNumber(isolate, args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at(7)));
+ isolate, ms_object, Object::ToNumber(isolate, args.at(7)));
ms = ms_object->Number();
}
}
@@ -290,7 +290,7 @@ BUILTIN(DateParse) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, string,
Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
- return *isolate->factory()->NewNumber(ParseDateTimeString(string));
+ return *isolate->factory()->NewNumber(ParseDateTimeString(isolate, string));
}
// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
@@ -303,37 +303,38 @@ BUILTIN(DateUTC) {
if (argc >= 1) {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at(1)));
+ Object::ToNumber(isolate, args.at(1)));
year = year_object->Number();
if (argc >= 2) {
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at(2)));
+ Object::ToNumber(isolate, args.at(2)));
month = month_object->Number();
if (argc >= 3) {
Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at(3)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_object, Object::ToNumber(isolate, args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
- Object::ToNumber(args.at(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(isolate, args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
- Object::ToNumber(args.at(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(isolate, args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
- Object::ToNumber(args.at(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object,
+ Object::ToNumber(isolate, args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at(7)));
+ isolate, ms_object, Object::ToNumber(isolate, args.at(7)));
ms = ms_object->Number();
}
}
@@ -357,7 +358,8 @@ BUILTIN(DatePrototypeSetDate) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(isolate, value));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -368,7 +370,7 @@ BUILTIN(DatePrototypeSetDate) {
isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
@@ -377,7 +379,8 @@ BUILTIN(DatePrototypeSetFullYear) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
int const argc = args.length() - 1;
Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
+ Object::ToNumber(isolate, year));
double y = year->Number(), m = 0.0, dt = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value()->Number())) {
@@ -392,16 +395,18 @@ BUILTIN(DatePrototypeSetFullYear) {
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
+ Object::ToNumber(isolate, month));
m = month->Number();
if (argc >= 3) {
Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
+ Object::ToNumber(isolate, date));
dt = date->Number();
}
}
double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
@@ -410,7 +415,8 @@ BUILTIN(DatePrototypeSetHours) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
int const argc = args.length() - 1;
Handle<Object> hour = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour,
+ Object::ToNumber(isolate, hour));
double h = hour->Number();
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
@@ -423,22 +429,25 @@ BUILTIN(DatePrototypeSetHours) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> min = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
+ Object::ToNumber(isolate, min));
m = min->Number();
if (argc >= 3) {
Handle<Object> sec = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
s = sec->Number();
if (argc >= 4) {
Handle<Object> ms = args.at(4);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
}
}
time_val = MakeDate(day, MakeTime(h, m, s, milli));
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
@@ -446,7 +455,8 @@ BUILTIN(DatePrototypeSetMilliseconds) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
Handle<Object> ms = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -458,7 +468,7 @@ BUILTIN(DatePrototypeSetMilliseconds) {
int s = (time_within_day / 1000) % 60;
time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
@@ -467,7 +477,8 @@ BUILTIN(DatePrototypeSetMinutes) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
int const argc = args.length() - 1;
Handle<Object> min = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
+ Object::ToNumber(isolate, min));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -480,17 +491,19 @@ BUILTIN(DatePrototypeSetMinutes) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> sec = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
s = sec->Number();
if (argc >= 3) {
Handle<Object> ms = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
}
time_val = MakeDate(day, MakeTime(h, m, s, milli));
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
@@ -499,7 +512,8 @@ BUILTIN(DatePrototypeSetMonth) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
int const argc = args.length() - 1;
Handle<Object> month = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
+ Object::ToNumber(isolate, month));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -512,12 +526,13 @@ BUILTIN(DatePrototypeSetMonth) {
double dt = day;
if (argc >= 2) {
Handle<Object> date = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
+ Object::ToNumber(isolate, date));
dt = date->Number();
}
time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
@@ -526,7 +541,8 @@ BUILTIN(DatePrototypeSetSeconds) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
int const argc = args.length() - 1;
Handle<Object> sec = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -539,12 +555,13 @@ BUILTIN(DatePrototypeSetSeconds) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> ms = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
time_val = MakeDate(day, MakeTime(h, m, s, milli));
}
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
@@ -552,7 +569,8 @@ BUILTIN(DatePrototypeSetTime) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(isolate, value));
return *JSDate::SetValue(date, DateCache::TimeClip(value->Number()));
}
@@ -561,7 +579,8 @@ BUILTIN(DatePrototypeSetUTCDate) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(isolate, value));
if (std::isnan(date->value()->Number())) return date->value();
int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
int const days = isolate->date_cache()->DaysFromTime(time_ms);
@@ -579,7 +598,8 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
int const argc = args.length() - 1;
Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
+ Object::ToNumber(isolate, year));
double y = year->Number(), m = 0.0, dt = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value()->Number())) {
@@ -593,11 +613,13 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
+ Object::ToNumber(isolate, month));
m = month->Number();
if (argc >= 3) {
Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
+ Object::ToNumber(isolate, date));
dt = date->Number();
}
}
@@ -611,7 +633,8 @@ BUILTIN(DatePrototypeSetUTCHours) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
int const argc = args.length() - 1;
Handle<Object> hour = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour,
+ Object::ToNumber(isolate, hour));
double h = hour->Number();
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
@@ -623,15 +646,18 @@ BUILTIN(DatePrototypeSetUTCHours) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> min = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
+ Object::ToNumber(isolate, min));
m = min->Number();
if (argc >= 3) {
Handle<Object> sec = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
s = sec->Number();
if (argc >= 4) {
Handle<Object> ms = args.at(4);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
}
@@ -646,7 +672,8 @@ BUILTIN(DatePrototypeSetUTCMilliseconds) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
Handle<Object> ms = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -666,7 +693,8 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
int const argc = args.length() - 1;
Handle<Object> min = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
+ Object::ToNumber(isolate, min));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -678,11 +706,13 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> sec = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
s = sec->Number();
if (argc >= 3) {
Handle<Object> ms = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
}
@@ -697,7 +727,8 @@ BUILTIN(DatePrototypeSetUTCMonth) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
int const argc = args.length() - 1;
Handle<Object> month = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
+ Object::ToNumber(isolate, month));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -709,7 +740,8 @@ BUILTIN(DatePrototypeSetUTCMonth) {
double dt = day;
if (argc >= 2) {
Handle<Object> date = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
+ Object::ToNumber(isolate, date));
dt = date->Number();
}
time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
@@ -723,7 +755,8 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
int const argc = args.length() - 1;
Handle<Object> sec = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
+ Object::ToNumber(isolate, sec));
double time_val = date->value()->Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
@@ -735,7 +768,8 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
double milli = time_within_day % 1000;
if (argc >= 2) {
Handle<Object> ms = args.at(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
+ Object::ToNumber(isolate, ms));
milli = ms->Number();
}
time_val = MakeDate(day, MakeTime(h, m, s, milli));
@@ -841,7 +875,8 @@ BUILTIN(DatePrototypeSetYear) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
+ Object::ToNumber(isolate, year));
double m = 0.0, dt = 1.0, y = year->Number();
if (0.0 <= y && y <= 99.0) {
y = 1900.0 + DoubleToInteger(y);
@@ -858,7 +893,7 @@ BUILTIN(DatePrototypeSetYear) {
dt = day;
}
double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
- return SetLocalDateValue(date, time_val);
+ return SetLocalDateValue(isolate, date, time_val);
}
// ES6 section 20.3.4.37 Date.prototype.toJSON ( key )
@@ -873,13 +908,13 @@ BUILTIN(DatePrototypeToJson) {
isolate, primitive,
Object::ToPrimitive(receiver_obj, ToPrimitiveHint::kNumber));
if (primitive->IsNumber() && !std::isfinite(primitive->Number())) {
- return isolate->heap()->null_value();
+ return ReadOnlyRoots(isolate).null_value();
} else {
Handle<String> name =
isolate->factory()->NewStringFromAsciiChecked("toISOString");
Handle<Object> function;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, function,
- Object::GetProperty(receiver_obj, name));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function, Object::GetProperty(isolate, receiver_obj, name));
if (!function->IsCallable()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, name));
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 4a4b17006c..46b02d88d8 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -31,8 +31,8 @@ namespace internal {
TFC(RecordWrite, RecordWrite, 1) \
\
/* Adaptors for CPP/API builtin */ \
- ASM(AdaptorWithExitFrame) \
- ASM(AdaptorWithBuiltinExitFrame) \
+ TFC(AdaptorWithExitFrame, CppBuiltinAdaptor, 1) \
+ TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor, 1) \
\
/* Calls */ \
ASM(ArgumentsAdaptorTrampoline) \
@@ -78,9 +78,9 @@ namespace internal {
TFS(CreateEmptyArrayLiteral, kFeedbackVector, kSlot) \
TFS(CreateShallowArrayLiteral, kFeedbackVector, kSlot, kConstantElements) \
TFS(CreateShallowObjectLiteral, kFeedbackVector, kSlot, \
- kBoilerplateDescription, kFlags) \
+ kObjectBoilerplateDescription, kFlags) \
/* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
- TFC(ConstructProxy, ConstructTrampoline, 1) \
+ TFC(ConstructProxy, JSTrampoline, 1) \
\
/* Apply and entries */ \
ASM(JSEntryTrampoline) \
@@ -120,9 +120,9 @@ namespace internal {
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
- ASM(CompileLazy) \
- ASM(CompileLazyDeoptimizedCode) \
- ASM(DeserializeLazy) \
+ TFC(CompileLazy, JSTrampoline, 1) \
+ TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \
+ TFC(DeserializeLazy, JSTrampoline, 1) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
\
@@ -159,8 +159,8 @@ namespace internal {
API(HandleApiCallAsConstructor) \
\
/* Adapters for Turbofan into runtime */ \
- ASM(AllocateInNewSpace) \
- ASM(AllocateInOldSpace) \
+ TFC(AllocateInNewSpace, Allocate, 1) \
+ TFC(AllocateInOldSpace, Allocate, 1) \
\
/* TurboFan support builtins */ \
TFS(CopyFastSmiOrObjectElements, kObject) \
@@ -223,8 +223,8 @@ namespace internal {
TFS(DeleteProperty, kObject, kKey, kLanguageMode) \
\
/* Abort */ \
- ASM(Abort) \
- TFC(AbortJS, AbortJS, 1) \
+ TFC(Abort, Abort, 1) \
+ TFC(AbortJS, Abort, 1) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -232,14 +232,56 @@ namespace internal {
CPP(Illegal) \
CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
- TFJ(ReturnReceiver, 0) \
+ TFJ(ReturnReceiver, 0, kReceiver) \
\
/* Array */ \
- ASM(ArrayConstructor) \
+ TFC(ArrayConstructor, JSTrampoline, 1) \
+ TFC(ArrayConstructorImpl, ArrayConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_PackedSmi_DontOverride, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_HoleySmi_DontOverride, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_PackedSmi_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_HoleySmi_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_Packed_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_Holey_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArrayNoArgumentConstructor_HoleyDouble_DisableAllocationSites, \
+ ArrayNoArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_PackedSmi_DontOverride, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_HoleySmi_DontOverride, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_PackedSmi_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_Packed_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_Holey_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_PackedDouble_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(ArrayNArgumentsConstructor, ArrayNArgumentsConstructor, 1) \
ASM(InternalArrayConstructor) \
+ ASM(InternalArrayConstructorImpl) \
+ TFC(InternalArrayNoArgumentConstructor_Packed, ArrayNoArgumentConstructor, \
+ 1) \
+ TFC(InternalArrayNoArgumentConstructor_Holey, ArrayNoArgumentConstructor, 1) \
+ TFC(InternalArraySingleArgumentConstructor_Packed, \
+ ArraySingleArgumentConstructor, 1) \
+ TFC(InternalArraySingleArgumentConstructor_Holey, \
+ ArraySingleArgumentConstructor, 1) \
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
- TFJ(ArrayIsArray, 1, kArg) \
+ TFJ(ArrayIsArray, 1, kReceiver, kArg) \
/* ES6 #sec-array.from */ \
TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.of */ \
@@ -280,88 +322,90 @@ namespace internal {
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
- kInitialK, kLength) \
- TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
- kInitialK, kLength, kResult) \
+ TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength) \
+ TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kResult) \
TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.some */ \
TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
- TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
- kInitialK, kLength) \
- TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
- kLength, kResult) \
+ TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength) \
+ TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.filter */ \
TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
TFJ(ArrayFilter, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ArrayFilterLoopEagerDeoptContinuation, 6, kCallbackFn, kThisArg, kArray, \
- kInitialK, kLength, kTo) \
- TFJ(ArrayFilterLoopLazyDeoptContinuation, 8, kCallbackFn, kThisArg, kArray, \
- kInitialK, kLength, kValueK, kTo, kResult) \
+ TFJ(ArrayFilterLoopEagerDeoptContinuation, 6, kReceiver, kCallbackFn, \
+ kThisArg, kArray, kInitialK, kLength, kTo) \
+ TFJ(ArrayFilterLoopLazyDeoptContinuation, 8, kReceiver, kCallbackFn, \
+ kThisArg, kArray, kInitialK, kLength, kValueK, kTo, kResult) \
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayMapLoopEagerDeoptContinuation, 5, kCallbackFn, kThisArg, kArray, \
- kInitialK, kLength) \
- TFJ(ArrayMapLoopLazyDeoptContinuation, 6, kCallbackFn, kThisArg, kArray, \
- kInitialK, kLength, kResult) \
+ TFJ(ArrayMapLoopEagerDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
+ kArray, kInitialK, kLength) \
+ TFJ(ArrayMapLoopLazyDeoptContinuation, 6, kReceiver, kCallbackFn, kThisArg, \
+ kArray, kInitialK, kLength, kResult) \
TFJ(ArrayMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
- TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
- kLength, kAccumulator) \
- TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
- kLength, kResult) \
+ TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kReceiver, kCallbackFn, \
+ kLength) \
+ TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kInitialK, kLength, kAccumulator) \
+ TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
- TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
- kLength, kAccumulator) \
- TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
- kLength, kResult) \
+ TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kReceiver, \
+ kCallbackFn, kLength) \
+ TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kInitialK, kLength, kAccumulator) \
+ TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0) \
+ TFJ(ArrayPrototypeEntries, 0, kReceiver) \
/* ES6 #sec-array.prototype.find */ \
TFS(ArrayFindLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
- kInitialK, kLength) \
- TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
- kLength, kResult) \
- TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
- kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength) \
+ TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
+ TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kReceiver, \
+ kCallbackFn, kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
TFJ(ArrayPrototypeFind, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.findIndex */ \
TFS(ArrayFindIndexLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kArray, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
- kInitialK, kLength) \
- TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
- kInitialK, kLength, kResult) \
- TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
- kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength) \
+ TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kResult) \
+ TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kReceiver, \
+ kCallbackFn, kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
TFJ(ArrayPrototypeFindIndex, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0) \
+ TFJ(ArrayPrototypeKeys, 0, kReceiver) \
/* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0) \
+ TFJ(ArrayPrototypeValues, 0, kReceiver) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0) \
+ TFJ(ArrayIteratorPrototypeNext, 0, kReceiver) \
/* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
kMapperFunction, kThisArg) \
- /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten */ \
- TFJ(ArrayPrototypeFlatten, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat */ \
+ TFJ(ArrayPrototypeFlat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \
TFJ(ArrayPrototypeFlatMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
@@ -374,12 +418,18 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
- TFJ(AsyncFunctionPromiseCreate, 0) \
- TFJ(AsyncFunctionPromiseRelease, 1, kPromise) \
+ TFJ(AsyncFunctionAwaitCaught, 3, kReceiver, kGenerator, kAwaited, \
+ kOuterPromise) \
+ TFJ(AsyncFunctionAwaitCaughtOptimized, 3, kReceiver, kGenerator, kAwaited, \
+ kOuterPromise) \
+ TFJ(AsyncFunctionAwaitUncaught, 3, kReceiver, kGenerator, kAwaited, \
+ kOuterPromise) \
+ TFJ(AsyncFunctionAwaitUncaughtOptimized, 3, kReceiver, kGenerator, kAwaited, \
+ kOuterPromise) \
+ TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
+ TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
+ TFJ(AsyncFunctionPromiseCreate, 0, kReceiver) \
+ TFJ(AsyncFunctionPromiseRelease, 2, kReceiver, kPromise, kCanSuspend) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
@@ -393,9 +443,9 @@ namespace internal {
/* ES #sec-boolean-constructor */ \
CPP(BooleanConstructor) \
/* ES6 #sec-boolean.prototype.tostring */ \
- TFJ(BooleanPrototypeToString, 0) \
+ TFJ(BooleanPrototypeToString, 0, kReceiver) \
/* ES6 #sec-boolean.prototype.valueof */ \
- TFJ(BooleanPrototypeValueOf, 0) \
+ TFJ(BooleanPrototypeValueOf, 0, kReceiver) \
\
/* CallSite */ \
CPP(CallSitePrototypeGetColumnNumber) \
@@ -433,11 +483,8 @@ namespace internal {
CPP(ConsoleCountReset) \
CPP(ConsoleAssert) \
TFJ(FastConsoleAssert, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- CPP(ConsoleMarkTimeline) \
CPP(ConsoleProfile) \
CPP(ConsoleProfileEnd) \
- CPP(ConsoleTimeline) \
- CPP(ConsoleTimelineEnd) \
CPP(ConsoleTime) \
CPP(ConsoleTimeEnd) \
CPP(ConsoleTimeStamp) \
@@ -446,73 +493,50 @@ namespace internal {
/* DataView */ \
/* ES #sec-dataview-constructor */ \
CPP(DataViewConstructor) \
- CPP(DataViewPrototypeGetBuffer) \
- CPP(DataViewPrototypeGetByteLength) \
- CPP(DataViewPrototypeGetByteOffset) \
- CPP(DataViewPrototypeGetInt8) \
- CPP(DataViewPrototypeSetInt8) \
- CPP(DataViewPrototypeGetUint8) \
- CPP(DataViewPrototypeSetUint8) \
- CPP(DataViewPrototypeGetInt16) \
- CPP(DataViewPrototypeSetInt16) \
- CPP(DataViewPrototypeGetUint16) \
- CPP(DataViewPrototypeSetUint16) \
- CPP(DataViewPrototypeGetInt32) \
- CPP(DataViewPrototypeSetInt32) \
- CPP(DataViewPrototypeGetUint32) \
- CPP(DataViewPrototypeSetUint32) \
- CPP(DataViewPrototypeGetFloat32) \
- CPP(DataViewPrototypeSetFloat32) \
- CPP(DataViewPrototypeGetFloat64) \
- CPP(DataViewPrototypeSetFloat64) \
- CPP(DataViewPrototypeGetBigInt64) \
- CPP(DataViewPrototypeSetBigInt64) \
- CPP(DataViewPrototypeGetBigUint64) \
- CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
/* ES #sec-date-constructor */ \
CPP(DateConstructor) \
/* ES6 #sec-date.prototype.getdate */ \
- TFJ(DatePrototypeGetDate, 0) \
+ TFJ(DatePrototypeGetDate, 0, kReceiver) \
/* ES6 #sec-date.prototype.getday */ \
- TFJ(DatePrototypeGetDay, 0) \
+ TFJ(DatePrototypeGetDay, 0, kReceiver) \
/* ES6 #sec-date.prototype.getfullyear */ \
- TFJ(DatePrototypeGetFullYear, 0) \
+ TFJ(DatePrototypeGetFullYear, 0, kReceiver) \
/* ES6 #sec-date.prototype.gethours */ \
- TFJ(DatePrototypeGetHours, 0) \
+ TFJ(DatePrototypeGetHours, 0, kReceiver) \
/* ES6 #sec-date.prototype.getmilliseconds */ \
- TFJ(DatePrototypeGetMilliseconds, 0) \
+ TFJ(DatePrototypeGetMilliseconds, 0, kReceiver) \
/* ES6 #sec-date.prototype.getminutes */ \
- TFJ(DatePrototypeGetMinutes, 0) \
+ TFJ(DatePrototypeGetMinutes, 0, kReceiver) \
/* ES6 #sec-date.prototype.getmonth */ \
- TFJ(DatePrototypeGetMonth, 0) \
+ TFJ(DatePrototypeGetMonth, 0, kReceiver) \
/* ES6 #sec-date.prototype.getseconds */ \
- TFJ(DatePrototypeGetSeconds, 0) \
+ TFJ(DatePrototypeGetSeconds, 0, kReceiver) \
/* ES6 #sec-date.prototype.gettime */ \
- TFJ(DatePrototypeGetTime, 0) \
+ TFJ(DatePrototypeGetTime, 0, kReceiver) \
/* ES6 #sec-date.prototype.gettimezoneoffset */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0) \
+ TFJ(DatePrototypeGetTimezoneOffset, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcdate */ \
- TFJ(DatePrototypeGetUTCDate, 0) \
+ TFJ(DatePrototypeGetUTCDate, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcday */ \
- TFJ(DatePrototypeGetUTCDay, 0) \
+ TFJ(DatePrototypeGetUTCDay, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcfullyear */ \
- TFJ(DatePrototypeGetUTCFullYear, 0) \
+ TFJ(DatePrototypeGetUTCFullYear, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutchours */ \
- TFJ(DatePrototypeGetUTCHours, 0) \
+ TFJ(DatePrototypeGetUTCHours, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcmilliseconds */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0) \
+ TFJ(DatePrototypeGetUTCMilliseconds, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcminutes */ \
- TFJ(DatePrototypeGetUTCMinutes, 0) \
+ TFJ(DatePrototypeGetUTCMinutes, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcmonth */ \
- TFJ(DatePrototypeGetUTCMonth, 0) \
+ TFJ(DatePrototypeGetUTCMonth, 0, kReceiver) \
/* ES6 #sec-date.prototype.getutcseconds */ \
- TFJ(DatePrototypeGetUTCSeconds, 0) \
+ TFJ(DatePrototypeGetUTCSeconds, 0, kReceiver) \
/* ES6 #sec-date.prototype.valueof */ \
- TFJ(DatePrototypeValueOf, 0) \
+ TFJ(DatePrototypeValueOf, 0, kReceiver) \
/* ES6 #sec-date.prototype-@@toprimitive */ \
- TFJ(DatePrototypeToPrimitive, 1, kHint) \
+ TFJ(DatePrototypeToPrimitive, 1, kReceiver, kHint) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -559,7 +583,7 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
ASM(FunctionPrototypeCall) \
/* ES6 #sec-function.prototype-@@hasinstance */ \
- TFJ(FunctionPrototypeHasInstance, 1, kV) \
+ TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \
/* ES6 #sec-function.prototype.tostring */ \
CPP(FunctionPrototypeToString) \
\
@@ -588,9 +612,9 @@ namespace internal {
CPP(GlobalUnescape) \
CPP(GlobalEval) \
/* ES6 #sec-isfinite-number */ \
- TFJ(GlobalIsFinite, 1, kNumber) \
+ TFJ(GlobalIsFinite, 1, kReceiver, kNumber) \
/* ES6 #sec-isnan-number */ \
- TFJ(GlobalIsNaN, 1, kNumber) \
+ TFJ(GlobalIsNaN, 1, kReceiver, kNumber) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -617,112 +641,112 @@ namespace internal {
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(MapPrototypeSet, 2, kKey, kValue) \
- TFJ(MapPrototypeDelete, 1, kKey) \
- TFJ(MapPrototypeGet, 1, kKey) \
- TFJ(MapPrototypeHas, 1, kKey) \
+ TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
+ TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
+ TFJ(MapPrototypeHas, 1, kReceiver, kKey) \
CPP(MapPrototypeClear) \
/* ES #sec-map.prototype.entries */ \
- TFJ(MapPrototypeEntries, 0) \
+ TFJ(MapPrototypeEntries, 0, kReceiver) \
/* ES #sec-get-map.prototype.size */ \
- TFJ(MapPrototypeGetSize, 0) \
+ TFJ(MapPrototypeGetSize, 0, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
TFJ(MapPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
- TFJ(MapPrototypeKeys, 0) \
+ TFJ(MapPrototypeKeys, 0, kReceiver) \
/* ES #sec-map.prototype.values */ \
- TFJ(MapPrototypeValues, 0) \
+ TFJ(MapPrototypeValues, 0, kReceiver) \
/* ES #sec-%mapiteratorprototype%.next */ \
- TFJ(MapIteratorPrototypeNext, 0) \
+ TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
\
/* Math */ \
/* ES6 #sec-math.abs */ \
- TFJ(MathAbs, 1, kX) \
+ TFJ(MathAbs, 1, kReceiver, kX) \
/* ES6 #sec-math.acos */ \
- TFJ(MathAcos, 1, kX) \
+ TFJ(MathAcos, 1, kReceiver, kX) \
/* ES6 #sec-math.acosh */ \
- TFJ(MathAcosh, 1, kX) \
+ TFJ(MathAcosh, 1, kReceiver, kX) \
/* ES6 #sec-math.asin */ \
- TFJ(MathAsin, 1, kX) \
+ TFJ(MathAsin, 1, kReceiver, kX) \
/* ES6 #sec-math.asinh */ \
- TFJ(MathAsinh, 1, kX) \
+ TFJ(MathAsinh, 1, kReceiver, kX) \
/* ES6 #sec-math.atan */ \
- TFJ(MathAtan, 1, kX) \
+ TFJ(MathAtan, 1, kReceiver, kX) \
/* ES6 #sec-math.atanh */ \
- TFJ(MathAtanh, 1, kX) \
+ TFJ(MathAtanh, 1, kReceiver, kX) \
/* ES6 #sec-math.atan2 */ \
- TFJ(MathAtan2, 2, kY, kX) \
+ TFJ(MathAtan2, 2, kReceiver, kY, kX) \
/* ES6 #sec-math.cbrt */ \
- TFJ(MathCbrt, 1, kX) \
+ TFJ(MathCbrt, 1, kReceiver, kX) \
/* ES6 #sec-math.ceil */ \
- TFJ(MathCeil, 1, kX) \
+ TFJ(MathCeil, 1, kReceiver, kX) \
/* ES6 #sec-math.clz32 */ \
- TFJ(MathClz32, 1, kX) \
+ TFJ(MathClz32, 1, kReceiver, kX) \
/* ES6 #sec-math.cos */ \
- TFJ(MathCos, 1, kX) \
+ TFJ(MathCos, 1, kReceiver, kX) \
/* ES6 #sec-math.cosh */ \
- TFJ(MathCosh, 1, kX) \
+ TFJ(MathCosh, 1, kReceiver, kX) \
/* ES6 #sec-math.exp */ \
- TFJ(MathExp, 1, kX) \
+ TFJ(MathExp, 1, kReceiver, kX) \
/* ES6 #sec-math.expm1 */ \
- TFJ(MathExpm1, 1, kX) \
+ TFJ(MathExpm1, 1, kReceiver, kX) \
/* ES6 #sec-math.floor */ \
- TFJ(MathFloor, 1, kX) \
+ TFJ(MathFloor, 1, kReceiver, kX) \
/* ES6 #sec-math.fround */ \
- TFJ(MathFround, 1, kX) \
+ TFJ(MathFround, 1, kReceiver, kX) \
/* ES6 #sec-math.hypot */ \
CPP(MathHypot) \
/* ES6 #sec-math.imul */ \
- TFJ(MathImul, 2, kX, kY) \
+ TFJ(MathImul, 2, kReceiver, kX, kY) \
/* ES6 #sec-math.log */ \
- TFJ(MathLog, 1, kX) \
+ TFJ(MathLog, 1, kReceiver, kX) \
/* ES6 #sec-math.log1p */ \
- TFJ(MathLog1p, 1, kX) \
+ TFJ(MathLog1p, 1, kReceiver, kX) \
/* ES6 #sec-math.log10 */ \
- TFJ(MathLog10, 1, kX) \
+ TFJ(MathLog10, 1, kReceiver, kX) \
/* ES6 #sec-math.log2 */ \
- TFJ(MathLog2, 1, kX) \
+ TFJ(MathLog2, 1, kReceiver, kX) \
/* ES6 #sec-math.max */ \
TFJ(MathMax, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-math.min */ \
TFJ(MathMin, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-math.pow */ \
- TFJ(MathPow, 2, kBase, kExponent) \
+ TFJ(MathPow, 2, kReceiver, kBase, kExponent) \
/* ES6 #sec-math.random */ \
- TFJ(MathRandom, 0) \
+ TFJ(MathRandom, 0, kReceiver) \
/* ES6 #sec-math.round */ \
- TFJ(MathRound, 1, kX) \
+ TFJ(MathRound, 1, kReceiver, kX) \
/* ES6 #sec-math.sign */ \
- TFJ(MathSign, 1, kX) \
+ TFJ(MathSign, 1, kReceiver, kX) \
/* ES6 #sec-math.sin */ \
- TFJ(MathSin, 1, kX) \
+ TFJ(MathSin, 1, kReceiver, kX) \
/* ES6 #sec-math.sinh */ \
- TFJ(MathSinh, 1, kX) \
+ TFJ(MathSinh, 1, kReceiver, kX) \
/* ES6 #sec-math.sqrt */ \
- TFJ(MathTan, 1, kX) \
+ TFJ(MathTan, 1, kReceiver, kX) \
/* ES6 #sec-math.tan */ \
- TFJ(MathTanh, 1, kX) \
+ TFJ(MathTanh, 1, kReceiver, kX) \
/* ES6 #sec-math.tanh */ \
- TFJ(MathSqrt, 1, kX) \
+ TFJ(MathSqrt, 1, kReceiver, kX) \
/* ES6 #sec-math.trunc */ \
- TFJ(MathTrunc, 1, kX) \
+ TFJ(MathTrunc, 1, kReceiver, kX) \
\
/* Number */ \
TFC(AllocateHeapNumber, AllocateHeapNumber, 1) \
/* ES #sec-number-constructor */ \
TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
- TFJ(NumberIsFinite, 1, kNumber) \
+ TFJ(NumberIsFinite, 1, kReceiver, kNumber) \
/* ES6 #sec-number.isinteger */ \
- TFJ(NumberIsInteger, 1, kNumber) \
+ TFJ(NumberIsInteger, 1, kReceiver, kNumber) \
/* ES6 #sec-number.isnan */ \
- TFJ(NumberIsNaN, 1, kNumber) \
+ TFJ(NumberIsNaN, 1, kReceiver, kNumber) \
/* ES6 #sec-number.issafeinteger */ \
- TFJ(NumberIsSafeInteger, 1, kNumber) \
+ TFJ(NumberIsSafeInteger, 1, kReceiver, kNumber) \
/* ES6 #sec-number.parsefloat */ \
- TFJ(NumberParseFloat, 1, kString) \
+ TFJ(NumberParseFloat, 1, kReceiver, kString) \
/* ES6 #sec-number.parseint */ \
- TFJ(NumberParseInt, 2, kString, kRadix) \
+ TFJ(NumberParseInt, 2, kReceiver, kString, kRadix) \
TFS(ParseInt, kString, kRadix) \
CPP(NumberPrototypeToExponential) \
CPP(NumberPrototypeToFixed) \
@@ -730,7 +754,7 @@ namespace internal {
CPP(NumberPrototypeToPrecision) \
CPP(NumberPrototypeToString) \
/* ES6 #sec-number.prototype.valueof */ \
- TFJ(NumberPrototypeValueOf, 0) \
+ TFJ(NumberPrototypeValueOf, 0, kReceiver) \
TFC(Add, BinaryOp, 1) \
TFC(Subtract, BinaryOp, 1) \
TFC(Multiply, BinaryOp, 1) \
@@ -766,37 +790,37 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- TFJ(ObjectEntries, 1, kObject) \
+ TFJ(ObjectEntries, 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
- CPP(ObjectGetOwnPropertyNames) \
+ TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
CPP(ObjectGetPrototypeOf) \
CPP(ObjectSetPrototypeOf) \
- TFJ(ObjectIs, 2, kLeft, kRight) \
+ TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
- TFJ(ObjectKeys, 1, kObject) \
+ TFJ(ObjectKeys, 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
CPP(ObjectPreventExtensions) \
/* ES6 #sec-object.prototype.tostring */ \
- TFJ(ObjectPrototypeToString, 0) \
+ TFJ(ObjectPrototypeToString, 0, kReceiver) \
/* ES6 #sec-object.prototype.valueof */ \
- TFJ(ObjectPrototypeValueOf, 0) \
+ TFJ(ObjectPrototypeValueOf, 0, kReceiver) \
/* ES6 #sec-object.prototype.hasownproperty */ \
- TFJ(ObjectPrototypeHasOwnProperty, 1, kKey) \
- TFJ(ObjectPrototypeIsPrototypeOf, 1, kValue) \
+ TFJ(ObjectPrototypeHasOwnProperty, 1, kReceiver, kKey) \
+ TFJ(ObjectPrototypeIsPrototypeOf, 1, kReceiver, kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
/* ES #sec-object.prototype.tolocalestring */ \
- TFJ(ObjectPrototypeToLocaleString, 0) \
+ TFJ(ObjectPrototypeToLocaleString, 0, kReceiver) \
CPP(ObjectSeal) \
- TFJ(ObjectValues, 1, kObject) \
+ TFJ(ObjectValues, 1, kReceiver, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -815,57 +839,57 @@ namespace internal {
/* Starting at step 6 of "Promise Resolve Functions" */ \
TFS(ResolvePromise, kPromise, kResolution) \
/* ES #sec-promise-reject-functions */ \
- TFJ(PromiseCapabilityDefaultReject, 1, kReason) \
+ TFJ(PromiseCapabilityDefaultReject, 1, kReceiver, kReason) \
/* ES #sec-promise-resolve-functions */ \
- TFJ(PromiseCapabilityDefaultResolve, 1, kResolution) \
+ TFJ(PromiseCapabilityDefaultResolve, 1, kReceiver, kResolution) \
/* ES6 #sec-getcapabilitiesexecutor-functions */ \
- TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
+ TFJ(PromiseGetCapabilitiesExecutor, 2, kReceiver, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
- TFJ(PromiseConstructorLazyDeoptContinuation, 4, kPromise, kReject, \
- kException, kResult) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 4, kReceiver, kPromise, \
+ kReject, kException, kResult) \
/* ES6 #sec-promise-executor */ \
- TFJ(PromiseConstructor, 1, kExecutor) \
+ TFJ(PromiseConstructor, 1, kReceiver, kExecutor) \
CPP(IsPromise) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromisePrototypeThen, 2, kOnFulfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kReceiver, kOnFulfilled, kOnRejected) \
/* ES #sec-performpromisethen */ \
TFS(PerformPromiseThen, kPromise, kOnFulfilled, kOnRejected, kResultPromise) \
/* ES #sec-promise.prototype.catch */ \
- TFJ(PromisePrototypeCatch, 1, kOnRejected) \
+ TFJ(PromisePrototypeCatch, 1, kReceiver, kOnRejected) \
/* ES #sec-promisereactionjob */ \
TFS(PromiseRejectReactionJob, kReason, kHandler, kPromiseOrCapability) \
TFS(PromiseFulfillReactionJob, kValue, kHandler, kPromiseOrCapability) \
/* ES #sec-promiseresolvethenablejob */ \
TFS(PromiseResolveThenableJob, kPromiseToResolve, kThenable, kThen) \
/* ES #sec-promise.resolve */ \
- TFJ(PromiseResolveTrampoline, 1, kValue) \
+ TFJ(PromiseResolveTrampoline, 1, kReceiver, kValue) \
/* ES #sec-promise-resolve */ \
TFS(PromiseResolve, kConstructor, kValue) \
/* ES #sec-promise.reject */ \
- TFJ(PromiseReject, 1, kReason) \
- TFJ(PromisePrototypeFinally, 1, kOnFinally) \
- TFJ(PromiseThenFinally, 1, kValue) \
- TFJ(PromiseCatchFinally, 1, kReason) \
- TFJ(PromiseValueThunkFinally, 0) \
- TFJ(PromiseThrowerFinally, 0) \
+ TFJ(PromiseReject, 1, kReceiver, kReason) \
+ TFJ(PromisePrototypeFinally, 1, kReceiver, kOnFinally) \
+ TFJ(PromiseThenFinally, 1, kReceiver, kValue) \
+ TFJ(PromiseCatchFinally, 1, kReceiver, kReason) \
+ TFJ(PromiseValueThunkFinally, 0, kReceiver) \
+ TFJ(PromiseThrowerFinally, 0, kReceiver) \
/* ES #sec-promise.all */ \
- TFJ(PromiseAll, 1, kIterable) \
- TFJ(PromiseAllResolveElementClosure, 1, kValue) \
+ TFJ(PromiseAll, 1, kReceiver, kIterable) \
+ TFJ(PromiseAllResolveElementClosure, 1, kReceiver, kValue) \
/* ES #sec-promise.race */ \
- TFJ(PromiseRace, 1, kIterable) \
+ TFJ(PromiseRace, 1, kReceiver, kIterable) \
/* V8 Extras: v8.createPromise(parent) */ \
- TFJ(PromiseInternalConstructor, 1, kParent) \
+ TFJ(PromiseInternalConstructor, 1, kReceiver, kParent) \
/* V8 Extras: v8.rejectPromise(promise, reason) */ \
- TFJ(PromiseInternalReject, 2, kPromise, kReason) \
+ TFJ(PromiseInternalReject, 2, kReceiver, kPromise, kReason) \
/* V8 Extras: v8.resolvePromise(promise, resolution) */ \
- TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
+ TFJ(PromiseInternalResolve, 2, kReceiver, kPromise, kResolution) \
\
/* Proxy */ \
- TFJ(ProxyConstructor, 2, kTarget, kHandler) \
- TFJ(ProxyRevocable, 2, kTarget, kHandler) \
- TFJ(ProxyRevoke, 0) \
- TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
+ TFJ(ProxyConstructor, 2, kReceiver, kTarget, kHandler) \
+ TFJ(ProxyRevocable, 2, kReceiver, kTarget, kHandler) \
+ TFJ(ProxyRevoke, 0, kReceiver) \
+ TFS(ProxyGetProperty, kProxy, kName, kReceiverValue, kOnNonExistent) \
TFS(ProxyHasProperty, kProxy, kName) \
TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
\
@@ -877,7 +901,7 @@ namespace internal {
CPP(ReflectGet) \
CPP(ReflectGetOwnPropertyDescriptor) \
CPP(ReflectGetPrototypeOf) \
- TFJ(ReflectHas, 2, kTarget, kKey) \
+ TFJ(ReflectHas, 2, kReceiver, kTarget, kKey) \
CPP(ReflectIsExtensible) \
CPP(ReflectOwnKeys) \
CPP(ReflectPreventExtensions) \
@@ -895,42 +919,43 @@ namespace internal {
CPP(RegExpCapture8Getter) \
CPP(RegExpCapture9Getter) \
/* ES #sec-regexp-pattern-flags */ \
- TFJ(RegExpConstructor, 2, kPattern, kFlags) \
- TFJ(RegExpInternalMatch, 2, kRegExp, kString) \
+ TFJ(RegExpConstructor, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpInternalMatch, 2, kReceiver, kRegExp, kString) \
CPP(RegExpInputGetter) \
CPP(RegExpInputSetter) \
CPP(RegExpLastMatchGetter) \
CPP(RegExpLastParenGetter) \
CPP(RegExpLeftContextGetter) \
/* ES #sec-regexp.prototype.compile */ \
- TFJ(RegExpPrototypeCompile, 2, kPattern, kFlags) \
+ TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \
/* ES #sec-regexp.prototype.exec */ \
- TFJ(RegExpPrototypeExec, 1, kString) \
+ TFJ(RegExpPrototypeExec, 1, kReceiver, kString) \
/* ES #sec-get-regexp.prototype.dotAll */ \
- TFJ(RegExpPrototypeDotAllGetter, 0) \
+ TFJ(RegExpPrototypeDotAllGetter, 0, kReceiver) \
/* ES #sec-get-regexp.prototype.flags */ \
- TFJ(RegExpPrototypeFlagsGetter, 0) \
+ TFJ(RegExpPrototypeFlagsGetter, 0, kReceiver) \
/* ES #sec-get-regexp.prototype.global */ \
- TFJ(RegExpPrototypeGlobalGetter, 0) \
+ TFJ(RegExpPrototypeGlobalGetter, 0, kReceiver) \
/* ES #sec-get-regexp.prototype.ignorecase */ \
- TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
+ TFJ(RegExpPrototypeIgnoreCaseGetter, 0, kReceiver) \
/* ES #sec-regexp.prototype-@@match */ \
- TFJ(RegExpPrototypeMatch, 1, kString) \
+ TFJ(RegExpPrototypeMatch, 1, kReceiver, kString) \
/* https://tc39.github.io/proposal-string-matchall/ */ \
- TFJ(RegExpPrototypeMatchAll, 1, kString) \
+ TFJ(RegExpPrototypeMatchAll, 1, kReceiver, kString) \
/* ES #sec-get-regexp.prototype.multiline */ \
- TFJ(RegExpPrototypeMultilineGetter, 0) \
+ TFJ(RegExpPrototypeMultilineGetter, 0, kReceiver) \
/* ES #sec-regexp.prototype-@@search */ \
- TFJ(RegExpPrototypeSearch, 1, kString) \
+ TFJ(RegExpPrototypeSearch, 1, kReceiver, kString) \
/* ES #sec-get-regexp.prototype.source */ \
- TFJ(RegExpPrototypeSourceGetter, 0) \
+ TFJ(RegExpPrototypeSourceGetter, 0, kReceiver) \
/* ES #sec-get-regexp.prototype.sticky */ \
- TFJ(RegExpPrototypeStickyGetter, 0) \
+ TFJ(RegExpPrototypeStickyGetter, 0, kReceiver) \
/* ES #sec-regexp.prototype.test */ \
- TFJ(RegExpPrototypeTest, 1, kString) \
+ TFJ(RegExpPrototypeTest, 1, kReceiver, kString) \
+ TFS(RegExpPrototypeTestFast, kReceiver, kString) \
CPP(RegExpPrototypeToString) \
/* ES #sec-get-regexp.prototype.unicode */ \
- TFJ(RegExpPrototypeUnicodeGetter, 0) \
+ TFJ(RegExpPrototypeUnicodeGetter, 0, kReceiver) \
CPP(RegExpRightContextGetter) \
\
/* ES #sec-regexp.prototype-@@replace */ \
@@ -939,6 +964,7 @@ namespace internal {
TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* RegExp helpers */ \
TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \
+ TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \
TFS(RegExpMatchFast, kReceiver, kPattern) \
TFS(RegExpPrototypeExecSlow, kReceiver, kString) \
TFS(RegExpReplace, kRegExp, kString, kReplaceValue) \
@@ -947,37 +973,38 @@ namespace internal {
\
/* RegExp String Iterator */ \
/* https://tc39.github.io/proposal-string-matchall/ */ \
- TFJ(RegExpStringIteratorPrototypeNext, 0) \
+ TFJ(RegExpStringIteratorPrototypeNext, 0, kReceiver) \
\
/* Set */ \
TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(SetPrototypeHas, 1, kKey) \
- TFJ(SetPrototypeAdd, 1, kKey) \
- TFJ(SetPrototypeDelete, 1, kKey) \
+ TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
+ TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
+ TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
CPP(SetPrototypeClear) \
/* ES #sec-set.prototype.entries */ \
- TFJ(SetPrototypeEntries, 0) \
+ TFJ(SetPrototypeEntries, 0, kReceiver) \
/* ES #sec-get-set.prototype.size */ \
- TFJ(SetPrototypeGetSize, 0) \
+ TFJ(SetPrototypeGetSize, 0, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
TFJ(SetPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
- TFJ(SetPrototypeValues, 0) \
+ TFJ(SetPrototypeValues, 0, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
- TFJ(SetIteratorPrototypeNext, 0) \
+ TFJ(SetIteratorPrototypeNext, 0, kReceiver) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
- TFJ(AtomicsLoad, 2, kArray, kIndex) \
- TFJ(AtomicsStore, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsExchange, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsCompareExchange, 4, kArray, kIndex, kOldValue, kNewValue) \
- TFJ(AtomicsAdd, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsSub, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsAnd, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsOr, 3, kArray, kIndex, kValue) \
- TFJ(AtomicsXor, 3, kArray, kIndex, kValue) \
+ TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
+ TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsCompareExchange, 4, kReceiver, kArray, kIndex, kOldValue, \
+ kNewValue) \
+ TFJ(AtomicsAdd, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsSub, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \
CPP(AtomicsIsLockFree) \
CPP(AtomicsWait) \
CPP(AtomicsWake) \
@@ -990,44 +1017,44 @@ namespace internal {
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.anchor */ \
- TFJ(StringPrototypeAnchor, 1, kValue) \
+ TFJ(StringPrototypeAnchor, 1, kReceiver, kValue) \
/* ES6 #sec-string.prototype.big */ \
- TFJ(StringPrototypeBig, 0) \
+ TFJ(StringPrototypeBig, 0, kReceiver) \
/* ES6 #sec-string.prototype.blink */ \
- TFJ(StringPrototypeBlink, 0) \
+ TFJ(StringPrototypeBlink, 0, kReceiver) \
/* ES6 #sec-string.prototype.bold */ \
- TFJ(StringPrototypeBold, 0) \
+ TFJ(StringPrototypeBold, 0, kReceiver) \
/* ES6 #sec-string.prototype.charat */ \
- TFJ(StringPrototypeCharAt, 1, kPosition) \
+ TFJ(StringPrototypeCharAt, 1, kReceiver, kPosition) \
/* ES6 #sec-string.prototype.charcodeat */ \
- TFJ(StringPrototypeCharCodeAt, 1, kPosition) \
+ TFJ(StringPrototypeCharCodeAt, 1, kReceiver, kPosition) \
/* ES6 #sec-string.prototype.codepointat */ \
- TFJ(StringPrototypeCodePointAt, 1, kPosition) \
+ TFJ(StringPrototypeCodePointAt, 1, kReceiver, kPosition) \
/* ES6 #sec-string.prototype.concat */ \
TFJ(StringPrototypeConcat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.endswith */ \
CPP(StringPrototypeEndsWith) \
/* ES6 #sec-string.prototype.fontcolor */ \
- TFJ(StringPrototypeFontcolor, 1, kValue) \
+ TFJ(StringPrototypeFontcolor, 1, kReceiver, kValue) \
/* ES6 #sec-string.prototype.fontsize */ \
- TFJ(StringPrototypeFontsize, 1, kValue) \
+ TFJ(StringPrototypeFontsize, 1, kReceiver, kValue) \
/* ES6 #sec-string.prototype.fixed */ \
- TFJ(StringPrototypeFixed, 0) \
+ TFJ(StringPrototypeFixed, 0, kReceiver) \
/* ES6 #sec-string.prototype.includes */ \
TFJ(StringPrototypeIncludes, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.indexof */ \
TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.italics */ \
- TFJ(StringPrototypeItalics, 0) \
+ TFJ(StringPrototypeItalics, 0, kReceiver) \
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES6 #sec-string.prototype.link */ \
- TFJ(StringPrototypeLink, 1, kValue) \
+ TFJ(StringPrototypeLink, 1, kReceiver, kValue) \
/* ES6 #sec-string.prototype.match */ \
- TFJ(StringPrototypeMatch, 1, kRegexp) \
+ TFJ(StringPrototypeMatch, 1, kReceiver, kRegexp) \
/* ES #sec-string.prototype.matchAll */ \
- TFJ(StringPrototypeMatchAll, 1, kRegexp) \
+ TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.padEnd */ \
@@ -1036,46 +1063,46 @@ namespace internal {
TFJ(StringPrototypePadStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.repeat */ \
- TFJ(StringPrototypeRepeat, 1, kCount) \
+ TFJ(StringPrototypeRepeat, 1, kReceiver, kCount) \
/* ES6 #sec-string.prototype.replace */ \
- TFJ(StringPrototypeReplace, 2, kSearch, kReplace) \
+ TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
/* ES6 #sec-string.prototype.search */ \
- TFJ(StringPrototypeSearch, 1, kRegexp) \
+ TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.slice */ \
TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.small */ \
- TFJ(StringPrototypeSmall, 0) \
+ TFJ(StringPrototypeSmall, 0, kReceiver) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.strike */ \
- TFJ(StringPrototypeStrike, 0) \
+ TFJ(StringPrototypeStrike, 0, kReceiver) \
/* ES6 #sec-string.prototype.sub */ \
- TFJ(StringPrototypeSub, 0) \
+ TFJ(StringPrototypeSub, 0, kReceiver) \
/* ES6 #sec-string.prototype.substr */ \
TFJ(StringPrototypeSubstr, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substring */ \
TFJ(StringPrototypeSubstring, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.sup */ \
- TFJ(StringPrototypeSup, 0) \
+ TFJ(StringPrototypeSup, 0, kReceiver) \
/* ES6 #sec-string.prototype.startswith */ \
CPP(StringPrototypeStartsWith) \
/* ES6 #sec-string.prototype.tostring */ \
- TFJ(StringPrototypeToString, 0) \
+ TFJ(StringPrototypeToString, 0, kReceiver) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(StringPrototypeTrimStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
- TFJ(StringPrototypeValueOf, 0) \
+ TFJ(StringPrototypeValueOf, 0, kReceiver) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
/* ES6 #sec-string.prototype-@@iterator */ \
- TFJ(StringPrototypeIterator, 0) \
+ TFJ(StringPrototypeIterator, 0, kReceiver) \
\
/* StringIterator */ \
/* ES6 #sec-%stringiteratorprototype%.next */ \
- TFJ(StringIteratorPrototypeNext, 0) \
+ TFJ(StringIteratorPrototypeNext, 0, kReceiver) \
\
/* Symbol */ \
/* ES #sec-symbol-constructor */ \
@@ -1084,12 +1111,14 @@ namespace internal {
CPP(SymbolFor) \
/* ES6 #sec-symbol.keyfor */ \
CPP(SymbolKeyFor) \
+ /* ES #sec-symbol.prototype.description */ \
+ TFJ(SymbolPrototypeDescriptionGetter, 0, kReceiver) \
/* ES6 #sec-symbol.prototype-@@toprimitive */ \
- TFJ(SymbolPrototypeToPrimitive, 1, kHint) \
+ TFJ(SymbolPrototypeToPrimitive, 1, kReceiver, kHint) \
/* ES6 #sec-symbol.prototype.tostring */ \
- TFJ(SymbolPrototypeToString, 0) \
+ TFJ(SymbolPrototypeToString, 0, kReceiver) \
/* ES6 #sec-symbol.prototype.valueof */ \
- TFJ(SymbolPrototypeValueOf, 0) \
+ TFJ(SymbolPrototypeValueOf, 0, kReceiver) \
\
/* TypedArray */ \
TFS(IterableToList, kIterable, kIteratorFn) \
@@ -1099,22 +1128,22 @@ namespace internal {
kByteOffset) \
/* ES #sec-typedarray-constructors */ \
TFS(CreateTypedArray, kTarget, kNewTarget, kArg1, kArg2, kArg3) \
- TFJ(TypedArrayBaseConstructor, 0) \
- TFJ(TypedArrayConstructorLazyDeoptContinuation, 1, kResult) \
+ TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
+ TFJ(TypedArrayConstructorLazyDeoptContinuation, 1, kReceiver, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
- TFJ(TypedArrayPrototypeByteLength, 0) \
+ TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.byteoffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0) \
+ TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0) \
+ TFJ(TypedArrayPrototypeLength, 0, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.entries */ \
- TFJ(TypedArrayPrototypeEntries, 0) \
+ TFJ(TypedArrayPrototypeEntries, 0, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.keys */ \
- TFJ(TypedArrayPrototypeKeys, 0) \
+ TFJ(TypedArrayPrototypeKeys, 0, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.values */ \
- TFJ(TypedArrayPrototypeValues, 0) \
+ TFJ(TypedArrayPrototypeValues, 0, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.copywithin */ \
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
@@ -1145,7 +1174,7 @@ namespace internal {
TFJ(TypedArrayPrototypeSubArray, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
- TFJ(TypedArrayPrototypeToStringTag, 0) \
+ TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
/* ES6 %TypedArray%.prototype.every */ \
TFJ(TypedArrayPrototypeEvery, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1167,34 +1196,37 @@ namespace internal {
TFJ(TypedArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.from */ \
TFJ(TypedArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFS(TypedArrayLoadElementAsTagged, kArray, kKind, kIndex) \
- TFS(TypedArrayStoreElementFromTagged, kArray, kKind, kIndex, kValue) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
- TFC(WasmStackGuard, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapUnreachable, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapMemOutOfBounds, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapDivByZero, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapDivUnrepresentable, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapRemByZero, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapFloatUnrepresentable, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapFuncInvalid, WasmRuntimeCall, 1) \
- TFC(ThrowWasmTrapFuncSigMismatch, WasmRuntimeCall, 1) \
+ TFC(WasmAllocateHeapNumber, AllocateHeapNumber, 1) \
+ TFC(WasmArgumentsAdaptor, ArgumentAdaptor, 1) \
+ TFC(WasmCallJavaScript, CallTrampoline, 1) \
+ TFC(WasmGrowMemory, WasmGrowMemory, 1) \
+ TFC(WasmStackGuard, NoContext, 1) \
+ TFC(WasmToNumber, TypeConversion, 1) \
+ TFS(ThrowWasmTrapUnreachable) \
+ TFS(ThrowWasmTrapMemOutOfBounds) \
+ TFS(ThrowWasmTrapDivByZero) \
+ TFS(ThrowWasmTrapDivUnrepresentable) \
+ TFS(ThrowWasmTrapRemByZero) \
+ TFS(ThrowWasmTrapFloatUnrepresentable) \
+ TFS(ThrowWasmTrapFuncInvalid) \
+ TFS(ThrowWasmTrapFuncSigMismatch) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
- TFJ(WeakMapGet, 1, kKey) \
- TFJ(WeakMapHas, 1, kKey) \
- TFJ(WeakMapPrototypeSet, 2, kKey, kValue) \
- TFJ(WeakMapPrototypeDelete, 1, kKey) \
+ TFJ(WeakMapGet, 1, kReceiver, kKey) \
+ TFJ(WeakMapHas, 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeSet, 2, kReceiver, kKey, kValue) \
+ TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
\
/* WeakSet */ \
TFJ(WeakSetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(WeakSetHas, 1, kKey) \
- TFJ(WeakSetPrototypeAdd, 1, kValue) \
- TFJ(WeakSetPrototypeDelete, 1, kValue) \
+ TFJ(WeakSetHas, 1, kReceiver, kKey) \
+ TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
\
/* WeakSet / WeakMap Helpers */ \
TFS(WeakCollectionDelete, kCollection, kKey) \
@@ -1226,27 +1258,30 @@ namespace internal {
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kValue) \
+ TFJ(AsyncGeneratorAwaitCaught, 2, kReceiver, kGenerator, kAwaited) \
+ TFJ(AsyncGeneratorAwaitUncaught, 2, kReceiver, kGenerator, kAwaited) \
+ TFJ(AsyncGeneratorAwaitResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorAwaitRejectClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorYieldResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnResolveClosure, 1, kReceiver, kValue) \
\
/* Async-from-Sync Iterator */ \
\
/* %AsyncFromSyncIteratorPrototype% */ \
/* See tc39.github.io/proposal-async-iteration/ */ \
/* #sec-%asyncfromsynciteratorprototype%-object) */ \
- TFJ(AsyncFromSyncIteratorPrototypeNext, 1, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeNext, 1, kReceiver, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeNextOptimized, 1, kReceiver, kValue) \
/* #sec-%asyncfromsynciteratorprototype%.throw */ \
- TFJ(AsyncFromSyncIteratorPrototypeThrow, 1, kReason) \
+ TFJ(AsyncFromSyncIteratorPrototypeThrow, 1, kReceiver, kReason) \
+ TFJ(AsyncFromSyncIteratorPrototypeThrowOptimized, 1, kReceiver, kReason) \
/* #sec-%asyncfromsynciteratorprototype%.return */ \
- TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kReceiver, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeReturnOptimized, 1, kReceiver, kValue) \
/* #sec-async-iterator-value-unwrap-functions */ \
- TFJ(AsyncIteratorValueUnwrap, 1, kValue) \
+ TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
\
/* CEntry */ \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit) \
@@ -1260,50 +1295,59 @@ namespace internal {
ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit) \
ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit) \
\
- /* StringAdd */ \
+ /* String helpers */ \
TFS(StringAdd_CheckNone_NotTenured, kLeft, kRight) \
TFS(StringAdd_CheckNone_Tenured, kLeft, kRight) \
TFS(StringAdd_ConvertLeft_NotTenured, kLeft, kRight) \
TFS(StringAdd_ConvertRight_NotTenured, kLeft, kRight) \
+ TFS(SubString, kString, kFrom, kTo) \
\
/* Miscellaneous */ \
+ ASM(CallApiCallback_Argc0) \
+ ASM(CallApiCallback_Argc1) \
+ ASM(CallApiGetter) \
ASM(DoubleToI) \
TFC(GetProperty, GetProperty, 1) \
- ASM(MathPowInternal) \
- \
- /* Trace */ \
- CPP(IsTraceCategoryEnabled) \
- CPP(Trace)
+ ASM(MathPowInternal)
#ifdef V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- \
- TFS(StringToLowerCaseIntl, kString) \
- /* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0) \
- /* ES #sec-string.prototype.touppercase */ \
- CPP(StringPrototypeToUpperCaseIntl) \
- /* ES #sec-string.prototype.normalize */ \
- CPP(StringPrototypeNormalizeIntl) \
- /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
- CPP(NumberFormatPrototypeFormatToParts) \
- /* ecma402 #sec-intl.datetimeformat.prototype.formattoparts */ \
- CPP(DateTimeFormatPrototypeFormatToParts) \
- /* ecma402 #new proposal */ \
- CPP(LocaleConstructor) \
- CPP(LocalePrototypeLanguage) \
- CPP(LocalePrototypeScript) \
- CPP(LocalePrototypeRegion) \
- CPP(LocalePrototypeBaseName) \
- CPP(LocalePrototypeCalendar) \
- CPP(LocalePrototypeCaseFirst) \
- CPP(LocalePrototypeCollation) \
- CPP(LocalePrototypeHourCycle) \
- CPP(LocalePrototypeNumeric) \
- CPP(LocalePrototypeNumberingSystem) \
- CPP(LocalePrototypeToString)
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ \
+ TFS(StringToLowerCaseIntl, kString) \
+ /* ES #sec-string.prototype.tolowercase */ \
+ TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
+ /* ES #sec-string.prototype.touppercase */ \
+ CPP(StringPrototypeToUpperCaseIntl) \
+ /* ES #sec-string.prototype.normalize */ \
+ CPP(StringPrototypeNormalizeIntl) \
+ /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
+ CPP(NumberFormatPrototypeFormatToParts) \
+ /* ecma402 #sec-intl.datetimeformat.prototype.formattoparts */ \
+ CPP(DateTimeFormatPrototypeFormatToParts) \
+ /* ecma402 #new proposal */ \
+ /* ecma402 #sec-intl-locale-constructor */ \
+ CPP(LocaleConstructor) \
+ CPP(LocalePrototypeLanguage) \
+ CPP(LocalePrototypeScript) \
+ CPP(LocalePrototypeRegion) \
+ CPP(LocalePrototypeBaseName) \
+ CPP(LocalePrototypeCalendar) \
+ CPP(LocalePrototypeCaseFirst) \
+ CPP(LocalePrototypeCollation) \
+ CPP(LocalePrototypeHourCycle) \
+ CPP(LocalePrototypeNumeric) \
+ CPP(LocalePrototypeNumberingSystem) \
+ CPP(LocalePrototypeToString) \
+ /* ecma402 #sec-number-format-functions */ \
+ CPP(NumberFormatInternalFormatNumber) \
+ /* ecma402 #sec-intl.numberformat.prototype.format */ \
+ CPP(NumberFormatPrototypeFormatNumber) \
+ /* ecma402 #sec-intl-relativetimeformat-constructor */ \
+ CPP(RelativeTimeFormatConstructor) \
+ /* ecma402 #sec-intl.relativetimeformat.prototype.resolvedoptions */ \
+ CPP(RelativeTimeFormatPrototypeResolvedOptions)
#else
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
@@ -1326,9 +1370,14 @@ namespace internal {
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
+ V(AsyncFromSyncIteratorPrototypeNextOptimized) \
+ V(AsyncFromSyncIteratorPrototypeThrowOptimized) \
+ V(AsyncFromSyncIteratorPrototypeReturnOptimized) \
V(AsyncFromSyncIteratorPrototypeThrow) \
V(AsyncFunctionAwaitCaught) \
+ V(AsyncFunctionAwaitCaughtOptimized) \
V(AsyncFunctionAwaitUncaught) \
+ V(AsyncFunctionAwaitUncaughtOptimized) \
V(AsyncGeneratorResolve) \
V(AsyncGeneratorAwaitCaught) \
V(AsyncGeneratorAwaitUncaught) \
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 6026b9f721..abc5d58e27 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -6,22 +6,27 @@
#define V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
#include "src/builtins/builtins.h"
+#include "src/compiler/code-assembler.h"
#include "src/interface-descriptors.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
// Define interface descriptors for builtins with JS linkage.
-#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
- struct Builtin_##Name##_InterfaceDescriptor { \
- enum ParameterIndices { \
- kReceiver, \
- ##__VA_ARGS__, \
- kNewTarget, \
- kActualArgumentsCount, \
- kContext, \
- kParameterCount, \
- }; \
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ ##__VA_ARGS__, \
+ kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
// Define interface descriptors for builtins with StubCall linkage.
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 4e5276484f..0043c42810 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -75,7 +75,7 @@ BUILTIN(ErrorCaptureStackTrace) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::SetAccessor(object, name, error_stack, DONT_ENUM));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// ES6 section 19.5.3.4 Error.prototype.toString ( )
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index d53f89418c..4924d4c0c4 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -17,9 +17,9 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* new_target = Parameter(Descriptor::kJSNewTarget);
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
@@ -49,7 +49,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// Minimum descriptor array length required for fast path.
const int min_descriptors_length = DescriptorArray::LengthFor(Max(
JSFunction::kLengthDescriptorIndex, JSFunction::kNameDescriptorIndex));
- TNode<Smi> descriptors_length = LoadFixedArrayBaseLength(descriptors);
+ TNode<Smi> descriptors_length = LoadWeakFixedArrayLength(descriptors);
GotoIf(SmiLessThanOrEqual(descriptors_length,
SmiConstant(min_descriptors_length)),
&slow);
@@ -60,25 +60,25 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Comment("Check name and length properties");
{
const int length_index = JSFunction::kLengthDescriptorIndex;
- TNode<Name> maybe_length = CAST(LoadFixedArrayElement(
+ TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(length_index)));
GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
&slow);
- TNode<Object> maybe_length_accessor = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(length_index));
+ TNode<Object> maybe_length_accessor = CAST(LoadWeakFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(length_index)));
GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
- TNode<Name> maybe_name = CAST(LoadFixedArrayElement(
+ TNode<Name> maybe_name = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(name_index)));
GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
&slow);
- TNode<Object> maybe_name_accessor = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(name_index));
+ TNode<Object> maybe_name_accessor = CAST(LoadWeakFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(name_index)));
GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
TNode<Map> name_value_map = LoadMap(CAST(maybe_name_accessor));
GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
@@ -121,11 +121,10 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Label empty_arguments(this);
Label arguments_done(this, &argument_array);
GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
- Node* elements_length =
- ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))));
- Node* elements =
- AllocateFixedArray(PACKED_ELEMENTS, elements_length, INTPTR_PARAMETERS,
- kAllowLargeObjectAllocation);
+ TNode<IntPtrT> elements_length =
+ Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1)))));
+ Node* elements = AllocateFixedArray(PACKED_ELEMENTS, elements_length,
+ kAllowLargeObjectAllocation);
VARIABLE(index, MachineType::PointerRepresentation());
index.Bind(IntPtrConstant(0));
VariableList foreach_vars({&index}, zone());
@@ -185,10 +184,14 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
}
BIND(&slow);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- TailCallStub(CodeFactory::FunctionPrototypeBind(isolate()), context, target,
- new_target, argc);
+ {
+ // We are not using Parameter(Descriptor::kJSTarget) and loading the value
+ // from the current frame here in order to reduce register pressure on the
+ // fast path.
+ TNode<JSFunction> target = LoadTargetFromFrame();
+ TailCallBuiltin(Builtins::kFunctionPrototypeBind, context, target,
+ new_target, argc);
+ }
}
// ES6 #sec-function.prototype-@@hasinstance
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index df1dc19d82..663eedc29a 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -53,7 +53,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
Handle<String> param;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, param, Object::ToString(isolate, args.at(i)), Object);
- param = String::Flatten(param);
+ param = String::Flatten(isolate, param);
builder.AppendString(param);
if (!FLAG_harmony_function_tostring) {
// If the formal parameters string include ) - an illegal
@@ -135,7 +135,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
JSFunction::GetDerivedMap(isolate, target, new_target), Object);
Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
- Handle<Map> map = Map::AsLanguageMode(initial_map, shared_info);
+ Handle<Map> map = Map::AsLanguageMode(isolate, initial_map, shared_info);
Handle<Context> context(function->context(), isolate);
function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -173,7 +173,8 @@ BUILTIN(AsyncFunctionConstructor) {
// Do not lazily compute eval position for AsyncFunction, as they may not be
// determined after the function is resumed.
Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
- Handle<Script> script = handle(Script::cast(func->shared()->script()));
+ Handle<Script> script =
+ handle(Script::cast(func->shared()->script()), isolate);
int position = script->GetEvalPosition();
USE(position);
@@ -191,7 +192,8 @@ BUILTIN(AsyncGeneratorFunctionConstructor) {
// Do not lazily compute eval position for AsyncFunction, as they may not be
// determined after the function is resumed.
Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
- Handle<Script> script = handle(Script::cast(func->shared()->script()));
+ Handle<Script> script =
+ handle(Script::cast(func->shared()->script()), isolate);
int position = script->GetEvalPosition();
USE(position);
@@ -235,7 +237,7 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
Handle<Object> length(Smi::kZero, isolate);
Maybe<PropertyAttributes> attributes =
JSReceiver::GetPropertyAttributes(&length_lookup);
- if (attributes.IsNothing()) return isolate->heap()->exception();
+ if (attributes.IsNothing()) return ReadOnlyRoots(isolate).exception();
if (attributes.FromJust() != ABSENT) {
Handle<Object> target_length;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
@@ -268,14 +270,14 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
if (target_name->IsString()) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, name,
- Name::ToFunctionName(Handle<String>::cast(target_name)));
+ Name::ToFunctionName(isolate, Handle<String>::cast(target_name)));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, name, isolate->factory()->NewConsString(
isolate->factory()->bound__string(), name));
} else {
name = isolate->factory()->bound__string();
}
- LookupIterator it(function, isolate->factory()->name_string());
+ LookupIterator it(isolate, function, isolate->factory()->name_string());
DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
RETURN_FAILURE_ON_EXCEPTION(isolate,
JSObject::DefineOwnPropertyIgnoreAttributes(
@@ -303,7 +305,7 @@ BUILTIN(FunctionPrototypeToString) {
// receivers for this method.
if (FLAG_harmony_function_tostring && receiver->IsJSReceiver() &&
JSReceiver::cast(*receiver)->map()->is_callable()) {
- return isolate->heap()->function_native_code_string();
+ return ReadOnlyRoots(isolate).function_native_code_string();
}
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 769238ccea..b7f296ac4d 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -125,12 +125,12 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kNext,
@@ -142,12 +142,12 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(kValueArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kReturn,
@@ -159,12 +159,12 @@ TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
const int kExceptionArg = 0;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* receiver = args.GetReceiver();
Node* exception = args.GetOptionalArgumentValue(kExceptionArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 4d3337f213..3c71a322f9 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -89,7 +89,7 @@ BUILTIN(GlobalEval) {
if (!x->IsString()) return *x;
if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index c1bf7df2ec..c2a16e3570 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -176,19 +176,21 @@ TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
Label tailcall_to_shared(this);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
TNode<Int32T> arg_count =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
- TNode<JSFunction> function = CAST(LoadFromFrame(
- StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer()));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));
// Check break-at-entry flag on the debug info.
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
- TNode<Object> maybe_debug_info =
- LoadObjectField(shared, SharedFunctionInfo::kDebugInfoOffset);
- GotoIf(TaggedIsSmi(maybe_debug_info), &tailcall_to_shared);
+ TNode<Object> maybe_heap_object_or_smi = LoadObjectField(
+ shared, SharedFunctionInfo::kFunctionIdentifierOrDebugInfoOffset);
+ TNode<HeapObject> maybe_debug_info =
+ TaggedToHeapObject(maybe_heap_object_or_smi, &tailcall_to_shared);
+ GotoIfNot(HasInstanceType(maybe_debug_info, InstanceType::DEBUG_INFO_TYPE),
+ &tailcall_to_shared);
{
TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
@@ -204,11 +206,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
BIND(&tailcall_to_shared);
// Tail call into code object on the SharedFunctionInfo.
TNode<Code> code = GetSharedFunctionInfoCode(shared);
- // Use the ConstructTrampolineDescriptor because it passes new.target too in
- // case this is called during construct.
- CSA_ASSERT(this, IsCode(code));
- ConstructTrampolineDescriptor descriptor(isolate());
- TailCallStub(descriptor, code, context, function, new_target, arg_count);
+ TailCallJSCode(code, context, function, new_target, arg_count);
}
class RecordWriteCodeStubAssembler : public CodeStubAssembler {
@@ -230,48 +228,6 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
IntPtrConstant(0));
}
- void GotoIfNotBlack(Node* object, Label* not_black) {
- Label exit(this);
- Label* black = &exit;
-
- DCHECK_EQ(strcmp(Marking::kBlackBitPattern, "11"), 0);
-
- Node* cell;
- Node* mask;
-
- GetMarkBit(object, &cell, &mask);
- mask = TruncateIntPtrToInt32(mask);
-
- Node* bits = Load(MachineType::Int32(), cell);
- Node* bit_0 = Word32And(bits, mask);
-
- GotoIf(Word32Equal(bit_0, Int32Constant(0)), not_black);
-
- mask = Word32Shl(mask, Int32Constant(1));
-
- Label word_boundary(this), in_word(this);
-
- // If mask becomes zero, we know mask was `1 << 31`, i.e., the bit is on
- // word boundary. Otherwise, the bit is within the word.
- Branch(Word32Equal(mask, Int32Constant(0)), &word_boundary, &in_word);
-
- BIND(&word_boundary);
- {
- Node* bit_1 = Word32And(
- Load(MachineType::Int32(), IntPtrAdd(cell, IntPtrConstant(4))),
- Int32Constant(1));
- Branch(Word32Equal(bit_1, Int32Constant(0)), not_black, black);
- }
-
- BIND(&in_word);
- {
- Branch(Word32Equal(Word32And(bits, mask), Int32Constant(0)), not_black,
- black);
- }
-
- BIND(&exit);
- }
-
Node* IsWhite(Node* object) {
DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
Node* cell;
@@ -446,10 +402,6 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
Label call_incremental_wb(this);
-#ifndef V8_CONCURRENT_MARKING
- GotoIfNotBlack(object, &exit);
-#endif
-
// There are two cases we need to call incremental write barrier.
// 1) value_is_white
GotoIf(IsWhite(value), &call_incremental_wb);
@@ -692,8 +644,61 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
TheHoleConstant());
}
+
+ template <typename Descriptor>
+ void GenerateAdaptorWithExitFrameType(
+ Builtins::ExitFrameType exit_frame_type);
};
+template <typename Descriptor>
+void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
+ Builtins::ExitFrameType exit_frame_type) {
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<WordT> c_function =
+ UncheckedCast<WordT>(Parameter(Descriptor::kCFunction));
+
+ // The logic contained here is mirrored for TurboFan inlining in
+ // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ TNode<Context> context =
+ CAST(LoadObjectField(target, JSFunction::kContextOffset));
+
+ // Update arguments count for CEntry to contain the number of arguments
+ // including the receiver and the extra arguments.
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ argc = Int32Add(
+ argc,
+ Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
+
+ TNode<Code> code = HeapConstant(
+ CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ exit_frame_type == Builtins::BUILTIN_EXIT));
+
+ // Unconditionally push argc, target and new target as extra stack arguments.
+ // They will be used by stack frame iterators when constructing stack trace.
+ TailCallStub(CEntry1ArgvOnStackDescriptor{}, // descriptor
+ code, context, // standard arguments for TailCallStub
+ argc, c_function, // register arguments
+ TheHoleConstant(), // additional stack argument 1 (padding)
+ SmiFromInt32(argc), // additional stack argument 2
+ target, // additional stack argument 3
+ new_target); // additional stack argument 4
+}
+
+TF_BUILTIN(AdaptorWithExitFrame, InternalBuiltinsAssembler) {
+ GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::EXIT);
+}
+
+TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
+ GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
+}
+
TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount() {
auto ref = ExternalReference::pending_microtask_count_address(isolate());
if (kIntSize == 8) {
@@ -792,7 +797,8 @@ void InternalBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
SloppyTNode<HeapObject> promise_or_capability) {
Label hook(this, Label::kDeferred), done_hook(this);
- Branch(IsPromiseHookEnabledOrDebugIsActive(), &hook, &done_hook);
+ GotoIf(IsDebugActive(), &hook);
+ Branch(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &hook, &done_hook);
BIND(&hook);
{
// Get to the underlying JSPromise instance.
@@ -1091,10 +1097,31 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
}
}
+TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
+ TNode<Int32T> requested_size =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
+
+ TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
+ SmiFromInt32(requested_size));
+}
+
+TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) {
+ TNode<Int32T> requested_size =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
+
+ int flags = AllocateTargetSpace::encode(OLD_SPACE);
+ TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiFromInt32(requested_size), SmiConstant(flags));
+}
+
+TF_BUILTIN(Abort, CodeStubAssembler) {
+ TNode<Smi> message_id = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
+}
+
TF_BUILTIN(AbortJS, CodeStubAssembler) {
- Node* message = Parameter(Descriptor::kObject);
- Node* reason = SmiConstant(0);
- TailCallRuntime(Runtime::kAbortJS, reason, message);
+ TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
@@ -1149,6 +1176,34 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
}
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // CallApiGetterStub only exists as a stub to avoid duplicating code between
+ // here and code-stubs-<arch>.cc. For example, see CallApiFunctionAndReturn.
+ // Here we abuse the instantiated stub to generate code.
+ CallApiGetterStub stub(masm->isolate());
+ stub.Generate(masm);
+}
+
+void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
+ // The common variants of CallApiCallbackStub (i.e. all that are embedded into
+ // the snapshot) are generated as builtins. The rest remain available as code
+ // stubs. Here we abuse the instantiated stub to generate code and avoid
+ // duplication.
+ const int kArgc = 0;
+ CallApiCallbackStub stub(masm->isolate(), kArgc);
+ stub.Generate(masm);
+}
+
+void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
+ // The common variants of CallApiCallbackStub (i.e. all that are embedded into
+ // the snapshot) are generated as builtins. The rest remain available as code
+ // stubs. Here we abuse the instantiated stub to generate code and avoid
+ // duplication.
+ const int kArgc = 1;
+ CallApiCallbackStub stub(masm->isolate(), kArgc);
+ stub.Generate(masm);
+}
+
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
Label call_runtime(this, Label::kDeferred), return_undefined(this), end(this);
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index 810d6e930d..3a1eb078d0 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -15,7 +15,7 @@ BUILTIN(Illegal) {
UNREACHABLE();
}
-BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
+BUILTIN(EmptyFunction) { return ReadOnlyRoots(isolate).undefined_value(); }
BUILTIN(UnsupportedThrower) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index e15fb9d943..e6664950d0 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -14,6 +14,7 @@
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-relative-time-format-inl.h"
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
@@ -33,7 +34,7 @@ namespace internal {
BUILTIN(StringPrototypeToUpperCaseIntl) {
HandleScope scope(isolate);
TO_THIS_STRING(string, "String.prototype.toUpperCase");
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
return ConvertCase(string, true, isolate);
}
@@ -53,16 +54,19 @@ BUILTIN(StringPrototypeNormalizeIntl) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
Object::ToString(isolate, form_input));
- if (String::Equals(form, isolate->factory()->NFC_string())) {
+ if (String::Equals(isolate, form, isolate->factory()->NFC_string())) {
form_name = "nfc";
form_mode = UNORM2_COMPOSE;
- } else if (String::Equals(form, isolate->factory()->NFD_string())) {
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFD_string())) {
form_name = "nfc";
form_mode = UNORM2_DECOMPOSE;
- } else if (String::Equals(form, isolate->factory()->NFKC_string())) {
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFKC_string())) {
form_name = "nfkc";
form_mode = UNORM2_COMPOSE;
- } else if (String::Equals(form, isolate->factory()->NFKD_string())) {
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFKD_string())) {
form_name = "nfkc";
form_mode = UNORM2_DECOMPOSE;
} else {
@@ -75,7 +79,7 @@ BUILTIN(StringPrototypeNormalizeIntl) {
}
int length = string->length();
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
icu::UnicodeString result;
std::unique_ptr<uc16[]> sap;
UErrorCode status = U_ZERO_ERROR;
@@ -102,7 +106,7 @@ BUILTIN(StringPrototypeNormalizeIntl) {
}
if (U_FAILURE(status)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RETURN_RESULT_OR_FAILURE(
@@ -216,8 +220,8 @@ bool AddElement(Handle<JSArray> array, int index,
Factory* factory = isolate->factory();
Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
Handle<String> value;
- JSObject::AddProperty(element, factory->type_string(), field_type_string,
- NONE);
+ JSObject::AddProperty(isolate, element, factory->type_string(),
+ field_type_string, NONE);
icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -227,9 +231,8 @@ bool AddElement(Handle<JSArray> array, int index,
field.length())),
false);
- JSObject::AddProperty(element, factory->value_string(), value, NONE);
- RETURN_ON_EXCEPTION_VALUE(
- isolate, JSObject::AddDataElement(array, index, element, NONE), false);
+ JSObject::AddProperty(isolate, element, factory->value_string(), value, NONE);
+ JSObject::AddDataElement(array, index, element, NONE);
return true;
}
@@ -256,7 +259,7 @@ Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
icu::FieldPositionIterator fp_iter;
UErrorCode status = U_ZERO_ERROR;
fmt->format(number, formatted, &fp_iter, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+ if (U_FAILURE(status)) return ReadOnlyRoots(isolate).undefined_value();
Handle<JSArray> result = factory->NewJSArray(0);
int32_t length = formatted.length();
@@ -288,7 +291,7 @@ Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
: IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
if (!AddElement(result, index, field_type_string, formatted, part.begin_pos,
part.end_pos, isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
++index;
}
@@ -306,7 +309,7 @@ Object* FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
icu::FieldPosition fp;
UErrorCode status = U_ZERO_ERROR;
format->format(date_value, formatted, &fp_iter, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+ if (U_FAILURE(status)) return ReadOnlyRoots(isolate).undefined_value();
Handle<JSArray> result = factory->NewJSArray(0);
int32_t length = formatted.length();
@@ -321,14 +324,14 @@ Object* FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
if (previous_end_pos < begin_pos) {
if (!AddElement(result, index, IcuDateFieldIdToDateType(-1, isolate),
formatted, previous_end_pos, begin_pos, isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
++index;
}
if (!AddElement(result, index,
IcuDateFieldIdToDateType(fp.getField(), isolate), formatted,
begin_pos, end_pos, isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
previous_end_pos = end_pos;
++index;
@@ -336,7 +339,7 @@ Object* FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
if (previous_end_pos < length) {
if (!AddElement(result, index, IcuDateFieldIdToDateType(-1, isolate),
formatted, previous_end_pos, length, isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
}
JSObject::ValidateElements(*result);
@@ -440,12 +443,8 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
HandleScope handle_scope(isolate);
CHECK_RECEIVER(JSObject, number_format_holder, method);
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag =
- JSReceiver::GetDataProperty(number_format_holder, marker);
- Handle<String> expected_tag =
- isolate->factory()->NewStringFromStaticChars("numberformat");
- if (!(tag->IsString() && String::cast(*tag)->Equals(*expected_tag))) {
+ if (!Intl::IsObjectOfType(isolate, number_format_holder,
+ Intl::Type::kNumberFormat)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
@@ -456,7 +455,7 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
Handle<Object> x;
if (args.length() >= 2) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
- Object::ToNumber(args.at(1)));
+ Object::ToNumber(isolate, args.at(1)));
} else {
x = isolate->factory()->nan_value();
}
@@ -475,10 +474,8 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
CHECK_RECEIVER(JSObject, date_format_holder, method);
Factory* factory = isolate->factory();
- Handle<Symbol> marker = factory->intl_initialized_marker_symbol();
- Handle<Object> tag = JSReceiver::GetDataProperty(date_format_holder, marker);
- Handle<String> expected_tag = factory->NewStringFromStaticChars("dateformat");
- if (!(tag->IsString() && String::cast(*tag)->Equals(*expected_tag))) {
+ if (!Intl::IsObjectOfType(isolate, date_format_holder,
+ Intl::Type::kDateTimeFormat)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
factory->NewStringFromAsciiChecked(method),
@@ -490,7 +487,7 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
x = factory->NewNumber(JSDate::CurrentTimeValue(isolate));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
- Object::ToNumber(args.at(1)));
+ Object::ToNumber(isolate, args.at(1)));
}
double date_value = DateCache::TimeClip(x->Number());
@@ -506,6 +503,96 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
return FormatDateToParts(isolate, date_format, date_value);
}
+BUILTIN(NumberFormatPrototypeFormatNumber) {
+ const char* const method = "get Intl.NumberFormat.prototype.format";
+ HandleScope scope(isolate);
+
+ // 1. Let nf be the this value.
+ // 2. If Type(nf) is not Object, throw a TypeError exception.
+ CHECK_RECEIVER(JSReceiver, receiver, method);
+
+ // 3. Let nf be ? UnwrapNumberFormat(nf).
+ Handle<JSObject> number_format_holder;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, number_format_holder,
+ NumberFormat::Unwrap(isolate, receiver, method));
+
+ DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
+ Intl::Type::kNumberFormat));
+
+ Handle<Object> bound_format = Handle<Object>(
+ number_format_holder->GetEmbedderField(NumberFormat::kBoundFormatIndex),
+ isolate);
+
+ // 4. If nf.[[BoundFormat]] is undefined, then
+ if (!bound_format->IsUndefined(isolate)) {
+ DCHECK(bound_format->IsJSFunction());
+ // 5. Return nf.[[BoundFormat]].
+ return *bound_format;
+ }
+
+ Handle<Context> native_context =
+ Handle<Context>(isolate->context()->native_context(), isolate);
+
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context, NumberFormat::ContextSlot::kLength);
+
+ // 4. b. Set F.[[NumberFormat]] to nf.
+ context->set(NumberFormat::ContextSlot::kNumberFormat, *number_format_holder);
+
+ Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
+ native_context->number_format_internal_format_number_shared_fun(),
+ isolate);
+
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+
+ // 4. a. Let F be a new built-in function object as defined in
+ // Number Format Functions (11.1.4).
+ Handle<JSFunction> new_bound_format_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+
+ // 4. c. Set nf.[[BoundFormat]] to F.
+ number_format_holder->SetEmbedderField(NumberFormat::kBoundFormatIndex,
+ *new_bound_format_function);
+
+ // 5. Return nf.[[BoundFormat]].
+ return *new_bound_format_function;
+}
+
+BUILTIN(NumberFormatInternalFormatNumber) {
+ HandleScope scope(isolate);
+
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ // 1. Let nf be F.[[NumberFormat]].
+ Handle<JSObject> number_format_holder = Handle<JSObject>(
+ JSObject::cast(context->get(NumberFormat::ContextSlot::kNumberFormat)),
+ isolate);
+
+ // 2. Assert: Type(nf) is Object and nf has an
+ // [[InitializedNumberFormat]] internal slot.
+ DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
+ Intl::Type::kNumberFormat));
+
+ // 3. If value is not provided, let value be undefined.
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ // 4. Let x be ? ToNumber(value).
+ Handle<Object> number_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_obj,
+ Object::ToNumber(isolate, value));
+
+ // Spec treats -0 as 0.
+ if (number_obj->IsMinusZero()) {
+ number_obj = Handle<Smi>(Smi::kZero, isolate);
+ }
+
+ double number = number_obj->Number();
+ // Return FormatNumber(nf, x).
+ RETURN_RESULT_OR_FAILURE(isolate, NumberFormat::FormatNumber(
+ isolate, number_format_holder, number));
+}
+
// Intl.Locale implementation
BUILTIN(LocaleConstructor) {
HandleScope scope(isolate);
@@ -517,8 +604,8 @@ BUILTIN(LocaleConstructor) {
} else { // [[Construct]]
Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<JSObject> result;
+ Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSObject::New(target, new_target));
@@ -534,7 +621,8 @@ BUILTIN(LocaleConstructor) {
Handle<String> locale_string;
if (tag->IsJSLocale() &&
Handle<JSLocale>::cast(tag)->locale()->IsString()) {
- locale_string = Handle<String>(Handle<JSLocale>::cast(tag)->locale());
+ locale_string =
+ Handle<String>(Handle<JSLocale>::cast(tag)->locale(), isolate);
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_string,
Object::ToString(isolate, tag));
@@ -549,14 +637,49 @@ BUILTIN(LocaleConstructor) {
Object::ToObject(isolate, options));
}
- if (!JSLocale::InitializeLocale(isolate, Handle<JSLocale>::cast(result),
- locale_string, options_object)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kLocaleBadParameters));
- }
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ JSLocale::InitializeLocale(isolate, Handle<JSLocale>::cast(result),
+ locale_string, options_object));
+ }
+}
- return *result;
+BUILTIN(RelativeTimeFormatConstructor) {
+ HandleScope scope(isolate);
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromStaticChars(
+ "Intl.RelativeTimeFormat")));
}
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<JSObject> result;
+ // 2. Let relativeTimeFormat be
+ // ! OrdinaryCreateFromConstructor(NewTarget,
+ // "%RelativeTimeFormatPrototype%").
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 3. Return ? InitializeRelativeTimeFormat(relativeTimeFormat, locales,
+ // options).
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSRelativeTimeFormat::InitializeRelativeTimeFormat(
+ isolate, Handle<JSRelativeTimeFormat>::cast(result), locales,
+ options));
+}
+
+BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
+ "Intl.RelativeTimeFormat.prototype.resolvedOptions");
+ return *JSRelativeTimeFormat::ResolvedOptions(isolate, format_holder);
}
// Locale getters.
diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc
index 7bc6ab0b06..3c317d5b88 100644
--- a/deps/v8/src/builtins/builtins-json.cc
+++ b/deps/v8/src/builtins/builtins-json.cc
@@ -21,7 +21,7 @@ BUILTIN(JsonParse) {
Handle<String> string;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
Object::ToString(isolate, source));
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
RETURN_RESULT_OR_FAILURE(
isolate, string->IsSeqOneByteString()
? JsonParser<true>::Parse(isolate, string, reviver)
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
new file mode 100644
index 0000000000..d1314733c7
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -0,0 +1,203 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-lazy-gen.h"
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/feedback-vector.h"
+#include "src/globals.h"
+#include "src/objects/shared-function-info.h"
+
+namespace v8 {
+namespace internal {
+
+void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
+ TNode<Code> code, TNode<JSFunction> function) {
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+
+ TailCallJSCode(code, context, function, new_target, argc);
+}
+
+void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
+ Runtime::FunctionId function_id, TNode<JSFunction> function) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Code> code = CAST(CallRuntime(function_id, context, function));
+ GenerateTailCallToJSCode(code, function);
+}
+
+void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals(
+ TNode<Smi> marker, OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id, TNode<JSFunction> function) {
+ Label no_match(this);
+ GotoIfNot(SmiEqual(marker, SmiConstant(expected_marker)), &no_match);
+ GenerateTailCallToReturnedCode(function_id, function);
+ BIND(&no_match);
+}
+
+void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
+ TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
+ Label fallthrough(this);
+
+ TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
+ feedback_vector, FeedbackVector::kOptimizedCodeOffset);
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
+ // object.
+ Label optimized_code_slot_is_smi(this), optimized_code_slot_is_weak_ref(this);
+ Branch(TaggedIsSmi(maybe_optimized_code_entry), &optimized_code_slot_is_smi,
+ &optimized_code_slot_is_weak_ref);
+
+ BIND(&optimized_code_slot_is_smi);
+ {
+ // Optimized code slot is a Smi optimization marker.
+ TNode<Smi> marker = CAST(maybe_optimized_code_entry);
+
+ // Fall through if no optimization trigger.
+ GotoIf(SmiEqual(marker, SmiConstant(OptimizationMarker::kNone)),
+ &fallthrough);
+
+ // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
+ // all these marker values there.
+ TailCallRuntimeIfMarkerEquals(marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution, function);
+ TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent,
+ function);
+ TailCallRuntimeIfMarkerEquals(
+ marker, OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent, function);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ CSA_ASSERT(this,
+ SmiEqual(marker,
+ SmiConstant(OptimizationMarker::kInOptimizationQueue)));
+ Goto(&fallthrough);
+ }
+
+ BIND(&optimized_code_slot_is_weak_ref);
+ {
+ // Optimized code slot is a weak reference.
+ TNode<Code> optimized_code =
+ CAST(ToWeakHeapObject(maybe_optimized_code_entry, &fallthrough));
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code(this);
+ TNode<CodeDataContainer> code_data_container =
+ CAST(LoadObjectField(optimized_code, Code::kCodeDataContainerOffset));
+
+ TNode<Int32T> code_kind_specific_flags = LoadObjectField<Int32T>(
+ code_data_container, CodeDataContainer::kKindSpecificFlagsOffset);
+ GotoIf(IsSetWord32<Code::MarkedForDeoptimizationField>(
+ code_kind_specific_flags),
+ &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ StoreObjectField(function, JSFunction::kCodeOffset, optimized_code);
+ GenerateTailCallToJSCode(optimized_code, function);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ BIND(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(Runtime::kEvictOptimizedCodeSlot, function);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ BIND(&fallthrough);
+}
+
+void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
+ // First lookup code, maybe we don't need to compile!
+ Label compile_function(this, Label::kDeferred);
+
+ // Compile function if we don't have a valid feedback vector.
+ TNode<FeedbackVector> feedback_vector =
+ LoadFeedbackVector(function, &compile_function);
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(function, feedback_vector);
+
+ // We found no optimized code. Infer the code object needed for the SFI.
+ TNode<SharedFunctionInfo> shared =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+ // If code entry points to anything other than CompileLazy, install that,
+ // otherwise call runtime to compile the function.
+ TNode<Code> code = GetSharedFunctionInfoCode(shared, &compile_function);
+
+ CSA_ASSERT(
+ this,
+ WordNotEqual(code, HeapConstant(BUILTIN_CODE(isolate(), CompileLazy))));
+
+ // Install the SFI's code entry.
+ StoreObjectField(function, JSFunction::kCodeOffset, code);
+ GenerateTailCallToJSCode(code, function);
+
+ BIND(&compile_function);
+ { GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function); }
+}
+
+TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+
+ CompileLazy(function);
+}
+
+TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+
+ // Set the code slot inside the JSFunction to CompileLazy.
+ TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ StoreObjectField(function, JSFunction::kCodeOffset, code);
+ GenerateTailCallToJSCode(code, function);
+}
+
+// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
+TF_BUILTIN(DeserializeLazy, LazyBuiltinsAssembler) {
+ Label deserialize_in_runtime(this, Label::kDeferred);
+
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+
+ // Load the builtin id for lazy deserialization from SharedFunctionInfo.
+ TNode<SharedFunctionInfo> shared =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+
+ TNode<Smi> sfi_data =
+ CAST(LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset));
+
+ // The builtin may already have been deserialized. If that is the case, it is
+ // stored in the builtins table, and we can copy to correct code object to
+ // both the shared function info and function without calling into runtime.
+ //
+ // Otherwise, we need to call into runtime to deserialize.
+
+ TNode<Code> code = LoadBuiltin(sfi_data);
+
+ // Check if the loaded code object has already been deserialized. This is
+ // the case iff it does not equal DeserializeLazy.
+ GotoIf(
+ WordEqual(code, HeapConstant(BUILTIN_CODE(isolate(), DeserializeLazy))),
+ &deserialize_in_runtime);
+
+ // If we've reached this spot, the target builtin has been deserialized and
+ // we simply need to copy it over to the target function.
+ StoreObjectField(function, JSFunction::kCodeOffset, code);
+
+ // All copying is done. Jump to the deserialized code object.
+ GenerateTailCallToJSCode(code, function);
+
+ BIND(&deserialize_in_runtime);
+ { GenerateTailCallToReturnedCode(Runtime::kDeserializeLazy, function); }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
new file mode 100644
index 0000000000..7f64aa096b
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_LAZY_GEN_H_
+#define V8_BUILTINS_BUILTINS_LAZY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class LazyBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ typedef JSTrampolineDescriptor Descriptor;
+
+ explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void GenerateTailCallToJSCode(TNode<Code> code, TNode<JSFunction> function);
+
+ void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
+ TNode<JSFunction> function);
+ void TailCallRuntimeIfMarkerEquals(TNode<Smi> marker,
+ OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id,
+ TNode<JSFunction> function);
+
+ void MaybeTailCallOptimizedCodeSlot(TNode<JSFunction> function,
+ TNode<FeedbackVector> feedback_vector);
+ void CompileLazy(TNode<JSFunction> function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_LAZY_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index ca59db0972..952bdda5de 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -503,8 +503,8 @@ TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
TF_BUILTIN(MathMax, MathBuiltinsAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
MathMaxMin(context, argc, &CodeStubAssembler::Float64Max, -1.0 * V8_INFINITY);
}
@@ -512,8 +512,8 @@ TF_BUILTIN(MathMax, MathBuiltinsAssembler) {
TF_BUILTIN(MathMin, MathBuiltinsAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
MathMaxMin(context, argc, &CodeStubAssembler::Float64Min, V8_INFINITY);
}
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index d390b5db31..ae2bf03d69 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -25,7 +25,8 @@ BUILTIN(MathHypot) {
abs_values.reserve(length);
for (int i = 0; i < length; i++) {
Handle<Object> x = args.at(i + 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumber(isolate, x));
double abs_value = std::abs(x->Number());
if (std::isnan(abs_value)) {
@@ -43,7 +44,7 @@ BUILTIN(MathHypot) {
}
if (one_arg_is_nan) {
- return isolate->heap()->nan_value();
+ return ReadOnlyRoots(isolate).nan_value();
}
if (max == 0) {
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 360ddd1091..cfc81612f2 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -101,31 +101,8 @@ TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) {
// ES6 #sec-number.isinteger
TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
- Node* number = Parameter(Descriptor::kNumber);
-
- Label return_true(this), return_false(this);
-
- // Check if {number} is a Smi.
- GotoIf(TaggedIsSmi(number), &return_true);
-
- // Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumber(number), &return_false);
-
- // Load the actual value of {number}.
- Node* number_value = LoadHeapNumberValue(number);
-
- // Truncate the value of {number} to an integer (or an infinity).
- Node* integer = Float64Trunc(number_value);
-
- // Check if {number}s value matches the integer (ruling out the infinities).
- Branch(Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
- &return_true, &return_false);
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&return_false);
- Return(FalseConstant());
+ TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
+ Return(SelectBooleanConstant(IsInteger(number)));
}
// ES6 #sec-number.isnan
@@ -153,37 +130,8 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
// ES6 #sec-number.issafeinteger
TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
- Node* number = Parameter(Descriptor::kNumber);
-
- Label return_true(this), return_false(this);
-
- // Check if {number} is a Smi.
- GotoIf(TaggedIsSmi(number), &return_true);
-
- // Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumber(number), &return_false);
-
- // Load the actual value of {number}.
- Node* number_value = LoadHeapNumberValue(number);
-
- // Truncate the value of {number} to an integer (or an infinity).
- Node* integer = Float64Trunc(number_value);
-
- // Check if {number}s value matches the integer (ruling out the infinities).
- GotoIfNot(
- Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
- &return_false);
-
- // Check if the {integer} value is in safe integer range.
- Branch(Float64LessThanOrEqual(Float64Abs(integer),
- Float64Constant(kMaxSafeInteger)),
- &return_true, &return_false);
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&return_false);
- Return(FalseConstant());
+ TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
+ Return(SelectBooleanConstant(IsSafeInteger(number)));
}
// ES6 #sec-number.parsefloat
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 46aa051484..7e701c1546 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -39,10 +39,10 @@ BUILTIN(NumberPrototypeToExponential) {
isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
double const fraction_digits_number = fraction_digits->Number();
- if (std::isnan(value_number)) return isolate->heap()->NaN_string();
+ if (std::isnan(value_number)) return ReadOnlyRoots(isolate).NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
- : isolate->heap()->Infinity_string();
+ return (value_number < 0.0) ? ReadOnlyRoots(isolate).minus_Infinity_string()
+ : ReadOnlyRoots(isolate).Infinity_string();
}
if (fraction_digits_number < 0.0 ||
fraction_digits_number > kMaxFractionDigits) {
@@ -93,10 +93,10 @@ BUILTIN(NumberPrototypeToFixed) {
"toFixed() digits")));
}
- if (std::isnan(value_number)) return isolate->heap()->NaN_string();
+ if (std::isnan(value_number)) return ReadOnlyRoots(isolate).NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
- : isolate->heap()->Infinity_string();
+ return (value_number < 0.0) ? ReadOnlyRoots(isolate).minus_Infinity_string()
+ : ReadOnlyRoots(isolate).Infinity_string();
}
char* const str = DoubleToFixedCString(
value_number, static_cast<int>(fraction_digits_number));
@@ -155,10 +155,10 @@ BUILTIN(NumberPrototypeToPrecision) {
Object::ToInteger(isolate, precision));
double const precision_number = precision->Number();
- if (std::isnan(value_number)) return isolate->heap()->NaN_string();
+ if (std::isnan(value_number)) return ReadOnlyRoots(isolate).NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
- : isolate->heap()->Infinity_string();
+ return (value_number < 0.0) ? ReadOnlyRoots(isolate).minus_Infinity_string()
+ : ReadOnlyRoots(isolate).Infinity_string();
}
if (precision_number < 1.0 || precision_number > kMaxFractionDigits) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -219,10 +219,10 @@ BUILTIN(NumberPrototypeToString) {
}
// Slow case.
- if (std::isnan(value_number)) return isolate->heap()->NaN_string();
+ if (std::isnan(value_number)) return ReadOnlyRoots(isolate).NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
- : isolate->heap()->Infinity_string();
+ return (value_number < 0.0) ? ReadOnlyRoots(isolate).minus_Infinity_string()
+ : ReadOnlyRoots(isolate).Infinity_string();
}
char* const str =
DoubleToRadixCString(value_number, static_cast<int>(radix_number));
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 6d397952d2..fb89694c31 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -266,7 +266,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
TNode<Context> context, TNode<JSObject> object,
Label* if_call_runtime_with_fast_path, Label* if_no_properties,
CollectType collect_type) {
- Node* native_context = LoadNativeContext(context);
+ TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
TNode<Map> map = LoadMap(object);
@@ -274,9 +274,9 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
Label if_has_enum_cache(this), if_not_has_enum_cache(this),
collect_entries(this);
- Node* object_enum_length =
- DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
- Node* has_enum_cache = WordNotEqual(
+ TNode<IntPtrT> object_enum_length =
+ Signed(DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3));
+ TNode<BoolT> has_enum_cache = WordNotEqual(
object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
// In case, we found enum_cache in object,
@@ -292,9 +292,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
BIND(&if_has_enum_cache);
{
GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
- TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
- AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
- INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+ TNode<FixedArray> values_or_entries = AllocateFixedArray(
+ PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation);
// If in case we have enum_cache,
// we can't detect accessor of object until loop through descriptors.
@@ -342,8 +341,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
// the next descriptor.
GotoIfNot(IsPropertyEnumerable(details), &next_descriptor);
- VARIABLE(var_property_value, MachineRepresentation::kTagged,
- UndefinedConstant());
+ TVARIABLE(Object, var_property_value, UndefinedConstant());
TNode<IntPtrT> descriptor_name_index = ToKeyIndex<DescriptorArray>(
Unsigned(TruncateIntPtrToInt32(var_descriptor_number.value())));
@@ -353,7 +351,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
&var_property_value);
// If kind is "value", append value to properties.
- Node* value = var_property_value.value();
+ TNode<Object> value = var_property_value.value();
if (collect_type == CollectType::kEntries) {
// Let entry be CreateArrayFromList(« key, value »).
@@ -364,7 +362,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
IntPtrConstant(2));
StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
- value = array;
+ value = TNode<JSArray>::UncheckedCast(array);
}
StoreFixedArrayElement(values_or_entries, var_result_index.value(),
@@ -487,10 +485,10 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
// ES #sec-object.assign
TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> target = args.GetOptionalArgumentValue(0);
// 1. Let to be ? ToObject(target).
@@ -583,8 +581,8 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
DescriptorArrayForEach(
list, Unsigned(Int32Constant(0)), nof_descriptors,
[=, &var_stable](TNode<UintPtrT> descriptor_key_index) {
- TNode<Name> next_key =
- CAST(LoadFixedArrayElement(from_descriptors, descriptor_key_index));
+ TNode<Name> next_key = CAST(
+ LoadWeakFixedArrayElement(from_descriptors, descriptor_key_index));
TVARIABLE(Object, var_value, SmiConstant(0));
Label do_store(this), next_iteration(this);
@@ -622,12 +620,11 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
BIND(&if_found_fast);
{
- Node* descriptors = var_meta_storage.value();
- Node* name_index = var_entry.value();
+ TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
+ TNode<IntPtrT> name_index = var_entry.value();
// Skip non-enumerable properties.
- var_details =
- LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+ var_details = LoadDetailsByKeyIndex(descriptors, name_index);
GotoIf(IsSetWord32(var_details.value(),
PropertyDetails::kAttributesDontEnumMask),
&next_iteration);
@@ -782,6 +779,108 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+// ES #sec-object.getOwnPropertyNames
+TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
+
+ VARIABLE(var_length, MachineRepresentation::kTagged);
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ Label if_empty(this, Label::kDeferred), if_empty_elements(this),
+ if_fast(this), try_fast(this, Label::kDeferred),
+ if_slow(this, Label::kDeferred), if_join(this);
+
+ // Check if the {object} has a usable enum cache.
+ GotoIf(TaggedIsSmi(object), &if_slow);
+ Node* object_map = LoadMap(object);
+ Node* object_bit_field3 = LoadMapBitField3(object_map);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
+ GotoIf(
+ WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
+ &try_fast);
+
+ // Ensure that the {object} doesn't have any elements.
+ CSA_ASSERT(this, IsJSObjectMap(object_map));
+ Node* object_elements = LoadElements(object);
+ GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
+ Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
+ &if_slow);
+
+ // Check whether all own properties are enumerable.
+ BIND(&if_empty_elements);
+ Node* number_descriptors =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(object_bit_field3);
+ GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow);
+
+ // Check whether there are enumerable properties.
+ Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &if_empty, &if_fast);
+
+ BIND(&if_fast);
+ {
+ // The {object} has a usable enum cache and all own properties are
+ // enumerable, use that.
+ Node* object_descriptors = LoadMapDescriptors(object_map);
+ Node* object_enum_cache =
+ LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* object_enum_keys =
+ LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
+
+ // Allocate a JSArray and copy the elements from the {object_enum_keys}.
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* array_length = SmiTag(object_enum_length);
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ INTPTR_PARAMETERS);
+ CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
+ object_enum_length, SKIP_WRITE_BARRIER);
+ Return(array);
+ }
+
+ BIND(&try_fast);
+ {
+ // Let the runtime compute the elements and try initializing enum cache.
+ Node* elements = CallRuntime(Runtime::kObjectGetOwnPropertyNamesTryFast,
+ context, object);
+ var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
+ var_elements.Bind(elements);
+ Goto(&if_join);
+ }
+
+ BIND(&if_empty);
+ {
+ // The {object} doesn't have any enumerable keys.
+ var_length.Bind(SmiConstant(0));
+ var_elements.Bind(EmptyFixedArrayConstant());
+ Goto(&if_join);
+ }
+
+ BIND(&if_slow);
+ {
+ // Let the runtime compute the elements.
+ Node* elements =
+ CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object);
+ var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
+ var_elements.Bind(elements);
+ Goto(&if_join);
+ }
+
+ BIND(&if_join);
+ {
+ // Wrap the elements into a proper JSArray and return that.
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, var_length.value(), nullptr);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
+ var_elements.value());
+ Return(array);
+ }
+}
+
TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
TNode<JSObject> object =
TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
@@ -1168,10 +1267,10 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
Comment("Try loading the prototype info");
Node* prototype_info =
LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
- Node* weak_cell =
- LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
- GotoIf(IsUndefined(weak_cell), &call_runtime);
- map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
+ TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
+ prototype_info, PrototypeInfo::kObjectCreateMapOffset);
+ GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime);
+ map.Bind(ToWeakHeapObject(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1197,12 +1296,12 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPropertiesArg = 1;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* prototype = args.GetOptionalArgumentValue(kPrototypeArg);
Node* properties = args.GetOptionalArgumentValue(kPropertiesArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
@@ -1263,10 +1362,11 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
Node* prototype_info =
LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
Comment("Load ObjectCreateMap from PrototypeInfo");
- Node* weak_cell =
- LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
- GotoIf(IsUndefined(weak_cell), &call_runtime);
- map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
+ TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
+ prototype_info, PrototypeInfo::kObjectCreateMapOffset);
+ GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()),
+ &call_runtime);
+ map.Bind(ToWeakHeapObject(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1366,12 +1466,17 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
Node* bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared);
+ Node* formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Uint16()));
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
- Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
- Node* register_file = AllocateFixedArray(HOLEY_ELEMENTS, size);
- FillFixedArrayWithValue(HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
- size, Heap::kUndefinedValueRootIndex);
+ Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kPointerSizeLog2)),
+ formal_parameter_count);
+ Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
+ FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
+ IntPtrConstant(0), size,
+ Heap::kUndefinedValueRootIndex);
// TODO(cbruni): support start_offset to avoid double initialization.
Node* result = AllocateJSObjectFromMap(maybe_map, nullptr, nullptr, kNone,
kWithSlackTracking);
@@ -1381,8 +1486,9 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
context);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kReceiverOffset,
receiver);
- StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kRegisterFileOffset,
- register_file);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSGeneratorObject::kParametersAndRegistersOffset,
+ parameters_and_registers);
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
@@ -1403,9 +1509,9 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
+ Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* object = args.GetOptionalArgumentValue(0);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 2df04d6bca..074c926587 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -29,8 +29,8 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
isolate, object, JSReceiver::ToObject(isolate, args.receiver()));
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, name);
- if (maybe.IsNothing()) return isolate->heap()->exception();
- if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
+ if (maybe.FromJust() == ABSENT) return ReadOnlyRoots(isolate).false_value();
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
@@ -92,12 +92,12 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
// throwing an exception.
Maybe<bool> success = JSReceiver::DefineOwnProperty(isolate, receiver, name,
&desc, kThrowOnError);
- MAYBE_RETURN(success, isolate->heap()->exception());
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
if (!success.FromJust()) {
isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
}
// 6. Return undefined.
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
@@ -123,13 +123,13 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
if (it.HasAccess()) continue;
isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
case LookupIterator::JSPROXY: {
PropertyDescriptor desc;
Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
isolate, it.GetHolder<JSProxy>(), it.GetName(), &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
+ MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
if (found.FromJust()) {
if (component == ACCESSOR_GETTER && desc.has_get()) {
return *desc.get();
@@ -137,32 +137,32 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
if (component == ACCESSOR_SETTER && desc.has_set()) {
return *desc.set();
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Handle<Object> prototype;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, prototype, JSProxy::GetPrototype(it.GetHolder<JSProxy>()));
if (prototype->IsNull(isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
return ObjectLookupAccessor(isolate, prototype, key, component);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::DATA:
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
case LookupIterator::ACCESSOR: {
Handle<Object> maybe_pair = it.GetAccessors();
if (maybe_pair->IsAccessorPair()) {
return *AccessorPair::GetComponent(
- Handle<AccessorPair>::cast(maybe_pair), component);
+ isolate, Handle<AccessorPair>::cast(maybe_pair), component);
}
}
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace
@@ -212,7 +212,7 @@ BUILTIN(ObjectFreeze) {
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
FROZEN, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
}
return *object;
}
@@ -257,7 +257,7 @@ BUILTIN(ObjectSetPrototypeOf) {
// 4. Let status be ? O.[[SetPrototypeOf]](proto).
// 5. If status is false, throw a TypeError exception.
MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
// 6. Return O.
return *receiver;
@@ -291,20 +291,20 @@ BUILTIN(ObjectPrototypeSetProto) {
// 2. If Type(proto) is neither Object nor Null, return undefined.
Handle<Object> proto = args.at(1);
if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// 3. If Type(O) is not Object, return undefined.
- if (!object->IsJSReceiver()) return isolate->heap()->undefined_value();
+ if (!object->IsJSReceiver()) return ReadOnlyRoots(isolate).undefined_value();
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
// 4. Let status be ? O.[[SetPrototypeOf]](proto).
// 5. If status is false, throw a TypeError exception.
MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
// Return undefined.
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
namespace {
@@ -326,11 +326,6 @@ Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
} // namespace
-// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
-BUILTIN(ObjectGetOwnPropertyNames) {
- return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
-}
-
// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
BUILTIN(ObjectGetOwnPropertySymbols) {
return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
@@ -344,7 +339,7 @@ BUILTIN(ObjectIsExtensible) {
object->IsJSReceiver()
? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
: Just(false);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -356,7 +351,7 @@ BUILTIN(ObjectIsFrozen) {
? JSReceiver::TestIntegrityLevel(
Handle<JSReceiver>::cast(object), FROZEN)
: Just(true);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -368,7 +363,7 @@ BUILTIN(ObjectIsSealed) {
? JSReceiver::TestIntegrityLevel(
Handle<JSReceiver>::cast(object), SEALED)
: Just(true);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -394,7 +389,7 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
PropertyDescriptor descriptor;
Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
isolate, receiver, key, &descriptor);
- MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
+ MAYBE_RETURN(did_get_descriptor, ReadOnlyRoots(isolate).exception());
if (!did_get_descriptor.FromJust()) continue;
Handle<Object> from_descriptor = descriptor.ToObject(isolate);
@@ -416,7 +411,7 @@ BUILTIN(ObjectPreventExtensions) {
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
}
return *object;
}
@@ -428,7 +423,7 @@ BUILTIN(ObjectSeal) {
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
SEALED, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
}
return *object;
}
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index aef9e40ac8..ccfb3b11b0 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -11,6 +11,7 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/js-promise.h"
namespace v8 {
namespace internal {
@@ -55,7 +56,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
PromiseInit(instance);
Label out(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
+ GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, instance, parent);
Goto(&out);
@@ -79,7 +80,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
}
Label out(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
+ GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &out);
CallRuntime(Runtime::kPromiseHookInit, context, instance,
UndefinedConstant());
Goto(&out);
@@ -812,7 +813,7 @@ TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
// ES6 #sec-promise-executor
TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Node* const executor = Parameter(Descriptor::kExecutor);
- Node* const new_target = Parameter(Descriptor::kNewTarget);
+ Node* const new_target = Parameter(Descriptor::kJSNewTarget);
Node* const context = Parameter(Descriptor::kContext);
Isolate* isolate = this->isolate();
@@ -860,7 +861,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
PromiseInit(instance);
var_result.Bind(instance);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_push);
+ GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &debug_push);
CallRuntime(Runtime::kPromiseHookInit, context, instance,
UndefinedConstant());
Goto(&debug_push);
@@ -1080,7 +1081,8 @@ TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
GotoIfNot(WordEqual(then, promise_then), &if_slow);
Node* const thenable_map = LoadMap(thenable);
GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
- GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_slow);
+ GotoIf(IsPromiseHookEnabled(), &if_slow);
+ GotoIf(IsDebugActive(), &if_slow);
BranchIfPromiseSpeciesLookupChainIntact(native_context, thenable_map,
&if_fast, &if_slow);
@@ -1686,7 +1688,8 @@ TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+ GotoIf(IsPromiseHookEnabled(), &if_runtime);
+ GotoIf(IsDebugActive(), &if_runtime);
// 7. If promise.[[PromiseIsHandled]] is false, perform
// HostPromiseRejectionTracker(promise, "reject").
@@ -1733,7 +1736,8 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+ GotoIf(IsPromiseHookEnabled(), &if_runtime);
+ GotoIf(IsDebugActive(), &if_runtime);
// 6. If SameValue(resolution, promise) is true, then
// We can use pointer comparison here, since the {promise} is guaranteed
@@ -1742,29 +1746,47 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
// 7. If Type(resolution) is not Object, then
GotoIf(TaggedIsSmi(resolution), &if_fulfill);
- Node* const result_map = LoadMap(resolution);
- GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+ Node* const resolution_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(resolution_map), &if_fulfill);
// We can skip the "then" lookup on {resolution} if its [[Prototype]]
// is the (initial) Promise.prototype and the Promise#then protector
// is intact, as that guards the lookup path for the "then" property
// on JSPromise instances which have the (initial) %PromisePrototype%.
- Label if_fast(this), if_slow(this, Label::kDeferred);
+ Label if_fast(this), if_receiver(this), if_slow(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
- BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
- &if_slow);
+ GotoIfForceSlowPath(&if_slow);
+ GotoIf(IsPromiseThenProtectorCellInvalid(), &if_slow);
+ GotoIfNot(IsJSPromiseMap(resolution_map), &if_receiver);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ Branch(WordEqual(LoadMapPrototype(resolution_map), promise_prototype),
+ &if_fast, &if_slow);
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
BIND(&if_fast);
{
+ // The {resolution} is a native Promise in this case.
Node* const then =
LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
var_then.Bind(then);
Goto(&do_enqueue);
}
+ BIND(&if_receiver);
+ {
+ // We can skip the lookup of "then" if the {resolution} is a (newly
+ // created) IterResultObject, as the Promise#then() protector also
+ // ensures that the intrinsic %ObjectPrototype% doesn't contain any
+ // "then" property. This helps to avoid negative lookups on iterator
+ // results from async generators.
+ CSA_ASSERT(this, IsJSReceiverMap(resolution_map));
+ CSA_ASSERT(this, Word32BinaryNot(IsPromiseThenProtectorCellInvalid()));
+ Node* const iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Branch(WordEqual(resolution_map, iterator_result_map), &if_fulfill,
+ &if_slow);
+ }
+
BIND(&if_slow);
{
// 8. Let then be Get(resolution, "then").
@@ -2044,9 +2066,9 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
}
TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
- Node* const function = LoadFromFrame(StandardFrameConstants::kFunctionOffset);
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));
Label already_called(this, Label::kDeferred), resolve_promise(this);
@@ -2058,22 +2080,22 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
GotoIf(IsNativeContext(context), &already_called);
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
- Node* const native_context = LoadNativeContext(context);
+ TNode<Context> native_context = LoadNativeContext(context);
StoreObjectField(function, JSFunction::kContextOffset, native_context);
// Determine the index from the {function}.
Label unreachable(this, Label::kDeferred);
STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
- Node* const identity_hash =
+ TNode<IntPtrT> identity_hash =
LoadJSReceiverIdentityHash(function, &unreachable);
CSA_ASSERT(this, IntPtrGreaterThan(identity_hash, IntPtrConstant(0)));
- Node* const index = IntPtrSub(identity_hash, IntPtrConstant(1));
+ TNode<IntPtrT> index = IntPtrSub(identity_hash, IntPtrConstant(1));
// Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
- Node* const values_array =
- LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
- Node* const elements = LoadElements(values_array);
- Node* const values_length =
+ TNode<JSArray> values_array = CAST(
+ LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot));
+ TNode<FixedArray> elements = CAST(LoadElements(values_array));
+ TNode<IntPtrT> values_length =
LoadAndUntagObjectField(values_array, JSArray::kLengthOffset);
Label if_inbounds(this), if_outofbounds(this), done(this);
Branch(IntPtrLessThan(index, values_length), &if_inbounds, &if_outofbounds);
@@ -2081,8 +2103,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
BIND(&if_outofbounds);
{
// Check if we need to grow the backing store.
- Node* const new_length = IntPtrAdd(index, IntPtrConstant(1));
- Node* const elements_length =
+ TNode<IntPtrT> new_length = IntPtrAdd(index, IntPtrConstant(1));
+ TNode<IntPtrT> elements_length =
LoadAndUntagObjectField(elements, FixedArray::kLengthOffset);
Label if_grow(this, Label::kDeferred), if_nogrow(this);
Branch(IntPtrLessThan(index, elements_length), &if_nogrow, &if_grow);
@@ -2090,14 +2112,14 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
BIND(&if_grow);
{
// We need to grow the backing store to fit the {index} as well.
- Node* const new_elements_length =
+ TNode<IntPtrT> new_elements_length =
IntPtrMin(CalculateNewElementsCapacity(new_length),
IntPtrConstant(PropertyArray::HashField::kMax + 1));
CSA_ASSERT(this, IntPtrLessThan(index, new_elements_length));
CSA_ASSERT(this, IntPtrLessThan(elements_length, new_elements_length));
- Node* const new_elements = AllocateFixedArray(
- PACKED_ELEMENTS, new_elements_length, INTPTR_PARAMETERS,
- AllocationFlag::kAllowLargeObjectAllocation);
+ TNode<FixedArray> new_elements =
+ AllocateFixedArray(PACKED_ELEMENTS, new_elements_length,
+ AllocationFlag::kAllowLargeObjectAllocation);
CopyFixedArrayElements(PACKED_ELEMENTS, elements, PACKED_ELEMENTS,
new_elements, elements_length,
new_elements_length);
@@ -2139,9 +2161,9 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
Return(UndefinedConstant());
BIND(&resolve_promise);
- Node* const capability =
- LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
- Node* const resolve =
+ TNode<PromiseCapability> capability = CAST(
+ LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot));
+ TNode<Object> resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
context, resolve, UndefinedConstant(), values_array);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 680d1c8d9f..1d4c6d0802 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -150,7 +150,7 @@ TF_BUILTIN(ProxyConstructor, ProxiesCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
// 1. If NewTarget is undefined, throw a TypeError exception.
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* new_target = Parameter(Descriptor::kJSNewTarget);
Label throwtypeerror(this, Label::kDeferred), createproxy(this);
Branch(IsUndefined(new_target), &throwtypeerror, &createproxy);
@@ -289,7 +289,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
CSA_ASSERT(this, IsJSProxy(proxy));
CSA_ASSERT(this, IsCallable(proxy));
- PerformStackCheck(context);
+ PerformStackCheck(CAST(context));
Label throw_proxy_handler_revoked(this, Label::kDeferred),
trap_undefined(this);
@@ -337,7 +337,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* argc_ptr = ChangeInt32ToIntPtr(argc);
- Node* proxy = Parameter(Descriptor::kFunction);
+ Node* proxy = Parameter(Descriptor::kTarget);
Node* new_target = Parameter(Descriptor::kNewTarget);
Node* context = Parameter(Descriptor::kContext);
@@ -408,7 +408,7 @@ TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
CSA_ASSERT(this, IsJSProxy(proxy));
- PerformStackCheck(context);
+ PerformStackCheck(CAST(context));
// 1. Assert: IsPropertyKey(P) is true.
CSA_ASSERT(this, IsName(name));
@@ -482,6 +482,7 @@ TF_BUILTIN(ProxyGetProperty, ProxiesCodeStubAssembler) {
Node* proxy = Parameter(Descriptor::kProxy);
Node* name = Parameter(Descriptor::kName);
Node* receiver = Parameter(Descriptor::kReceiverValue);
+ Node* on_non_existent = Parameter(Descriptor::kOnNonExistent);
CSA_ASSERT(this, IsJSProxy(proxy));
@@ -531,7 +532,7 @@ TF_BUILTIN(ProxyGetProperty, ProxiesCodeStubAssembler) {
// 7.a. Return ? target.[[Get]](P, Receiver).
// TODO(mslekova): Introduce GetPropertyWithReceiver stub
Return(CallRuntime(Runtime::kGetPropertyWithReceiver, context, target, name,
- receiver));
+ receiver, on_non_existent));
}
BIND(&throw_proxy_handler_revoked);
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index bc596e0ccb..cc97caf6a9 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -38,12 +38,12 @@ BUILTIN(ReflectDefineProperty) {
PropertyDescriptor desc;
if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
Maybe<bool> result = JSReceiver::DefineOwnProperty(
isolate, Handle<JSReceiver>::cast(target), name, &desc, kDontThrow);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -67,7 +67,7 @@ BUILTIN(ReflectDeleteProperty) {
Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
Handle<JSReceiver>::cast(target), name, LanguageMode::kSloppy);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -115,8 +115,8 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
PropertyDescriptor desc;
Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
isolate, Handle<JSReceiver>::cast(target), name, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
- if (!found.FromJust()) return isolate->heap()->undefined_value();
+ MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
+ if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value();
return *desc.ToObject(isolate);
}
@@ -152,7 +152,7 @@ BUILTIN(ReflectIsExtensible) {
Maybe<bool> result =
JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -193,7 +193,7 @@ BUILTIN(ReflectPreventExtensions) {
Maybe<bool> result = JSReceiver::PreventExtensions(
Handle<JSReceiver>::cast(target), kDontThrow);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -220,7 +220,7 @@ BUILTIN(ReflectSet) {
isolate, receiver, name, Handle<JSReceiver>::cast(target));
Maybe<bool> result = Object::SetSuperProperty(
&it, value, LanguageMode::kSloppy, Object::MAY_BE_STORE_FROM_KEYED);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -245,7 +245,7 @@ BUILTIN(ReflectSetPrototypeOf) {
Maybe<bool> result = JSReceiver::SetPrototype(
Handle<JSReceiver>::cast(target), proto, true, kDontThrow);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 5b086e01b5..1cf5d4f61f 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -114,7 +114,7 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
pattern, flags);
}
-Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
+TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
// Load the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
@@ -179,7 +179,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- TNode<String> const first = SubString(string, SmiUntag(start), SmiUntag(end));
+ TNode<String> first =
+ CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
Node* const result =
AllocateRegExpResult(context, num_results, start, string);
@@ -217,7 +218,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
TNode<String> const capture =
- SubString(string, SmiUntag(start), SmiUntag(end));
+ CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
StoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -672,7 +673,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Node* const smi_zero = SmiConstant(0);
if (is_fastpath) {
- CSA_ASSERT(this, IsFastRegExpNoPrototype(context, regexp));
+ CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
} else {
ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
"RegExp.prototype.exec");
@@ -875,6 +876,51 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
return var_result.value();
}
+// We also return true if exec is undefined (and hence per spec)
+// the original {exec} will be used.
+TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
+ TNode<Context> context, TNode<JSRegExp> object) {
+ CSA_ASSERT(this, TaggedIsNotSmi(object));
+ Label out(this);
+ Label check_last_index(this);
+ TVARIABLE(BoolT, var_result);
+
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
+ var_result = BoolConstant(0);
+ GotoIfForceSlowPath(&out);
+#endif
+
+ TNode<BoolT> is_regexp = HasInstanceType(object, JS_REGEXP_TYPE);
+
+ var_result = is_regexp;
+ GotoIfNot(is_regexp, &out);
+
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> original_exec =
+ LoadContextElement(native_context, Context::REGEXP_EXEC_FUNCTION_INDEX);
+
+ TNode<Object> regexp_exec =
+ GetProperty(context, object, isolate()->factory()->exec_string());
+
+ TNode<BoolT> has_initialexec = WordEqual(regexp_exec, original_exec);
+ var_result = has_initialexec;
+ GotoIf(has_initialexec, &check_last_index);
+ TNode<BoolT> is_undefined = IsUndefined(regexp_exec);
+ var_result = is_undefined;
+ GotoIfNot(is_undefined, &out);
+ Goto(&check_last_index);
+
+ BIND(&check_last_index);
+ // The smi check is required to omit ToLength(lastIndex) calls with possible
+ // user-code execution on the fast path.
+ TNode<Object> last_index = FastLoadLastIndex(object);
+ var_result = TaggedIsPositiveSmi(last_index);
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+}
+
Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
Node* const object) {
CSA_ASSERT(this, TaggedIsNotSmi(object));
@@ -1039,6 +1085,19 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
Return(NullConstant());
}
+TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Number> last_index = CAST(Parameter(Descriptor::kLastIndex));
+ TNode<FixedArray> match_info = CAST(Parameter(Descriptor::kMatchInfo));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ CSA_ASSERT(this, IsNumberNormalized(last_index));
+ CSA_ASSERT(this, IsNumberPositive(last_index));
+
+ Return(RegExpExecInternal(context, regexp, string, last_index, match_info));
+}
+
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
@@ -1253,7 +1312,7 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
Node* const pattern = Parameter(Descriptor::kPattern);
Node* const flags = Parameter(Descriptor::kFlags);
- Node* const new_target = Parameter(Descriptor::kNewTarget);
+ Node* const new_target = Parameter(Descriptor::kJSNewTarget);
Node* const context = Parameter(Descriptor::kContext);
Isolate* isolate = this->isolate();
@@ -1741,6 +1800,20 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
}
}
+TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) {
+ TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+ Label if_didnotmatch(this);
+ CSA_ASSERT(this, IsFastRegExpWithOriginalExec(context, regexp));
+ RegExpPrototypeExecBodyWithoutResult(context, regexp, string, &if_didnotmatch,
+ true);
+ Return(TrueConstant());
+
+ BIND(&if_didnotmatch);
+ Return(FalseConstant());
+}
+
Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
Node* const index,
Node* const is_unicode,
@@ -1859,8 +1932,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const match_to = LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- var_match.Bind(
- SubString(string, SmiUntag(match_from), SmiUntag(match_to)));
+ var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
+ match_from, match_to));
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
@@ -1962,8 +2035,7 @@ TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
TNode<Object> maybe_regexp, TNode<String> string,
TNode<BoolT> is_fast_regexp, char const* method_name) {
Label create_iterator(this), if_fast_regexp(this),
- if_slow_regexp(this, Label::kDeferred), if_not_regexp(this),
- throw_type_error(this, Label::kDeferred);
+ if_slow_regexp(this, Label::kDeferred), if_not_regexp(this);
// 1. Let S be ? ToString(O).
// Handled by the caller of MatchAllIterator.
@@ -2044,35 +2116,22 @@ TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
var_matcher = RegExpCreate(context, native_context, maybe_regexp,
StringConstant("g"));
- // d. Let global be true.
+ // c. Let global be true.
var_global = Int32Constant(1);
- // e. Let fullUnicode be false.
+ // d. Let fullUnicode be false.
var_unicode = Int32Constant(0);
- Label if_matcher_slow_regexp(this, Label::kDeferred);
- BranchIfFastRegExp(context, var_matcher.value(), &create_iterator,
- &if_matcher_slow_regexp);
- BIND(&if_matcher_slow_regexp);
- {
- // c. If ? IsRegExp(matcher) is not true, throw a TypeError exception.
- GotoIfNot(IsRegExp(context, var_matcher.value()), &throw_type_error);
-
- // f. If ? Get(matcher, "lastIndex") is not 0, throw a TypeError
- // exception.
- TNode<Object> last_index =
- CAST(LoadLastIndex(context, var_matcher.value(), false));
- Branch(WordEqual(SmiConstant(0), last_index), &create_iterator,
- &throw_type_error);
- }
- }
- BIND(&throw_type_error);
- {
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant(method_name), maybe_regexp);
+#ifdef DEBUG
+ // Assert: ! Get(matcher, "lastIndex") is 0.
+ TNode<Object> last_index =
+ CAST(LoadLastIndex(context, var_matcher.value(), false));
+ CSA_ASSERT(this, WordEqual(SmiConstant(0), last_index));
+#endif // DEBUG
+
+ Goto(&create_iterator);
}
// 4. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
- // CreateRegExpStringIterator ( R, S, global, fullUnicode )
BIND(&create_iterator);
{
TNode<Map> map = CAST(LoadContextElement(
@@ -2340,8 +2399,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const match_indices = RegExpExecInternal(context, regexp, string,
- smi_zero, last_match_info);
+ Node* const match_indices =
+ CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
+ smi_zero, last_match_info);
Label return_singleton_array(this);
Branch(IsNull(match_indices), &return_singleton_array,
@@ -2396,8 +2456,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const match_indices = RegExpExecInternal(
- context, regexp, string, next_search_from, last_match_info);
+ Node* const match_indices =
+ CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
+ next_search_from, last_match_info);
// We're done if no match was found.
{
@@ -2439,7 +2500,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
TNode<Smi> const from = last_matched_until;
TNode<Smi> const to = match_from;
- array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
+ array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to));
GotoIf(WordEqual(array.length(), int_limit), &out);
}
@@ -2476,7 +2537,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&select_capture);
{
- var_value.Bind(SubString(string, SmiUntag(from), SmiUntag(to)));
+ var_value.Bind(
+ CallBuiltin(Builtins::kSubString, context, string, from, to));
Goto(&store_value);
}
@@ -2511,7 +2573,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = var_last_matched_until.value();
Node* const to = string_length;
- array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
+ array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to));
Goto(&out);
}
@@ -2593,13 +2655,13 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
const int kLimitArg = 1;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
Node* const maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const context = Parameter(Descriptor::kContext);
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
@@ -2872,32 +2934,22 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
TNode<Smi> const match_end = CAST(LoadFixedArrayElement(
var_match_indices.value(), RegExpMatchInfo::kFirstCaptureIndex + 1));
- Label if_replaceisempty(this), if_replaceisnotempty(this);
TNode<Smi> const replace_length = LoadStringLengthAsSmi(replace_string);
- Branch(SmiEqual(replace_length, smi_zero), &if_replaceisempty,
- &if_replaceisnotempty);
- BIND(&if_replaceisempty);
- {
- // TODO(jgruber): We could skip many of the checks that using SubString
- // here entails.
- TNode<String> const first_part =
- SubString(string, SmiUntag(var_last_match_end.value()),
- SmiUntag(match_start));
- var_result = StringAdd(context, var_result.value(), first_part);
- Goto(&loop_end);
- }
+ // TODO(jgruber): We could skip many of the checks that using SubString
+ // here entails.
+ TNode<String> first_part =
+ CAST(CallBuiltin(Builtins::kSubString, context, string,
+ var_last_match_end.value(), match_start));
+ var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured,
+ context, var_result.value(), first_part));
- BIND(&if_replaceisnotempty);
- {
- TNode<String> const first_part =
- SubString(string, SmiUntag(var_last_match_end.value()),
- SmiUntag(match_start));
- TNode<String> result =
- StringAdd(context, var_result.value(), first_part);
- var_result = StringAdd(context, result, replace_string);
- Goto(&loop_end);
- }
+ GotoIf(SmiEqual(replace_length, smi_zero), &loop_end);
+
+ var_result =
+ CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, context,
+ var_result.value(), replace_string));
+ Goto(&loop_end);
BIND(&loop_end);
{
@@ -2919,9 +2971,11 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
BIND(&if_nofurthermatches);
{
TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
- TNode<String> const last_part = SubString(
- string, SmiUntag(var_last_match_end.value()), SmiUntag(string_length));
- var_result = StringAdd(context, var_result.value(), last_part);
+ TNode<String> last_part =
+ CAST(CallBuiltin(Builtins::kSubString, context, string,
+ var_last_match_end.value(), string_length));
+ var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured,
+ context, var_result.value(), last_part));
Goto(&out);
}
@@ -3003,13 +3057,13 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
const int kReplaceValueArg = 1;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
Node* const replace_value = args.GetOptionalArgumentValue(kReplaceValueArg);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const context = Parameter(Descriptor::kContext);
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
//
@@ -3062,8 +3116,9 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
Node* const native_context = LoadNativeContext(context);
Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
- Node* const match_indices = RegExpExecInternal(context, regexp, string,
- smi_zero, internal_match_info);
+ Node* const match_indices =
+ CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
+ smi_zero, internal_match_info);
Node* const null = NullConstant();
Label if_matched(this);
GotoIfNot(WordEqual(match_indices, null), &if_matched);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 5616769dd9..251e8c035e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -41,7 +41,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* AllocateRegExpResult(Node* context, Node* length, Node* index,
Node* input);
- Node* FastLoadLastIndex(Node* regexp);
+ TNode<Object> FastLoadLastIndex(Node* regexp);
Node* SlowLoadLastIndex(Node* context, Node* regexp);
Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
@@ -89,6 +89,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// Performs fast path checks on the given object itself, but omits prototype
// checks.
Node* IsFastRegExpNoPrototype(Node* const context, Node* const object);
+ TNode<BoolT> IsFastRegExpWithOriginalExec(TNode<Context> context,
+ TNode<JSRegExp> object);
Node* IsFastRegExpNoPrototype(Node* const context, Node* const object,
Node* const map);
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 49506f4697..86bf06b658 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -31,7 +31,8 @@ BUILTIN(RegExpPrototypeToString) {
Handle<Object> source;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, source,
- JSReceiver::GetProperty(recv, isolate->factory()->source_string()));
+ JSReceiver::GetProperty(isolate, recv,
+ isolate->factory()->source_string()));
Handle<String> source_str;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source_str,
Object::ToString(isolate, source));
@@ -43,7 +44,8 @@ BUILTIN(RegExpPrototypeToString) {
Handle<Object> flags;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, flags,
- JSReceiver::GetProperty(recv, isolate->factory()->flags_string()));
+ JSReceiver::GetProperty(isolate, recv,
+ isolate->factory()->flags_string()));
Handle<String> flags_str;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags_str,
Object::ToString(isolate, flags));
@@ -80,7 +82,7 @@ DEFINE_CAPTURE_GETTER(9)
BUILTIN(RegExpInputGetter) {
HandleScope scope(isolate);
Handle<Object> obj(isolate->regexp_last_match_info()->LastInput(), isolate);
- return obj->IsUndefined(isolate) ? isolate->heap()->empty_string()
+ return obj->IsUndefined(isolate) ? ReadOnlyRoots(isolate).empty_string()
: String::cast(*obj);
}
@@ -91,7 +93,7 @@ BUILTIN(RegExpInputSetter) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
Object::ToString(isolate, value));
isolate->regexp_last_match_info()->SetLastInput(*str);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Getters for the static properties lastMatch, lastParen, leftContext, and
@@ -108,7 +110,9 @@ BUILTIN(RegExpLastParenGetter) {
HandleScope scope(isolate);
Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
const int length = match_info->NumberOfCaptureRegisters();
- if (length <= 2) return isolate->heap()->empty_string(); // No captures.
+ if (length <= 2) {
+ return ReadOnlyRoots(isolate).empty_string(); // No captures.
+ }
DCHECK_EQ(0, length % 2);
const int last_capture = (length / 2) - 1;
@@ -123,7 +127,7 @@ BUILTIN(RegExpLeftContextGetter) {
HandleScope scope(isolate);
Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
const int start_index = match_info->Capture(0);
- Handle<String> last_subject(match_info->LastSubject());
+ Handle<String> last_subject(match_info->LastSubject(), isolate);
return *isolate->factory()->NewSubString(last_subject, 0, start_index);
}
@@ -131,7 +135,7 @@ BUILTIN(RegExpRightContextGetter) {
HandleScope scope(isolate);
Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
const int start_index = match_info->Capture(1);
- Handle<String> last_subject(match_info->LastSubject());
+ Handle<String> last_subject(match_info->LastSubject(), isolate);
const int len = last_subject->length();
return *isolate->factory()->NewSubString(last_subject, start_index, len);
}
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 2c9f0791da..52673bfd36 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -111,8 +111,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
Node* context) {
// Check if the index is in bounds. If not, throw RangeError.
Label check_passed(this);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
+ Node* array_length_word32 =
+ TruncateTaggedToWord32(context, LoadTypedArrayLength(CAST(array)));
GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
@@ -127,12 +127,10 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
// ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
// neutered and the TypedArray length can't change either, so skipping this
// check in Release mode is safe.
- CSA_ASSERT(
- this,
- Uint32LessThan(
- index_word,
- TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset))));
+ CSA_ASSERT(this,
+ Uint32LessThan(index_word,
+ TruncateTaggedToWord32(
+ context, LoadTypedArrayLength(CAST(array)))));
}
#endif
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index a847a5d892..cb9ecfbc61 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -29,7 +29,8 @@ inline bool AtomicIsLockFree(uint32_t size) {
BUILTIN(AtomicsIsLockFree) {
HandleScope scope(isolate);
Handle<Object> size = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size,
+ Object::ToNumber(isolate, size));
return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number()));
}
@@ -93,7 +94,7 @@ BUILTIN(AtomicsWake) {
isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true));
Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
- if (maybe_index.IsNothing()) return isolate->heap()->exception();
+ if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception();
size_t i = maybe_index.FromJust();
uint32_t c;
@@ -130,7 +131,7 @@ BUILTIN(AtomicsWait) {
isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true));
Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
- if (maybe_index.IsNothing()) return isolate->heap()->exception();
+ if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception();
size_t i = maybe_index.FromJust();
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
@@ -139,13 +140,13 @@ BUILTIN(AtomicsWait) {
double timeout_number;
if (timeout->IsUndefined(isolate)) {
- timeout_number = isolate->heap()->infinity_value()->Number();
+ timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout,
- Object::ToNumber(timeout));
+ Object::ToNumber(isolate, timeout));
timeout_number = timeout->Number();
if (std::isnan(timeout_number))
- timeout_number = isolate->heap()->infinity_value()->Number();
+ timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number();
else if (timeout_number < 0)
timeout_number = 0;
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index ce8075c062..5524db56da 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -347,6 +347,13 @@ TF_BUILTIN(StringAdd_ConvertRight_NotTenured, StringBuiltinsAssembler) {
right);
}
+TF_BUILTIN(SubString, StringBuiltinsAssembler) {
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Smi> from = CAST(Parameter(Descriptor::kFrom));
+ TNode<Smi> to = CAST(Parameter(Descriptor::kTo));
+ Return(SubString(string, SmiUntag(from), SmiUntag(to)));
+}
+
void StringBuiltinsAssembler::GenerateStringAt(char const* method_name,
TNode<Context> context,
Node* receiver,
@@ -628,8 +635,8 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ Node* context = Parameter(Descriptor::kContext);
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
TNode<Smi> smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS));
@@ -787,9 +794,10 @@ TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
CodeStubArguments arguments(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
Node* receiver = arguments.GetReceiver();
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
// Check that {receiver} is coercible to Object and convert it to a String.
receiver = ToThisString(context, receiver, "String.prototype.concat");
@@ -990,24 +998,26 @@ TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
// ES6 String.prototype.includes(searchString [, position])
// #sec-string.prototype.includes
TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
- Generate(kIncludes);
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ Generate(kIncludes, argc, context);
}
// ES6 String.prototype.indexOf(searchString [, position])
// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
- Generate(kIndexOf);
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ Generate(kIndexOf, argc, context);
}
-void StringIncludesIndexOfAssembler::Generate(SearchVariant variant) {
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
- CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
+ TNode<IntPtrT> argc,
+ TNode<Context> context) {
+ CodeStubArguments arguments(this, argc);
Node* const receiver = arguments.GetReceiver();
- // From now on use word-size argc value.
- argc = arguments.GetLength(INTPTR_PARAMETERS);
VARIABLE(var_search_string, MachineRepresentation::kTagged);
VARIABLE(var_position, MachineRepresentation::kTagged);
@@ -1620,10 +1630,8 @@ class StringPadAssembler : public StringBuiltinsAssembler {
protected:
enum Variant { kStart, kEnd };
- void Generate(Variant variant, const char* method_name) {
- Node* const context = Parameter(BuiltinDescriptor::kContext);
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ void Generate(Variant variant, const char* method_name, TNode<IntPtrT> argc,
+ TNode<Context> context) {
CodeStubArguments arguments(this, argc);
Node* const receiver = arguments.GetReceiver();
Node* const receiver_string = ToThisString(context, receiver, method_name);
@@ -1733,11 +1741,19 @@ class StringPadAssembler : public StringBuiltinsAssembler {
};
TF_BUILTIN(StringPrototypePadEnd, StringPadAssembler) {
- Generate(kEnd, "String.prototype.padEnd");
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(kEnd, "String.prototype.padEnd", argc, context);
}
TF_BUILTIN(StringPrototypePadStart, StringPadAssembler) {
- Generate(kStart, "String.prototype.padStart");
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(kStart, "String.prototype.padStart", argc, context);
}
// ES6 #sec-string.prototype.search
@@ -1757,12 +1773,12 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
const int kStart = 0;
const int kEnd = 1;
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
TNode<Object> start = args.GetOptionalArgumentValue(kStart);
TNode<Object> end = args.GetOptionalArgumentValue(kEnd);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// 1. Let O be ? RequireObjectCoercible(this value).
RequireObjectCoercible(context, receiver, "String.prototype.slice");
@@ -1821,9 +1837,8 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
ToDirectStringAssembler to_direct(state(), subject_string);
to_direct.TryToDirect(&call_runtime);
- TNode<FixedArray> elements =
- AllocateFixedArray(PACKED_ELEMENTS, length_smi,
- AllocationFlag::kAllowLargeObjectAllocation);
+ TNode<FixedArray> elements = AllocateFixedArray(
+ PACKED_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation);
// Don't allocate anything while {string_data} is live!
TNode<RawPtrT> string_data = UncheckedCast<RawPtrT>(
to_direct.PointerToData(&fill_thehole_and_call_runtime));
@@ -1882,13 +1897,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const int kLimitArg = 1;
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Smi> smi_zero = SmiConstant(0);
@@ -1984,13 +1999,13 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
const int kLengthArg = 1;
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
TNode<Object> start = args.GetOptionalArgumentValue(kStartArg);
TNode<Object> length = args.GetOptionalArgumentValue(kLengthArg);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label out(this);
@@ -2140,13 +2155,13 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
const int kEndArg = 1;
Node* const argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
Node* const start = args.GetOptionalArgumentValue(kStartArg);
Node* const end = args.GetOptionalArgumentValue(kEndArg);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const context = Parameter(Descriptor::kContext);
Label out(this);
@@ -2191,26 +2206,37 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
// ES6 #sec-string.prototype.trim
TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
- Generate(String::kTrim, "String.prototype.trim");
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(String::kTrim, "String.prototype.trim", argc, context);
}
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
- Generate(String::kTrimStart, "String.prototype.trimLeft");
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(String::kTrimStart, "String.prototype.trimLeft", argc, context);
}
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
- Generate(String::kTrimEnd, "String.prototype.trimRight");
+ TNode<IntPtrT> argc =
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Generate(String::kTrimEnd, "String.prototype.trimRight", argc, context);
}
void StringTrimAssembler::Generate(String::TrimMode mode,
- const char* method_name) {
+ const char* method_name, TNode<IntPtrT> argc,
+ TNode<Context> context) {
Label return_emptystring(this), if_runtime(this);
- Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
- CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+ CodeStubArguments arguments(this, argc);
Node* const receiver = arguments.GetReceiver();
// Check that {receiver} is coercible to Object and convert it to a String.
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index c3dd543a98..06ac127f13 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -120,7 +120,8 @@ class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
protected:
enum SearchVariant { kIncludes, kIndexOf };
- void Generate(SearchVariant variant);
+ void Generate(SearchVariant variant, TNode<IntPtrT> argc,
+ TNode<Context> context);
};
class StringTrimAssembler : public StringBuiltinsAssembler {
@@ -132,7 +133,8 @@ class StringTrimAssembler : public StringBuiltinsAssembler {
Label* const if_not_whitespace);
protected:
- void Generate(String::TrimMode mode, const char* method);
+ void Generate(String::TrimMode mode, const char* method, TNode<IntPtrT> argc,
+ TNode<Context> context);
void ScanForNonWhiteSpaceOrLineTerminator(Node* const string_data,
Node* const string_data_offset,
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 854bb5e58a..e52fbd577d 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -19,7 +19,8 @@ namespace internal {
namespace { // for String.fromCodePoint
bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
- if (!value->IsNumber() && !Object::ToNumber(value).ToHandle(&value)) {
+ if (!value->IsNumber() &&
+ !Object::ToNumber(isolate, value).ToHandle(&value)) {
return false;
}
@@ -37,7 +38,8 @@ bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
Handle<Object> value = args.at(1 + index);
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value,
+ Object::ToNumber(isolate, value), -1);
if (!IsValidCodePoint(isolate, value)) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidCodePoint, value));
@@ -52,7 +54,7 @@ uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
BUILTIN(StringFromCodePoint) {
HandleScope scope(isolate);
int const length = args.length() - 1;
- if (length == 0) return isolate->heap()->empty_string();
+ if (length == 0) return ReadOnlyRoots(isolate).empty_string();
DCHECK_LT(0, length);
// Optimistically assume that the resulting String contains only one byte
@@ -64,7 +66,7 @@ BUILTIN(StringFromCodePoint) {
for (index = 0; index < length; index++) {
code = NextCodePoint(isolate, args, index);
if (code < 0) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
if (code > String::kMaxOneByteCharCode) {
break;
@@ -94,7 +96,7 @@ BUILTIN(StringFromCodePoint) {
}
code = NextCodePoint(isolate, args, index);
if (code < 0) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
}
@@ -122,7 +124,7 @@ BUILTIN(StringPrototypeEndsWith) {
Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
if (is_reg_exp.IsNothing()) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
if (is_reg_exp.FromJust()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -146,10 +148,10 @@ BUILTIN(StringPrototypeEndsWith) {
}
int start = end - search_string->length();
- if (start < 0) return isolate->heap()->false_value();
+ if (start < 0) return ReadOnlyRoots(isolate).false_value();
- str = String::Flatten(str);
- search_string = String::Flatten(search_string);
+ str = String::Flatten(isolate, str);
+ search_string = String::Flatten(isolate, search_string);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
String::FlatContent str_content = str->GetFlatContent();
@@ -169,10 +171,10 @@ BUILTIN(StringPrototypeEndsWith) {
for (int i = 0; i < search_string->length(); i++) {
if (str_reader.Get(start + i) != search_reader.Get(i)) {
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
}
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
}
// ES6 section 21.1.3.9
@@ -219,8 +221,8 @@ BUILTIN(StringPrototypeLocaleCompare) {
int d = str1->Get(0) - str2->Get(0);
if (d != 0) return Smi::FromInt(d);
- str1 = String::Flatten(str1);
- str2 = String::Flatten(str2);
+ str1 = String::Flatten(isolate, str1);
+ str2 = String::Flatten(isolate, str2);
DisallowHeapAllocation no_gc;
String::FlatContent flat1 = str1->GetFlatContent();
@@ -252,13 +254,13 @@ BUILTIN(StringPrototypeNormalize) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
Object::ToString(isolate, form_input));
- if (!(String::Equals(form,
+ if (!(String::Equals(isolate, form,
isolate->factory()->NewStringFromStaticChars("NFC")) ||
- String::Equals(form,
+ String::Equals(isolate, form,
isolate->factory()->NewStringFromStaticChars("NFD")) ||
- String::Equals(form,
+ String::Equals(isolate, form,
isolate->factory()->NewStringFromStaticChars("NFKC")) ||
- String::Equals(form,
+ String::Equals(isolate, form,
isolate->factory()->NewStringFromStaticChars("NFKD")))) {
Handle<String> valid_forms =
isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
@@ -280,7 +282,7 @@ BUILTIN(StringPrototypeStartsWith) {
Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
if (is_reg_exp.IsNothing()) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
if (is_reg_exp.FromJust()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -304,18 +306,19 @@ BUILTIN(StringPrototypeStartsWith) {
}
if (start + search_string->length() > str->length()) {
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
- FlatStringReader str_reader(isolate, String::Flatten(str));
- FlatStringReader search_reader(isolate, String::Flatten(search_string));
+ FlatStringReader str_reader(isolate, String::Flatten(isolate, str));
+ FlatStringReader search_reader(isolate,
+ String::Flatten(isolate, search_string));
for (int i = 0; i < search_string->length(); i++) {
if (str_reader.Get(start + i) != search_reader.Get(i)) {
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
}
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
}
#ifndef V8_INTL_SUPPORT
@@ -430,7 +433,7 @@ template <class Converter>
V8_WARN_UNUSED_RESULT static Object* ConvertCase(
Handle<String> s, Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
- s = String::Flatten(s);
+ s = String::Flatten(isolate, s);
int length = s->length();
// Assume that the string is not empty; we need this assumption later
if (length == 0) return *s;
@@ -525,14 +528,14 @@ BUILTIN(StringRaw) {
Object::ToObject(isolate, templ));
Handle<Object> raw;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw,
- Object::GetProperty(cooked, raw_string));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw, Object::GetProperty(isolate, cooked, raw_string));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw,
Object::ToObject(isolate, raw));
Handle<Object> raw_len;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_len,
- Object::GetProperty(raw, isolate->factory()->length_string()));
+ Object::GetProperty(isolate, raw, isolate->factory()->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, raw_len,
Object::ToLength(isolate, raw_len));
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 4015a2dcba..425fbab5d1 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -11,6 +11,16 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 #sec-symbol-objects
+// ES ##sec-symbol.prototype.description
+TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+
+ Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype.description");
+ Node* result = LoadObjectField(value, Symbol::kNameOffset);
+ Return(result);
+}
// ES6 #sec-symbol.prototype-@@toprimitive
TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 1343a293bd..9ebb8c499d 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -57,7 +57,7 @@ BUILTIN(SymbolKeyFor) {
result = symbol->name();
DCHECK(result->IsString());
} else {
- result = isolate->heap()->undefined_value();
+ result = ReadOnlyRoots(isolate).undefined_value();
}
DCHECK_EQ(isolate->heap()->public_symbol_table()->SlowReverseLookup(*symbol),
result);
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
deleted file mode 100644
index a10c136338..0000000000
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/api.h"
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/json-stringifier.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-using v8::tracing::TracedValue;
-
-#define MAX_STACK_LENGTH 100
-
-class MaybeUtf8 {
- public:
- explicit MaybeUtf8(Isolate* isolate, Handle<String> string) : buf_(data_) {
- string = String::Flatten(string);
- int len;
- if (string->IsOneByteRepresentation()) {
- // Technically this allows unescaped latin1 characters but the trace
- // events mechanism currently does the same and the current consuming
- // tools are tolerant of it. A more correct approach here would be to
- // escape non-ascii characters but this is easier and faster.
- len = string->length();
- AllocateSufficientSpace(len);
- if (len > 0) {
- // Why copy? Well, the trace event mechanism requires null-terminated
- // strings, the bytes we get from SeqOneByteString are not. buf_ is
- // guaranteed to be null terminated.
- memcpy(buf_, Handle<SeqOneByteString>::cast(string)->GetChars(), len);
- }
- } else {
- Local<v8::String> local = Utils::ToLocal(string);
- len = local->Utf8Length();
- AllocateSufficientSpace(len);
- if (len > 0) {
- local->WriteUtf8(reinterpret_cast<char*>(buf_));
- }
- }
- buf_[len] = 0;
- }
- const char* operator*() const { return reinterpret_cast<const char*>(buf_); }
-
- private:
- void AllocateSufficientSpace(int len) {
- if (len + 1 > MAX_STACK_LENGTH) {
- allocated_.reset(new uint8_t[len + 1]);
- buf_ = allocated_.get();
- }
- }
-
- // In the most common cases, the buffer here will be stack allocated.
- // A heap allocation will only occur if the data is more than MAX_STACK_LENGTH
- // Given that this is used primarily for trace event categories and names,
- // the MAX_STACK_LENGTH should be more than enough.
- uint8_t* buf_;
- uint8_t data_[MAX_STACK_LENGTH];
- std::unique_ptr<uint8_t> allocated_;
-};
-
-class JsonTraceValue : public ConvertableToTraceFormat {
- public:
- explicit JsonTraceValue(Isolate* isolate, Handle<String> object) {
- // object is a JSON string serialized using JSON.stringify() from within
- // the BUILTIN(Trace) method. This may (likely) contain UTF8 values so
- // to grab the appropriate buffer data we have to serialize it out. We
- // hold on to the bits until the AppendAsTraceFormat method is called.
- MaybeUtf8 data(isolate, object);
- data_ = *data;
- }
-
- void AppendAsTraceFormat(std::string* out) const override { *out += data_; }
-
- private:
- std::string data_;
-};
-
-const uint8_t* GetCategoryGroupEnabled(Isolate* isolate,
- Handle<String> string) {
- MaybeUtf8 category(isolate, string);
- return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(*category);
-}
-
-#undef MAX_STACK_LENGTH
-
-} // namespace
-
-// Builins::kIsTraceCategoryEnabled(category) : bool
-BUILTIN(IsTraceCategoryEnabled) {
- HandleScope scope(isolate);
- Handle<Object> category = args.atOrUndefined(isolate, 1);
- if (!category->IsString()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventCategoryError));
- }
- return isolate->heap()->ToBoolean(
- *GetCategoryGroupEnabled(isolate, Handle<String>::cast(category)));
-}
-
-// Builtins::kTrace(phase, category, name, id, data) : bool
-BUILTIN(Trace) {
- HandleScope handle_scope(isolate);
-
- Handle<Object> phase_arg = args.atOrUndefined(isolate, 1);
- Handle<Object> category = args.atOrUndefined(isolate, 2);
- Handle<Object> name_arg = args.atOrUndefined(isolate, 3);
- Handle<Object> id_arg = args.atOrUndefined(isolate, 4);
- Handle<Object> data_arg = args.atOrUndefined(isolate, 5);
-
- const uint8_t* category_group_enabled =
- GetCategoryGroupEnabled(isolate, Handle<String>::cast(category));
-
- // Exit early if the category group is not enabled.
- if (!*category_group_enabled) {
- return isolate->heap()->false_value();
- }
-
- if (!phase_arg->IsNumber()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventPhaseError));
- }
- if (!category->IsString()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventCategoryError));
- }
- if (!name_arg->IsString()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventNameError));
- }
-
- uint32_t flags = TRACE_EVENT_FLAG_COPY;
- int32_t id = 0;
- if (!id_arg->IsNullOrUndefined(isolate)) {
- if (!id_arg->IsNumber()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventIDError));
- }
- flags |= TRACE_EVENT_FLAG_HAS_ID;
- id = DoubleToInt32(id_arg->Number());
- }
-
- Handle<String> name_str = Handle<String>::cast(name_arg);
- if (name_str->length() == 0) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kTraceEventNameLengthError));
- }
- MaybeUtf8 name(isolate, name_str);
-
- // We support passing one additional trace event argument with the
- // name "data". Any JSON serializable value may be passed.
- static const char* arg_name = "data";
- int32_t num_args = 0;
- uint8_t arg_type;
- uint64_t arg_value;
-
- if (!data_arg->IsUndefined(isolate)) {
- // Serializes the data argument as a JSON string, which is then
- // copied into an object. This eliminates duplicated code but
- // could have perf costs. It is also subject to all the same
- // limitations as JSON.stringify() as it relates to circular
- // references and value limitations (e.g. BigInt is not supported).
- JsonStringifier stringifier(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- stringifier.Stringify(data_arg, isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value()));
- std::unique_ptr<JsonTraceValue> traced_value;
- traced_value.reset(
- new JsonTraceValue(isolate, Handle<String>::cast(result)));
- tracing::SetTraceValue(std::move(traced_value), &arg_type, &arg_value);
- num_args++;
- }
-
- TRACE_EVENT_API_ADD_TRACE_EVENT(
- static_cast<char>(DoubleToInt32(phase_arg->Number())),
- category_group_enabled, *name, tracing::kGlobalScope, id, tracing::kNoId,
- num_args, &arg_name, &arg_type, &arg_value, flags);
-
- return isolate->heap()->true_value();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 2abde8e126..595ec1f97b 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -36,7 +36,8 @@ TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(kind));
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(kind),
+ isolate());
var_typed_map = HeapConstant(map);
});
@@ -502,8 +503,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
Goto(&check_for_sab);
BIND(&if_notdetached);
- source_length =
- CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
+ source_length = LoadTypedArrayLength(typed_array);
Goto(&check_for_sab);
// The spec requires that constructing a typed array using a SAB-backed typed
@@ -590,8 +590,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
BIND(&fill);
GotoIf(SmiEqual(length, SmiConstant(0)), &done);
- TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
- TNode<Int32T> source_kind = LoadMapElementsKind(LoadMap(array_like));
+ TNode<Int32T> holder_kind = LoadElementsKind(holder);
+ TNode<Int32T> source_kind = LoadElementsKind(array_like);
GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
// Copy using the elements accessor.
@@ -758,12 +758,11 @@ TF_BUILTIN(TypedArrayConstructorLazyDeoptContinuation,
// ES #sec-typedarray-constructors
TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* arg1 = args.GetOptionalArgumentValue(0);
Node* arg2 = args.GetOptionalArgumentValue(1);
@@ -855,11 +854,6 @@ TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
}
-TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
- TNode<JSTypedArray> typed_array) {
- return LoadMapElementsKind(LoadMap(typed_array));
-}
-
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TNode<Word32T> elements_kind) {
TVARIABLE(IntPtrT, element_size);
@@ -873,42 +867,6 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
return element_size.value();
}
-TF_BUILTIN(TypedArrayLoadElementAsTagged, TypedArrayBuiltinsAssembler) {
- TVARIABLE(Object, result);
- TNode<JSTypedArray> array = CAST(Parameter(Descriptor::kArray));
- TNode<Smi> kind = CAST(Parameter(Descriptor::kKind));
- TNode<Smi> index_node = CAST(Parameter(Descriptor::kIndex));
-
- TNode<RawPtrT> data_pointer = UncheckedCast<RawPtrT>(LoadDataPtr(array));
- TNode<Int32T> elements_kind = SmiToInt32(kind);
-
- DispatchTypedArrayByElementsKind(
- elements_kind, [&](ElementsKind el_kind, int, int) {
- result = CAST(LoadFixedTypedArrayElementAsTagged(
- data_pointer, index_node, el_kind, SMI_PARAMETERS));
- });
-
- Return(result.value());
-}
-
-TF_BUILTIN(TypedArrayStoreElementFromTagged, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSTypedArray> array = CAST(Parameter(Descriptor::kArray));
- TNode<Smi> kind = CAST(Parameter(Descriptor::kKind));
- TNode<Smi> index_node = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
-
- TNode<FixedTypedArrayBase> elements = CAST(LoadElements(array));
- TNode<Int32T> elements_kind = SmiToInt32(kind);
-
- DispatchTypedArrayByElementsKind(
- elements_kind, [&](ElementsKind el_kind, int, int) {
- StoreFixedTypedArrayElementFromTagged(context, elements, index_node,
- value, el_kind, SMI_PARAMETERS);
- });
- Return(UndefinedConstant());
-}
-
TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
TNode<Context> context, TNode<JSTypedArray> exemplar) {
TVARIABLE(IntPtrT, context_slot);
@@ -986,8 +944,7 @@ TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
// If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
// exception.
Label if_length_is_not_short(this);
- TNode<Smi> new_length =
- LoadObjectField<Smi>(new_typed_array, JSTypedArray::kLengthOffset);
+ TNode<Smi> new_length = LoadTypedArrayLength(new_typed_array);
GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
@@ -1049,10 +1006,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Check for possible range errors.
- TNode<IntPtrT> source_length =
- LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
- TNode<IntPtrT> target_length =
- LoadAndUntagObjectField(target, JSTypedArray::kLengthOffset);
+ TNode<IntPtrT> source_length = SmiUntag(LoadTypedArrayLength(source));
+ TNode<IntPtrT> target_length = SmiUntag(LoadTypedArrayLength(target));
TNode<IntPtrT> required_target_length = IntPtrAdd(source_length, offset);
GotoIf(IntPtrGreaterThan(required_target_length, target_length),
@@ -1102,8 +1057,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
IsBigInt64ElementsKind(target_el_kind)),
&exception);
- TNode<IntPtrT> source_length =
- LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
+ TNode<IntPtrT> source_length = SmiUntag(LoadTypedArrayLength(source));
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
offset);
Goto(&out);
@@ -1124,8 +1078,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
TNode<IntPtrT> source_length = SmiUntag(LoadFastJSArrayLength(source));
- TNode<IntPtrT> target_length =
- LoadAndUntagObjectField(target, JSTypedArray::kLengthOffset);
+ TNode<IntPtrT> target_length = SmiUntag(LoadTypedArrayLength(target));
// Maybe out of bounds?
GotoIf(IntPtrGreaterThan(IntPtrAdd(source_length, offset), target_length),
@@ -1147,7 +1100,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
};
STATIC_ASSERT(arraysize(values) == arraysize(labels));
- TNode<Int32T> source_elements_kind = LoadMapElementsKind(LoadMap(source));
+ TNode<Int32T> source_elements_kind = LoadElementsKind(source);
Switch(source_elements_kind, call_runtime, values, labels,
arraysize(values));
}
@@ -1246,32 +1199,12 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-TNode<BoolT> TypedArrayBuiltinsAssembler::NumberIsNaN(TNode<Number> value) {
- Label is_heapnumber(this), done(this);
- TVARIABLE(BoolT, result);
-
- GotoIf(TaggedIsNotSmi(value), &is_heapnumber);
- result = Int32FalseConstant();
- Goto(&done);
-
- BIND(&is_heapnumber);
- {
- CSA_ASSERT(this, IsHeapNumber(CAST(value)));
-
- TNode<Float64T> value_f = LoadHeapNumberValue(CAST(value));
- result = Float64NotEqual(value_f, value_f);
- Goto(&done);
- }
-
- BIND(&done);
- return result.value();
-}
-
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this),
if_offset_is_out_of_bounds(this, Label::kDeferred),
@@ -1359,16 +1292,16 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
if_typed_array_is_neutered(this, Label::kDeferred),
if_bigint_mixed_types(this, Label::kDeferred);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Object> receiver = args.GetReceiver();
TNode<JSTypedArray> source =
ValidateTypedArray(context, receiver, method_name);
- TNode<Smi> source_length =
- LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+ TNode<Smi> source_length = LoadTypedArrayLength(source);
// Convert start offset argument to integer, and calculate relative offset.
TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
@@ -1434,15 +1367,24 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
#ifdef DEBUG
- TNode<IntPtrT> target_byte_length =
- LoadAndUntagObjectField(result_array, JSTypedArray::kByteLengthOffset);
+ Label done(this), to_intptr_failed(this, Label::kDeferred);
+ TNode<IntPtrT> target_byte_length = TryToIntptr(
+ LoadObjectField<Number>(result_array, JSTypedArray::kByteLengthOffset),
+ &to_intptr_failed);
CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length));
- TNode<IntPtrT> source_byte_length =
- LoadAndUntagObjectField(source, JSTypedArray::kByteLengthOffset);
+ TNode<IntPtrT> source_byte_length = TryToIntptr(
+ LoadObjectField<Number>(source, JSTypedArray::kByteLengthOffset),
+ &to_intptr_failed);
TNode<IntPtrT> source_size_in_bytes =
IntPtrSub(source_byte_length, source_start_bytes);
CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes));
+ Goto(&done);
+
+ BIND(&to_intptr_failed);
+ Unreachable();
+
+ BIND(&done);
#endif // DEBUG
CallCMemmove(target_data_ptr, source_start, count_bytes);
@@ -1475,9 +1417,10 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
TVARIABLE(Smi, var_begin);
TVARIABLE(Smi, var_end);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
// 1. Let O be the this value.
// 3. If O does not have a [[TypedArrayName]] internal slot, throw a TypeError
@@ -1490,8 +1433,7 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
// 5. Let buffer be O.[[ViewedArrayBuffer]].
TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
// 6. Let srcLength be O.[[ArrayLength]].
- TNode<Smi> source_length =
- LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+ TNode<Smi> source_length = LoadTypedArrayLength(source);
// 7. Let relativeBegin be ? ToInteger(begin).
// 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
@@ -1572,7 +1514,7 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
// performance.
BIND(&if_receiverisheapobject);
Node* elements_kind =
- Int32Sub(LoadMapElementsKind(LoadMap(receiver)),
+ Int32Sub(LoadElementsKind(receiver),
Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels,
kTypedElementsKindCount);
@@ -1632,11 +1574,11 @@ TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
// ES6 #sec-%typedarray%.of
TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// 1. Let len be the actual number of arguments passed to this function.
TNode<IntPtrT> length = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
// 2. Let items be the List of arguments passed to this function.
CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS,
CodeStubArguments::ReceiverMode::kHasReceiver);
@@ -1784,7 +1726,7 @@ TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
// ES6 #sec-%typedarray%.from
TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label check_iterator(this), from_array_like(this), fast_path(this),
slow_path(this), create_typed_array(this),
@@ -1794,7 +1736,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
if_neutered(this, Label::kDeferred);
CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Object> source = args.GetOptionalArgumentValue(0);
// 5. If thisArg is present, let T be thisArg; else let T be undefined.
@@ -1966,9 +1909,10 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
const char* method_name = "%TypedArray%.prototype.filter";
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(
- this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ this,
+ ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
Label if_callback_not_callable(this, Label::kDeferred),
detached(this, Label::kDeferred);
@@ -1980,7 +1924,7 @@ TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
ValidateTypedArray(context, receiver, method_name);
// 3. Let len be O.[[ArrayLength]].
- TNode<Smi> length = LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+ TNode<Smi> length = LoadTypedArrayLength(source);
// 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 6464ce70a4..e74469187f 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -5,15 +5,15 @@
#ifndef V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
#define V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
namespace v8 {
namespace internal {
-class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
public:
explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : BaseBuiltinsFromDSLAssembler(state) {}
TNode<JSTypedArray> SpeciesCreateByLength(TNode<Context> context,
TNode<JSTypedArray> exemplar,
@@ -71,16 +71,9 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
// Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
- // Loads the element kind of TypedArray instance.
- TNode<Word32T> LoadElementsKind(TNode<JSTypedArray> typed_array);
-
// Returns the byte size of an element for a TypedArray elements kind.
TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
- TNode<Smi> LoadTypedArrayLength(TNode<JSTypedArray> typed_array) {
- return LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
- }
-
TNode<JSArrayBuffer> LoadTypedArrayBuffer(TNode<JSTypedArray> typed_array) {
return LoadObjectField<JSArrayBuffer>(typed_array,
JSTypedArray::kBufferOffset);
@@ -141,10 +134,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
-
- // Returns true iff number is NaN.
- // TOOD(szuend): Remove when UncheckedCasts are supported in Torque.
- TNode<BoolT> NumberIsNaN(TNode<Number> number);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index 6fcc279c66..6750f2863b 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -95,7 +95,7 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
DCHECK_GE(len - count, 0);
Handle<FixedTypedArrayBase> elements(
- FixedTypedArrayBase::cast(array->elements()));
+ FixedTypedArrayBase::cast(array->elements()), isolate);
size_t element_size = array->element_size();
to = to * element_size;
from = from * element_size;
@@ -122,7 +122,7 @@ BUILTIN(TypedArrayPrototypeFill) {
BigInt::FromObject(isolate, obj_value));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
- Object::ToNumber(obj_value));
+ Object::ToNumber(isolate, obj_value));
}
int64_t len = array->length_value();
@@ -170,10 +170,10 @@ BUILTIN(TypedArrayPrototypeIncludes) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- if (args.length() < 2) return isolate->heap()->false_value();
+ if (args.length() < 2) return ReadOnlyRoots(isolate).false_value();
int64_t len = array->length_value();
- if (len == 0) return isolate->heap()->false_value();
+ if (len == 0) return ReadOnlyRoots(isolate).false_value();
int64_t index = 0;
if (args.length() > 2) {
@@ -184,14 +184,15 @@ BUILTIN(TypedArrayPrototypeIncludes) {
}
// TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasNeutered())) return isolate->heap()->false_value();
+ if (V8_UNLIKELY(array->WasNeutered()))
+ return ReadOnlyRoots(isolate).false_value();
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<bool> result = elements->IncludesValue(isolate, array, search_element,
static_cast<uint32_t>(index),
static_cast<uint32_t>(len));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -222,7 +223,7 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
Maybe<int64_t> result = elements->IndexOfValue(isolate, array, search_element,
static_cast<uint32_t>(index),
static_cast<uint32_t>(len));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->NewNumberFromInt64(result.FromJust());
}
@@ -256,7 +257,7 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<int64_t> result = elements->LastIndexOfValue(
isolate, array, search_element, static_cast<uint32_t>(index));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->NewNumberFromInt64(result.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index 6af5eff357..9984330980 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -38,11 +38,6 @@ class CodeAssemblerState;
Node* Parameter(Descriptor::ParameterIndices index) { \
return CodeAssembler::Parameter(static_cast<int>(index)); \
} \
- /* TODO(ishell): Remove this way of accessing parameters once the */ \
- /* JSFunction linkage arguments are reordered. */ \
- Node* Parameter(BuiltinDescriptor::ParameterIndices index) { \
- return CodeAssembler::Parameter(static_cast<int>(index)); \
- } \
}; \
void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
Name##Assembler assembler(state); \
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 87fe14743a..5826ec546e 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -10,17 +10,111 @@
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
+class WasmBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit WasmBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
-TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
- TailCallRuntime(Runtime::kWasmStackGuard, NoContextConstant());
+ protected:
+ TNode<Object> UncheckedParameter(int index) {
+ return UncheckedCast<Object>(Parameter(index));
+ }
+
+ TNode<Code> LoadBuiltinFromFrame(Builtins::Name id) {
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<IntPtrT> roots = UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), instance,
+ IntPtrConstant(WasmInstanceObject::kRootsArrayAddressOffset -
+ kHeapObjectTag)));
+ TNode<Code> target = UncheckedCast<Code>(Load(
+ MachineType::TaggedPointer(), roots,
+ IntPtrConstant(Heap::roots_to_builtins_offset() + id * kPointerSize)));
+ return target;
+ }
+
+ TNode<Object> LoadInstanceFromFrame() {
+ return UncheckedCast<Object>(
+ LoadFromParentFrame(WasmCompiledFrameConstants::kWasmInstanceOffset));
+ }
+
+ TNode<Code> LoadCEntryFromInstance(TNode<Object> instance) {
+ return UncheckedCast<Code>(
+ Load(MachineType::AnyTagged(), instance,
+ IntPtrConstant(WasmInstanceObject::kCEntryStubOffset -
+ kHeapObjectTag)));
+ }
+
+ TNode<Code> LoadCEntryFromFrame() {
+ return LoadCEntryFromInstance(LoadInstanceFromFrame());
+ }
+};
+
+TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) {
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+ TailCallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant());
+}
+
+TF_BUILTIN(WasmArgumentsAdaptor, WasmBuiltinsAssembler) {
+ TNode<Object> context = UncheckedParameter(Descriptor::kContext);
+ TNode<Object> function = UncheckedParameter(Descriptor::kTarget);
+ TNode<Object> new_target = UncheckedParameter(Descriptor::kNewTarget);
+ TNode<Object> argc1 = UncheckedParameter(Descriptor::kActualArgumentsCount);
+ TNode<Object> argc2 = UncheckedParameter(Descriptor::kExpectedArgumentsCount);
+ TNode<Code> target =
+ LoadBuiltinFromFrame(Builtins::kArgumentsAdaptorTrampoline);
+ TailCallStub(ArgumentAdaptorDescriptor{}, target, context, function,
+ new_target, argc1, argc2);
+}
+
+TF_BUILTIN(WasmCallJavaScript, WasmBuiltinsAssembler) {
+ TNode<Object> context = UncheckedParameter(Descriptor::kContext);
+ TNode<Object> function = UncheckedParameter(Descriptor::kFunction);
+ TNode<Object> argc = UncheckedParameter(Descriptor::kActualArgumentsCount);
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kCall_ReceiverIsAny);
+ TailCallStub(CallTrampolineDescriptor{}, target, context, function, argc);
+}
+
+TF_BUILTIN(WasmToNumber, WasmBuiltinsAssembler) {
+ TNode<Object> context = UncheckedParameter(Descriptor::kContext);
+ TNode<Object> argument = UncheckedParameter(Descriptor::kArgument);
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kToNumber);
+ TailCallStub(TypeConversionDescriptor(), target, context, argument);
+}
+
+TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
+ TNode<Code> centry = LoadCEntryFromFrame();
+ TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry,
+ NoContextConstant());
+}
+
+TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
+ TNode<Int32T> num_pages =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kNumPages));
+ Label num_pages_out_of_range(this, Label::kDeferred);
+
+ TNode<BoolT> num_pages_fits_in_smi =
+ IsValidPositiveSmi(ChangeInt32ToIntPtr(num_pages));
+ GotoIfNot(num_pages_fits_in_smi, &num_pages_out_of_range);
+
+ TNode<Smi> num_pages_smi = SmiFromInt32(num_pages);
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+ TNode<Smi> ret_smi = UncheckedCast<Smi>(
+ CallRuntimeWithCEntry(Runtime::kWasmGrowMemory, centry,
+ NoContextConstant(), instance, num_pages_smi));
+ TNode<Int32T> ret = SmiToInt32(ret_smi);
+ ReturnRaw(ret);
+
+ BIND(&num_pages_out_of_range);
+ ReturnRaw(Int32Constant(-1));
}
#define DECLARE_ENUM(name) \
- TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) { \
+ TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \
+ TNode<Code> centry = LoadCEntryFromFrame(); \
int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntime(Runtime::kThrowWasmError, NoContextConstant(), \
- SmiConstant(message_id)); \
+ TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, \
+ NoContextConstant(), SmiConstant(message_id)); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index d4ca442d7a..ee4031d71a 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -65,12 +65,6 @@ const BuiltinMetadata builtin_metadata[] = {
} // namespace
-Builtins::Builtins() : initialized_(false) {
- memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
-}
-
-Builtins::~Builtins() {}
-
BailoutId Builtins::GetContinuationBailoutId(Name name) {
DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC);
return BailoutId(BailoutId::kFirstBuiltinContinuationId + name);
@@ -85,18 +79,11 @@ Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
void Builtins::TearDown() { initialized_ = false; }
-void Builtins::IterateBuiltins(RootVisitor* v) {
- for (int i = 0; i < builtin_count; i++) {
- v->VisitRootPointer(Root::kBuiltins, name(i), &builtins_[i]);
- }
-}
-
const char* Builtins::Lookup(Address pc) {
// may be called during initialization (disassembler!)
if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
- Code* entry = Code::cast(builtins_[i]);
- if (entry->contains(pc)) return name(i);
+ if (isolate_->heap()->builtin(i)->contains(pc)) return name(i);
}
}
return nullptr;
@@ -137,16 +124,15 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
}
void Builtins::set_builtin(int index, HeapObject* builtin) {
- DCHECK(Builtins::IsBuiltinId(index));
- DCHECK(Internals::HasHeapObjectTag(builtin));
- // The given builtin may be completely uninitialized thus we cannot check its
- // type here.
- builtins_[index] = builtin;
+ isolate_->heap()->set_builtin(index, builtin);
}
+Code* Builtins::builtin(int index) { return isolate_->heap()->builtin(index); }
+
Handle<Code> Builtins::builtin_handle(int index) {
DCHECK(IsBuiltinId(index));
- return Handle<Code>(reinterpret_cast<Code**>(builtin_address(index)));
+ return Handle<Code>(
+ reinterpret_cast<Code**>(isolate_->heap()->builtin_address(index)));
}
// static
@@ -157,8 +143,7 @@ int Builtins::GetStackParameterCount(Name name) {
// static
Callable Builtins::CallableFor(Isolate* isolate, Name name) {
- Handle<Code> code(
- reinterpret_cast<Code**>(isolate->builtins()->builtin_address(name)));
+ Handle<Code> code = isolate->builtins()->builtin_handle(name);
CallDescriptors::Key key;
switch (name) {
// This macro is deliberately crafted so as to emit very little code,
@@ -174,11 +159,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
default:
Builtins::Kind kind = Builtins::KindOf(name);
if (kind == TFJ || kind == CPP) {
- return Callable(code, BuiltinDescriptor(isolate));
+ return Callable(code, JSTrampolineDescriptor{});
}
UNREACHABLE();
}
- CallInterfaceDescriptor descriptor(isolate, key);
+ CallInterfaceDescriptor descriptor(key);
return Callable(code, descriptor);
}
@@ -199,24 +184,38 @@ bool Builtins::IsBuiltin(const Code* code) {
return Builtins::IsBuiltinId(code->builtin_index());
}
+bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
+ int* index) const {
+ Heap* heap = isolate_->heap();
+ Address handle_location = maybe_code.address();
+ Address start = heap->builtin_address(0);
+ Address end = heap->builtin_address(Builtins::builtin_count);
+ if (handle_location >= end) return false;
+ if (handle_location < start) return false;
+ *index = static_cast<int>(handle_location - start) >> kPointerSizeLog2;
+ DCHECK(Builtins::IsBuiltinId(*index));
+ return true;
+}
+
// static
-bool Builtins::IsEmbeddedBuiltin(const Code* code) {
-#ifdef V8_EMBEDDED_BUILTINS
- return Builtins::IsBuiltinId(code->builtin_index()) &&
- Builtins::IsIsolateIndependent(code->builtin_index());
-#else
- return false;
-#endif
+bool Builtins::IsIsolateIndependentBuiltin(const Code* code) {
+ if (FLAG_embedded_builtins) {
+ const int builtin_index = code->builtin_index();
+ return Builtins::IsBuiltinId(builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ } else {
+ return false;
+ }
}
// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
-#ifdef V8_EMBEDDED_BUILTINS
- // We don't want to lazy-deserialize off-heap builtins.
- if (Builtins::IsIsolateIndependent(index)) return false;
-#endif
+ if (FLAG_embedded_builtins) {
+ // We don't want to lazy-deserialize off-heap builtins.
+ if (Builtins::IsIsolateIndependent(index)) return false;
+ }
// There are a couple of reasons that builtins can require eager-loading,
// i.e. deserialization at isolate creation instead of on-demand. For
@@ -302,22 +301,44 @@ bool Builtins::IsLazy(int index) {
// static
bool Builtins::IsIsolateIndependent(int index) {
DCHECK(IsBuiltinId(index));
- // TODO(jgruber): There's currently two blockers for moving
- // InterpreterEntryTrampoline into the binary:
- // 1. InterpreterEnterBytecode calculates a pointer into the middle of
- // InterpreterEntryTrampoline (see interpreter_entry_return_pc_offset).
- // When the builtin is embedded, the pointer would need to be calculated
- // at an offset from the embedded instruction stream (instead of the
- // trampoline code object).
- // 2. We create distinct copies of the trampoline to make it possible to
- // attribute ticks in the interpreter to individual JS functions.
- // See https://crrev.com/c/959081 and InstallBytecodeArray. When the
- // trampoline is embedded, we need to ensure that CopyCode creates a copy
- // of the builtin itself (and not just the trampoline).
- return index != kInterpreterEntryTrampoline;
+ switch (index) {
+ // TODO(jgruber): There's currently two blockers for moving
+ // InterpreterEntryTrampoline into the binary:
+ // 1. InterpreterEnterBytecode calculates a pointer into the middle of
+ // InterpreterEntryTrampoline (see interpreter_entry_return_pc_offset).
+ // When the builtin is embedded, the pointer would need to be calculated
+ // at an offset from the embedded instruction stream (instead of the
+ // trampoline code object).
+ // 2. We create distinct copies of the trampoline to make it possible to
+ // attribute ticks in the interpreter to individual JS functions.
+ // See https://crrev.com/c/959081 and InstallBytecodeArray. When the
+ // trampoline is embedded, we need to ensure that CopyCode creates a copy
+ // of the builtin itself (and not just the trampoline).
+ case kInterpreterEntryTrampoline:
+ return false;
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+ // TODO(7882): The size of these builtins on MIP64 and MIPS32 is greater
+ // than 128KB, and this triggers generation of MIPS specific trampolines.
+ // Trampoline code is not PIC and therefore the builtin is not isolate
+ // independent.
+ case kArraySpliceTorque:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedStoreIC_Megamorphic:
+ case kObjectAssign:
+ case kObjectGetOwnPropertyDescriptor:
+ case kRegExpMatchFast:
+ case kRegExpReplace:
+ case kRegExpSplit:
+ case kRegExpStringIteratorPrototypeNext:
+ case kStoreIC_Uninitialized:
+ return false;
+#endif
+ default:
+ return true;
+ }
+ UNREACHABLE();
}
-#ifdef V8_EMBEDDED_BUILTINS
// static
Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
Address off_heap_entry) {
@@ -341,7 +362,6 @@ Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
return isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
}
-#endif // V8_EMBEDDED_BUILTINS
// static
Builtins::Kind Builtins::KindOf(int index) {
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 4cb091492d..ccb9619eeb 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -31,13 +31,10 @@ class CodeAssemblerState;
class Builtins {
public:
- ~Builtins();
+ explicit Builtins(Isolate* isolate) : isolate_(isolate) {}
void TearDown();
- // Garbage collection support.
- void IterateBuiltins(RootVisitor* v);
-
// Disassembler support.
const char* Lookup(Address pc);
@@ -75,24 +72,9 @@ class Builtins {
// Used by BuiltinDeserializer and CreateOffHeapTrampolines in isolate.cc.
void set_builtin(int index, HeapObject* builtin);
- Code* builtin(int index) {
- DCHECK(IsBuiltinId(index));
- // Code::cast cannot be used here since we access builtins
- // during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[index]);
- }
-
- Address builtin_address(int index) {
- DCHECK(IsBuiltinId(index));
- return reinterpret_cast<Address>(&builtins_[index]);
- }
-
+ Code* builtin(int index);
V8_EXPORT_PRIVATE Handle<Code> builtin_handle(int index);
- // Used by lazy deserialization to determine whether a given builtin has been
- // deserialized. See the DeserializeLazy builtin.
- Object** builtins_table_address() { return &builtins_[0]; }
-
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
static int GetStackParameterCount(Name name);
@@ -113,8 +95,12 @@ class Builtins {
// necessarily mean that its kind is Code::BUILTIN.
static bool IsBuiltin(const Code* code);
+ // As above, but safe to access off the main thread since the check is done
+ // by handle location. Similar to Heap::IsRootHandle.
+ bool IsBuiltinHandle(Handle<HeapObject> maybe_code, int* index) const;
+
// True, iff the given code object is a builtin with off-heap embedded code.
- static bool IsEmbeddedBuiltin(const Code* code);
+ static bool IsIsolateIndependentBuiltin(const Code* code);
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
@@ -150,17 +136,13 @@ class Builtins {
static bool AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy);
- private:
- Builtins();
-
-#ifdef V8_EMBEDDED_BUILTINS
// Creates a trampoline code object that jumps to the given off-heap entry.
// The result should not be used directly, but only from the related Factory
// function.
static Handle<Code> GenerateOffHeapTrampolineFor(Isolate* isolate,
Address off_heap_entry);
-#endif
+ private:
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode);
@@ -193,14 +175,9 @@ class Builtins {
#undef DECLARE_ASM
#undef DECLARE_TF
- // Note: These are always Code objects, but to conform with
- // IterateBuiltins() above which assumes Object**'s for the callback
- // function f, we use an Object* array here.
- Object* builtins_[builtin_count];
- bool initialized_;
+ Isolate* isolate_;
+ bool initialized_ = false;
- friend class Factory; // For GenerateOffHeapTrampolineFor.
- friend class Isolate;
friend class SetupIsolateDelegate;
DISALLOW_COPY_AND_ASSIGN(Builtins);
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index d9805834ca..6dd390c795 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -12,7 +12,7 @@ namespace internal {
BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
: isolate_(isolate), map_(isolate->heap()) {
// Ensure this is only called once per Isolate.
- DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
// And that the initial value of the builtins constants table can be treated
@@ -30,9 +30,13 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
// Not yet finalized.
- DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
+ // Must be on the main thread.
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+
+ // Must be serializing.
DCHECK(isolate_->serializer_enabled());
#endif
@@ -56,7 +60,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
DCHECK(!isolate_->heap()->IsRootHandle(code_object, &root_list_index));
// Not yet finalized.
- DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
DCHECK(isolate_->serializer_enabled());
@@ -68,7 +72,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
// During indirection generation, we always create a distinct marker for each
// macro assembler. The canonical marker is only used when not generating a
// snapshot.
- DCHECK(*self_reference != isolate_->heap()->self_reference_marker());
+ DCHECK(*self_reference != ReadOnlyRoots(isolate_).self_reference_marker());
#endif
uint32_t key;
@@ -81,7 +85,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
void BuiltinsConstantsTableBuilder::Finalize() {
HandleScope handle_scope(isolate_);
- DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
DCHECK(isolate_->serializer_enabled());
@@ -108,8 +112,8 @@ void BuiltinsConstantsTableBuilder::Finalize() {
#ifdef DEBUG
for (int i = 0; i < map_.size(); i++) {
DCHECK(table->get(i)->IsHeapObject());
- DCHECK_NE(isolate_->heap()->undefined_value(), table->get(i));
- DCHECK_NE(isolate_->heap()->self_reference_marker(), table->get(i));
+ DCHECK_NE(ReadOnlyRoots(isolate_).undefined_value(), table->get(i));
+ DCHECK_NE(ReadOnlyRoots(isolate_).self_reference_marker(), table->get(i));
}
#endif
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
new file mode 100644
index 0000000000..7cdc74a944
--- /dev/null
+++ b/deps/v8/src/builtins/data-view.tq
@@ -0,0 +1,896 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module data_view {
+
+ extern operator '.buffer'
+ macro LoadArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
+ extern operator '.byte_length'
+ macro LoadDataViewByteLength(JSDataView): Number;
+ extern operator '.byte_offset'
+ macro LoadDataViewByteOffset(JSDataView): Number;
+ extern operator '.backing_store'
+ macro LoadArrayBufferBackingStore(JSArrayBuffer): RawPtr;
+
+ macro WasNeutered(view: JSArrayBufferView): bool {
+ return IsDetachedBuffer(view.buffer);
+ }
+
+ macro ValidateDataView(context: Context,
+ o: Object, method: String): JSDataView {
+ try {
+ return cast<JSDataView>(o) otherwise CastError;
+ }
+ label CastError {
+ ThrowTypeError(context, kIncompatibleMethodReceiver, method);
+ }
+ }
+
+ // ES6 section 24.2.4.1 get DataView.prototype.buffer
+ javascript builtin DataViewPrototypeGetBuffer(
+ context: Context, receiver: Object, ...arguments): JSArrayBuffer {
+ let data_view: JSDataView = ValidateDataView(
+ context, receiver, 'get DataView.prototype.buffer');
+ return data_view.buffer;
+ }
+
+ // ES6 section 24.2.4.2 get DataView.prototype.byteLength
+ javascript builtin DataViewPrototypeGetByteLength(
+ context: Context, receiver: Object, ...arguments): Number {
+ let data_view: JSDataView = ValidateDataView(
+ context, receiver, 'get DataView.prototype.byte_length');
+ if (WasNeutered(data_view)) {
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {data_view} was neutered.
+ return 0;
+ }
+ return data_view.byte_length;
+ }
+
+ // ES6 section 24.2.4.3 get DataView.prototype.byteOffset
+ javascript builtin DataViewPrototypeGetByteOffset(
+ context: Context, receiver: Object, ...arguments): Number {
+ let data_view: JSDataView = ValidateDataView(
+ context, receiver, 'get DataView.prototype.byte_offset');
+ if (WasNeutered(data_view)) {
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {data_view} was neutered.
+ return 0;
+ }
+ return data_view.byte_offset;
+ }
+
+ extern macro BitcastInt32ToFloat32(uint32): float32;
+ extern macro BitcastFloat32ToInt32(float32): uint32;
+ extern macro Float64ExtractLowWord32(float64): uint32;
+ extern macro Float64ExtractHighWord32(float64): uint32;
+ extern macro Float64InsertLowWord32(float64, uint32): float64;
+ extern macro Float64InsertHighWord32(float64, uint32): float64;
+
+ extern macro LoadUint8(RawPtr, intptr): uint32;
+ extern macro LoadInt8(RawPtr, intptr): int32;
+
+ macro LoadDataViewUint8(buffer: JSArrayBuffer, offset: intptr): Smi {
+ return convert<Smi>(LoadUint8(buffer.backing_store, offset));
+ }
+
+ macro LoadDataViewInt8(buffer: JSArrayBuffer, offset: intptr): Smi {
+ return convert<Smi>(LoadInt8(buffer.backing_store, offset));
+ }
+
+ macro LoadDataView16(buffer: JSArrayBuffer, offset: intptr,
+ requested_little_endian: bool,
+ signed: constexpr bool): Number {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: int32;
+ let b1: int32;
+ let result: int32;
+
+ // Sign-extend the most significant byte by loading it as an Int8.
+ if (requested_little_endian) {
+ b0 = Signed(LoadUint8(data_pointer, offset));
+ b1 = LoadInt8(data_pointer, offset + 1);
+ result = (b1 << 8) + b0;
+ } else {
+ b0 = LoadInt8(data_pointer, offset);
+ b1 = Signed(LoadUint8(data_pointer, offset + 1));
+ result = (b0 << 8) + b1;
+ }
+ if constexpr (signed) {
+ return convert<Smi>(result);
+ } else {
+ // Bit-mask the higher bits to prevent sign extension if we're unsigned.
+ return convert<Smi>(result & 0xFFFF);
+ }
+ }
+
+ macro LoadDataView32(buffer: JSArrayBuffer, offset: intptr,
+ requested_little_endian: bool,
+ signed: constexpr bool): Number {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(data_pointer, offset);
+ let b1: uint32 = LoadUint8(data_pointer, offset + 1);
+ let b2: uint32 = LoadUint8(data_pointer, offset + 2);
+ let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let result: uint32;
+
+ if (requested_little_endian) {
+ result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ } else {
+ result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ }
+
+ if constexpr (signed) {
+ return convert<Number>(Signed(result));
+ } else {
+ return convert<Number>(result);
+ }
+ }
+
+ macro LoadDataViewFloat32(buffer: JSArrayBuffer, offset: intptr,
+ requested_little_endian: bool): Number {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(data_pointer, offset);
+ let b1: uint32 = LoadUint8(data_pointer, offset + 1);
+ let b2: uint32 = LoadUint8(data_pointer, offset + 2);
+ let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let result: uint32;
+
+ if (requested_little_endian) {
+ result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ } else {
+ result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ }
+
+ let float_res: float64 = convert<float64>(BitcastInt32ToFloat32(result));
+ return convert<Number>(float_res);
+ }
+
+ macro LoadDataViewFloat64(buffer: JSArrayBuffer, offset: intptr,
+ requested_little_endian: bool): Number {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(data_pointer, offset);
+ let b1: uint32 = LoadUint8(data_pointer, offset + 1);
+ let b2: uint32 = LoadUint8(data_pointer, offset + 2);
+ let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let b4: uint32 = LoadUint8(data_pointer, offset + 4);
+ let b5: uint32 = LoadUint8(data_pointer, offset + 5);
+ let b6: uint32 = LoadUint8(data_pointer, offset + 6);
+ let b7: uint32 = LoadUint8(data_pointer, offset + 7);
+ let low_word: uint32;
+ let high_word: uint32;
+
+ if (requested_little_endian) {
+ low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ } else {
+ high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ }
+
+ let result: float64 = 0;
+ result = Float64InsertLowWord32(result, low_word);
+ result = Float64InsertHighWord32(result, high_word);
+
+ return convert<Number>(result);
+ }
+
+ extern macro AllocateBigInt(intptr): BigInt;
+ extern macro StoreBigIntBitfield(BigInt, intptr): void;
+ extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void;
+ extern macro DataViewEncodeBigIntBits(constexpr bool,
+ constexpr int31): intptr;
+
+ const kPositiveBigInt: constexpr bool generates 'false';
+ const kNegativeBigInt: constexpr bool generates 'true';
+ const kZeroDigitBigInt: constexpr int31 generates '0';
+ const kOneDigitBigInt: constexpr int31 generates '1';
+ const kTwoDigitBigInt: constexpr int31 generates '2';
+
+ macro CreateEmptyBigInt(is_positive: bool, length: constexpr int31): BigInt {
+ // Allocate a BigInt with the desired length (number of digits).
+ let result: BigInt = AllocateBigInt(length);
+
+ // Write the desired sign and length to the BigInt bitfield.
+ if (is_positive) {
+ StoreBigIntBitfield(result,
+ DataViewEncodeBigIntBits(kPositiveBigInt, length));
+ } else {
+ StoreBigIntBitfield(result,
+ DataViewEncodeBigIntBits(kNegativeBigInt, length));
+ }
+
+ return result;
+ }
+
+ // Create a BigInt on a 64-bit architecture from two 32-bit values.
+ macro MakeBigIntOn64Bit(low_word: uint32, high_word: uint32,
+ signed: constexpr bool): BigInt {
+
+ // 0n is represented by a zero-length BigInt.
+ if (low_word == 0 && high_word == 0) {
+ return AllocateBigInt(kZeroDigitBigInt);
+ }
+
+ let is_positive: bool = true;
+ let high_part: intptr = Signed(convert<uintptr>(high_word));
+ let low_part: intptr = Signed(convert<uintptr>(low_word));
+ let raw_value: intptr = (high_part << 32) + low_part;
+
+ if constexpr (signed) {
+ if (raw_value < 0) {
+ is_positive = false;
+ // We have to store the absolute value of raw_value in the digit.
+ raw_value = 0 - raw_value;
+ }
+ }
+
+ // Allocate the BigInt and store the absolute value.
+ let result: BigInt = CreateEmptyBigInt(is_positive, kOneDigitBigInt);
+
+ StoreBigIntDigit(result, 0, Unsigned(raw_value));
+
+ return result;
+ }
+
+ // Create a BigInt on a 32-bit architecture from two 32-bit values.
+ macro MakeBigIntOn32Bit(low_word: uint32, high_word: uint32,
+ signed: constexpr bool): BigInt {
+
+ // 0n is represented by a zero-length BigInt.
+ if (low_word == 0 && high_word == 0) {
+ return AllocateBigInt(kZeroDigitBigInt);
+ }
+
+ // On a 32-bit platform, we might need 1 or 2 digits to store the number.
+ let need_two_digits: bool = false;
+ let is_positive: bool = true;
+
+ // We need to do some math on low_word and high_word,
+ // so convert them to int32.
+ let low_part: int32 = Signed(low_word);
+ let high_part: int32 = Signed(high_word);
+
+ // If high_word == 0, the number is positive, and we only need 1 digit,
+ // so we don't have anything to do.
+ // Otherwise, all cases are possible.
+ if (high_word != 0) {
+ if constexpr (signed) {
+
+ // If high_part < 0, the number is always negative.
+ if (high_part < 0) {
+ is_positive = false;
+
+ // We have to compute the absolute value by hand.
+ // There will be a negative carry from the low word
+ // to the high word iff low != 0.
+ high_part = 0 - high_part;
+ if (low_part != 0) {
+ high_part = high_part - 1;
+ }
+ low_part = 0 - low_part;
+
+ // Here, high_part could be 0 again so we might have 1 or 2 digits.
+ if (high_part != 0) {
+ need_two_digits = true;
+ }
+
+ } else {
+ // In this case, the number is positive, and we need 2 digits.
+ need_two_digits = true;
+ }
+
+ } else {
+ // In this case, the number is positive (unsigned),
+ // and we need 2 digits.
+ need_two_digits = true;
+ }
+ }
+
+ // Allocate the BigInt with the right sign and length.
+ let result: BigInt;
+ if (need_two_digits) {
+ result = CreateEmptyBigInt(is_positive, kTwoDigitBigInt);
+ } else {
+ result = CreateEmptyBigInt(is_positive, kOneDigitBigInt);
+ }
+
+ // Finally, write the digit(s) to the BigInt.
+ StoreBigIntDigit(result, 0, Unsigned(convert<intptr>(low_part)));
+
+ if (need_two_digits) {
+ StoreBigIntDigit(result, 1, Unsigned(convert<intptr>(high_part)));
+ }
+
+ return result;
+ }
+
+ macro MakeBigInt(low_word: uint32, high_word: uint32,
+ signed: constexpr bool): BigInt {
+ // A BigInt digit has the platform word size, so we only need one digit
+ // on 64-bit platforms but may need two on 32-bit.
+ if constexpr (Is64()) {
+ return MakeBigIntOn64Bit(low_word, high_word, signed);
+ } else {
+ return MakeBigIntOn32Bit(low_word, high_word, signed);
+ }
+ }
+
+ macro LoadDataViewBigInt(buffer: JSArrayBuffer, offset: intptr,
+ requested_little_endian: bool,
+ signed: constexpr bool): BigInt {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(data_pointer, offset);
+ let b1: uint32 = LoadUint8(data_pointer, offset + 1);
+ let b2: uint32 = LoadUint8(data_pointer, offset + 2);
+ let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let b4: uint32 = LoadUint8(data_pointer, offset + 4);
+ let b5: uint32 = LoadUint8(data_pointer, offset + 5);
+ let b6: uint32 = LoadUint8(data_pointer, offset + 6);
+ let b7: uint32 = LoadUint8(data_pointer, offset + 7);
+ let low_word: uint32;
+ let high_word: uint32;
+
+ if (requested_little_endian) {
+ low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ } else {
+ high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ }
+
+ return MakeBigInt(low_word, high_word, signed);
+ }
+
+ extern macro ToSmiIndex(Object, Context): Smi labels RangeError;
+ extern macro DataViewElementSize(constexpr ElementsKind): constexpr int31;
+
+ macro DataViewGet(context: Context,
+ receiver: Object,
+ offset: Object,
+ requested_little_endian: Object,
+ kind: constexpr ElementsKind): Numeric {
+
+ // TODO(theotime): add more specific method name to match
+ // the former implementation.
+ let data_view: JSDataView = ValidateDataView(
+ context, receiver, 'get DataView.prototype.get');
+
+ let getIndex: Number;
+ try {
+ getIndex = ToIndex(offset, context) otherwise RangeError;
+ }
+ label RangeError {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
+
+ let littleEndian: bool = ToBoolean(requested_little_endian);
+ let buffer: JSArrayBuffer = data_view.buffer;
+
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(context, kDetachedOperation, 'DataView.prototype.get');
+ }
+
+ let viewOffset: Number = data_view.byte_offset;
+ let viewSize: Number = data_view.byte_length;
+ let elementSize: Number = DataViewElementSize(kind);
+
+ if (getIndex + elementSize > viewSize) {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
+
+ let getIndexFloat: float64 = convert<float64>(getIndex);
+ let getIndexIntptr: intptr = Signed(convert<uintptr>(getIndexFloat));
+ let viewOffsetFloat: float64 = convert<float64>(viewOffset);
+ let viewOffsetIntptr: intptr = Signed(convert<uintptr>(viewOffsetFloat));
+
+ let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr;
+
+ if constexpr (kind == UINT8_ELEMENTS) {
+ return LoadDataViewUint8(buffer, bufferIndex);
+ } else if constexpr (kind == INT8_ELEMENTS) {
+ return LoadDataViewInt8(buffer, bufferIndex);
+ } else if constexpr (kind == UINT16_ELEMENTS) {
+ return LoadDataView16(buffer, bufferIndex, littleEndian, false);
+ } else if constexpr (kind == INT16_ELEMENTS) {
+ return LoadDataView16(buffer, bufferIndex, littleEndian, true);
+ } else if constexpr (kind == UINT32_ELEMENTS) {
+ return LoadDataView32(buffer, bufferIndex, littleEndian, false);
+ } else if constexpr (kind == INT32_ELEMENTS) {
+ return LoadDataView32(buffer, bufferIndex, littleEndian, true);
+ } else if constexpr (kind == FLOAT32_ELEMENTS) {
+ return LoadDataViewFloat32(buffer, bufferIndex, littleEndian);
+ } else if constexpr (kind == FLOAT64_ELEMENTS) {
+ return LoadDataViewFloat64(buffer, bufferIndex, littleEndian);
+ } else if constexpr (kind == BIGINT64_ELEMENTS) {
+ return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, true);
+ } else if constexpr (kind == BIGUINT64_ELEMENTS) {
+ return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, false);
+ } else {
+ unreachable;
+ }
+ }
+
+ javascript builtin DataViewPrototypeGetInt8(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetUint8(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetInt16(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ INT16_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetUint16(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ UINT16_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetInt32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ INT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetUint32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ UINT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetFloat32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ FLOAT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetFloat64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ FLOAT64_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetBigInt64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ BIGINT64_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetBigUint64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, is_little_endian,
+ BIGUINT64_ELEMENTS);
+ }
+
+ extern macro ToNumber(Context, Object): Number;
+ extern macro ToBigInt(Context, Object): BigInt;
+ extern macro TruncateFloat64ToFloat32(float64): float32;
+ extern macro TruncateFloat64ToWord32(float64): uint32;
+
+ extern macro StoreWord8(RawPtr, intptr, uint32): void;
+
+ macro StoreDataView8(buffer: JSArrayBuffer, offset: intptr,
+ value: uint32) {
+ StoreWord8(buffer.backing_store, offset, value & 0xFF);
+ }
+
+ macro StoreDataView16(buffer: JSArrayBuffer, offset: intptr, value: uint32,
+ requested_little_endian: bool) {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = value & 0xFF;
+ let b1: uint32 = (value >>> 8) & 0xFF;
+
+ if (requested_little_endian) {
+ StoreWord8(data_pointer, offset, b0);
+ StoreWord8(data_pointer, offset + 1, b1);
+ } else {
+ StoreWord8(data_pointer, offset, b1);
+ StoreWord8(data_pointer, offset + 1, b0);
+ }
+ }
+
+ macro StoreDataView32(buffer: JSArrayBuffer, offset: intptr, value: uint32,
+ requested_little_endian: bool) {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = value & 0xFF;
+ let b1: uint32 = (value >>> 8) & 0xFF;
+ let b2: uint32 = (value >>> 16) & 0xFF;
+ let b3: uint32 = value >>> 24; // We don't need to mask here.
+
+ if (requested_little_endian) {
+ StoreWord8(data_pointer, offset, b0);
+ StoreWord8(data_pointer, offset + 1, b1);
+ StoreWord8(data_pointer, offset + 2, b2);
+ StoreWord8(data_pointer, offset + 3, b3);
+ } else {
+ StoreWord8(data_pointer, offset, b3);
+ StoreWord8(data_pointer, offset + 1, b2);
+ StoreWord8(data_pointer, offset + 2, b1);
+ StoreWord8(data_pointer, offset + 3, b0);
+ }
+ }
+
+ macro StoreDataView64(buffer: JSArrayBuffer, offset: intptr,
+ low_word: uint32, high_word: uint32,
+ requested_little_endian: bool) {
+ let data_pointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = low_word & 0xFF;
+ let b1: uint32 = (low_word >>> 8) & 0xFF;
+ let b2: uint32 = (low_word >>> 16) & 0xFF;
+ let b3: uint32 = low_word >>> 24;
+
+ let b4: uint32 = high_word & 0xFF;
+ let b5: uint32 = (high_word >>> 8) & 0xFF;
+ let b6: uint32 = (high_word >>> 16) & 0xFF;
+ let b7: uint32 = high_word >>> 24;
+
+
+ if (requested_little_endian) {
+ StoreWord8(data_pointer, offset, b0);
+ StoreWord8(data_pointer, offset + 1, b1);
+ StoreWord8(data_pointer, offset + 2, b2);
+ StoreWord8(data_pointer, offset + 3, b3);
+ StoreWord8(data_pointer, offset + 4, b4);
+ StoreWord8(data_pointer, offset + 5, b5);
+ StoreWord8(data_pointer, offset + 6, b6);
+ StoreWord8(data_pointer, offset + 7, b7);
+ } else {
+ StoreWord8(data_pointer, offset, b7);
+ StoreWord8(data_pointer, offset + 1, b6);
+ StoreWord8(data_pointer, offset + 2, b5);
+ StoreWord8(data_pointer, offset + 3, b4);
+ StoreWord8(data_pointer, offset + 4, b3);
+ StoreWord8(data_pointer, offset + 5, b2);
+ StoreWord8(data_pointer, offset + 6, b1);
+ StoreWord8(data_pointer, offset + 7, b0);
+ }
+ }
+
+ extern macro DataViewDecodeBigIntLength(BigInt): uintptr;
+ extern macro DataViewDecodeBigIntSign(BigInt): uintptr;
+ extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr;
+
+ // We might get here a BigInt that is bigger than 64 bits, but we're only
+ // interested in the 64 lowest ones. This means the lowest BigInt digit
+ // on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
+ macro StoreDataViewBigInt(buffer: JSArrayBuffer, offset: intptr,
+ bigint_value: BigInt,
+ requested_little_endian: bool) {
+
+ let length: uintptr = DataViewDecodeBigIntLength(bigint_value);
+ let sign: uintptr = DataViewDecodeBigIntSign(bigint_value);
+
+ // The 32-bit words that will hold the BigInt's value in
+ // two's complement representation.
+ let low_word: uint32 = 0;
+ let high_word: uint32 = 0;
+
+ // The length is nonzero if and only if the BigInt's value is nonzero.
+ if (length != 0) {
+ if constexpr (Is64()) {
+ // There is always exactly 1 BigInt digit to load in this case.
+ let value: uintptr = LoadBigIntDigit(bigint_value, 0);
+ low_word = convert<uint32>(value); // Truncates value to 32 bits.
+ high_word = convert<uint32>(value >>> 32);
+ }
+ else { // There might be either 1 or 2 BigInt digits we need to load.
+ low_word = convert<uint32>(LoadBigIntDigit(bigint_value, 0));
+ if (length >= 2) { // Only load the second digit if there is one.
+ high_word = convert<uint32>(LoadBigIntDigit(bigint_value, 1));
+ }
+ }
+ }
+
+ if (sign != 0) { // The number is negative, convert it.
+ high_word = Unsigned(0 - Signed(high_word));
+ if (low_word != 0) {
+ high_word = Unsigned(Signed(high_word) - 1);
+ }
+ low_word = Unsigned(0 - Signed(low_word));
+ }
+
+ StoreDataView64(buffer, offset, low_word, high_word,
+ requested_little_endian);
+ }
+
+ macro DataViewSet(context: Context,
+ receiver: Object,
+ offset: Object,
+ value: Object,
+ requested_little_endian: Object,
+ kind: constexpr ElementsKind): Object {
+
+ // TODO(theotime): add more specific method name to match
+ // the former implementation.
+ let data_view: JSDataView = ValidateDataView(
+ context, receiver, 'get DataView.prototype.get');
+
+ let getIndex: Number;
+ try {
+ getIndex = ToIndex(offset, context) otherwise RangeError;
+ }
+ label RangeError {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
+
+ let littleEndian: bool = ToBoolean(requested_little_endian);
+ let buffer: JSArrayBuffer = data_view.buffer;
+
+ let bigint_value: BigInt;
+ let num_value: Number;
+ // According to ES6 section 24.2.1.2 SetViewValue, we must perform
+ // the conversion before doing the bounds check.
+ if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
+ bigint_value = ToBigInt(context, value);
+ } else {
+ num_value = ToNumber(context, value);
+ }
+
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(context, kDetachedOperation, 'DataView.prototype.get');
+ }
+
+ let viewOffset: Number = data_view.byte_offset;
+ let viewSize: Number = data_view.byte_length;
+ let elementSize: Number = DataViewElementSize(kind);
+
+ if (getIndex + elementSize > viewSize) {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
+
+ let getIndexFloat: float64 = convert<float64>(getIndex);
+ let getIndexIntptr: intptr = Signed(convert<uintptr>(getIndexFloat));
+ let viewOffsetFloat: float64 = convert<float64>(viewOffset);
+ let viewOffsetIntptr: intptr = Signed(convert<uintptr>(viewOffsetFloat));
+
+ let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr;
+
+ if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
+ StoreDataViewBigInt(buffer, bufferIndex, bigint_value,
+ littleEndian);
+ }
+ else {
+ let double_value: float64 = ChangeNumberToFloat64(num_value);
+
+ if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) {
+ StoreDataView8(buffer, bufferIndex,
+ TruncateFloat64ToWord32(double_value));
+ }
+ else if constexpr (kind == UINT16_ELEMENTS || kind == INT16_ELEMENTS) {
+ StoreDataView16(buffer, bufferIndex,
+ TruncateFloat64ToWord32(double_value), littleEndian);
+ }
+ else if constexpr (kind == UINT32_ELEMENTS || kind == INT32_ELEMENTS) {
+ StoreDataView32(buffer, bufferIndex,
+ TruncateFloat64ToWord32(double_value), littleEndian);
+ }
+ else if constexpr (kind == FLOAT32_ELEMENTS) {
+ let float_value: float32 = TruncateFloat64ToFloat32(double_value);
+ StoreDataView32(buffer, bufferIndex,
+ BitcastFloat32ToInt32(float_value), littleEndian);
+ }
+ else if constexpr (kind == FLOAT64_ELEMENTS) {
+ let low_word: uint32 = Float64ExtractLowWord32(double_value);
+ let high_word: uint32 = Float64ExtractHighWord32(double_value);
+ StoreDataView64(buffer, bufferIndex, low_word, high_word,
+ littleEndian);
+ }
+ }
+ return Undefined;
+ }
+
+ javascript builtin DataViewPrototypeSetInt8(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value, Undefined,
+ INT8_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetUint8(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value, Undefined,
+ UINT8_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetInt16(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, INT16_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetUint16(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, UINT16_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetInt32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, INT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetUint32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, UINT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetFloat32(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, FLOAT32_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetFloat64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, FLOAT64_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetBigInt64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, BIGINT64_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeSetBigUint64(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ let value : Object = arguments.length > 1 ?
+ arguments[1] :
+ Undefined;
+ let is_little_endian : Object = arguments.length > 2 ?
+ arguments[2] :
+ Undefined;
+ return DataViewSet(context, receiver, offset, value,
+ is_little_endian, BIGUINT64_ELEMENTS);
+ }
+
+}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index dfeec1339c..d1c0a5d5fb 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -20,7 +20,8 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ mov(ebx, Immediate(ExternalReference::Create(address)));
+ __ mov(kJavaScriptCallExtraArg1Register,
+ Immediate(ExternalReference::Create(address)));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -31,60 +32,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- ebx : entry point
- // -- edi : target
- // -- edx : new.target
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument
- // -- esp[4 * (argc +1)] : receiver
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // CEntry expects eax to contain the number of arguments including the
- // receiver and the extra arguments.
- __ add(eax, Immediate(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Insert extra arguments.
- __ PopReturnAddressTo(ecx);
- __ SmiTag(eax);
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ Push(eax);
- __ SmiUntag(eax);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference because ebx is loaded by Generate_adaptor.
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -539,21 +486,26 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- esp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzx_w(
+ ecx, FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ebx,
+ FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
+ __ Set(edi, 0);
+
__ bind(&loop);
- __ sub(ecx, Immediate(1));
- __ j(carry, &done_loop, Label::kNear);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ cmp(edi, ecx);
+ __ j(greater_equal, &done_loop);
+ __ Push(
+ FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
+ __ add(edi, Immediate(1));
__ jmp(&loop);
+
__ bind(&done_loop);
+ __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
}
// Underlying function needs to have bytecode available.
@@ -571,8 +523,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
__ PushReturnAddressFrom(eax);
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzx_w(eax, FieldOperand(
+ eax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -848,19 +800,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
- apply_instrumentation;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ Push(eax);
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
__ Pop(eax);
- __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
__ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
@@ -969,35 +916,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in eax.
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
- __ mov(ecx, FieldOperand(eax, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mov(kInterpreterBytecodeArrayRegister, ecx);
- __ mov(ecx, FieldOperand(eax, DebugInfo::kFlagsOffset));
- __ SmiUntag(ecx);
- __ and_(ecx, Immediate(DebugInfo::kDebugExecutionMode));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ cmp(ecx, Operand::StaticVariable(debug_execution_mode));
- __ j(equal, &bytecode_array_loaded);
-
- __ pop(ecx); // get JSFunction from stack
- __ push(ecx);
- __ push(ebx); // preserve feedback_vector and bytecode array register
- __ push(kInterpreterBytecodeArrayRegister);
- __ push(ecx); // pass function as argument
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ pop(ebx);
- __ jmp(&bytecode_array_loaded);
}
@@ -1115,30 +1033,11 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
stack_overflow, true);
-// Step 1 - Update the stack pointer. scratch1 already contains the required
-// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
-// the Generate_StackOverflowCheck.
-
-#ifdef _MSC_VER
- // TODO(mythria): Move it to macro assembler.
- // In windows, we cannot increment the stack size by more than one page
- // (mimimum page size is 4KB) without accessing at least one byte on the
- // page. Check this:
- // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
- const int page_size = 4 * 1024;
- Label check_offset, update_stack_pointer;
- __ bind(&check_offset);
- __ cmp(scratch1, page_size);
- __ j(less, &update_stack_pointer);
- __ sub(esp, Immediate(page_size));
- // Just to touch the page, before we increment further.
- __ mov(Operand(esp, 0), Immediate(0));
- __ sub(scratch1, Immediate(page_size));
- __ jmp(&check_offset);
- __ bind(&update_stack_pointer);
-#endif
-
- __ sub(esp, scratch1);
+ // Step 1 - Update the stack pointer. scratch1 already contains the required
+ // increment to the stack. i.e. num_args + 1 stack slots. This is computed in
+ // Generate_StackOverflowCheck.
+
+ __ AllocateStackFrame(scratch1);
// Step 2 move return_address and slots above it to the correct locations.
// Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
@@ -1214,8 +1113,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with unmodified eax, edi, edx values.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1335,208 +1234,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(ecx, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ mov(FieldOperand(edi, JSFunction::kCodeOffset), ecx);
- __ RecordWriteField(edi, JSFunction::kCodeOffset, ecx, ebx, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ mov(scratch1,
- Immediate(ExternalReference::builtins_address(masm->isolate())));
- // Avoid untagging the Smi unnecessarily.
- STATIC_ASSERT(times_2 == times_pointer_size - kSmiTagSize);
- __ mov(sfi_data, Operand(scratch1, sfi_data, times_2, 0));
- __ jmp(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ mov(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
- __ mov(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
- __ j(not_equal, &check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ jmp(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ cmpw(data_type, Immediate(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ j(not_equal, &check_is_fixed_array);
- __ mov(sfi_data,
- FieldOperand(sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ jmp(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
- __ j(not_equal, &check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ jmp(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ cmpw(data_type, Immediate(TUPLE2_TYPE));
- __ j(not_equal, &check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ jmp(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
- __ j(not_equal, &check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ jmp(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
- __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ mov(sfi_data,
- FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = edi;
- Register feedback_vector = ebx;
-
- // Do we have a valid feedback vector?
- __ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = ecx;
- __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, ebx);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(ebx, masm->CodeObject());
- __ cmp(entry, ebx);
- __ j(equal, &gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ mov(FieldOperand(closure, JSFunction::kCodeOffset), entry);
- __ RecordWriteField(closure, JSFunction::kCodeOffset, entry, ebx,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ mov(entry, FieldOperand(closure, JSFunction::kCodeOffset));
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ jmp(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = edi; // Must be preserved
- Register scratch0 = ebx;
- Register scratch1 = ecx;
-
- CHECK(scratch0 != eax && scratch0 != edx && scratch0 != edi);
- CHECK(scratch1 != eax && scratch1 != edx && scratch1 != edi);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ mov(scratch0, FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ mov(scratch1,
- FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ mov(scratch0,
- Immediate(ExternalReference::builtins_address(masm->isolate())));
- __ mov(scratch1, Operand(scratch0, scratch1, times_pointer_size, 0));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ cmp(scratch1, scratch0);
- __ j(equal, &deserialize_in_runtime);
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
- __ push(eax); // Write barrier clobbers these below.
- __ push(target_builtin);
- __ RecordWriteField(target, JSFunction::kCodeOffset, target_builtin, eax,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(target_builtin);
- __ pop(eax);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ lea(target_builtin, FieldOperand(target_builtin, Code::kHeaderSize));
- __ jmp(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1910,44 +1607,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // tail call a stub
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : array function
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // ebx is the AllocationSite - here undefined.
__ mov(ebx, masm->isolate()->factory()->undefined_value());
- // If edx (new target) is undefined, then this is the 'Call' case, so move
- // edi (the constructor) to rdx.
- Label call;
- __ cmp(edx, ebx);
- __ j(not_equal, &call);
- __ mov(edx, edi);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1995,13 +1657,30 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- edx : new.target (checked to be constructor or undefined)
// -- esp[0] : return address.
// -----------------------------------
- __ AssertFixedArray(ebx);
// We need to preserve eax, edi and ebx.
__ movd(xmm0, edx);
__ movd(xmm1, edi);
__ movd(xmm2, eax);
+ if (masm->emit_debug_code()) {
+ // Allow ebx to be a FixedArray, or a FixedDoubleArray if ecx == 0.
+ Label ok, fail;
+ __ AssertNotSmi(ebx);
+ __ mov(edx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ CmpInstanceType(edx, FIXED_ARRAY_TYPE);
+ __ j(equal, &ok);
+ __ CmpInstanceType(edx, FIXED_DOUBLE_ARRAY_TYPE);
+ __ j(not_equal, &fail);
+ __ cmp(ecx, 0);
+ __ j(equal, &ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2097,8 +1776,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ mov(edx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(edx, FieldOperand(edx, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzx_w(edx, FieldOperand(
+ edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(ebx, ebp);
}
__ jmp(&arguments_done, Label::kNear);
@@ -2247,8 +1926,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- esi : the function context.
// -----------------------------------
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzx_w(
+ ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(eax);
ParameterCount expected(ebx);
__ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2526,48 +2205,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : message_id as Smi
- // -- esp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
@@ -2580,10 +2217,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmp(eax, ebx);
- __ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
+ __ cmp(eax, ebx);
+ __ j(less, &too_few);
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
@@ -2742,43 +2379,54 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in edi by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(edi);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
+ static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
+ arraysize(wasm::kGpParamRegisters),
+ "frame size mismatch");
for (Register reg : wasm::kGpParamRegisters) {
- if (reg == kWasmInstanceRegister) continue;
__ Push(reg);
}
- __ sub(esp, Immediate(16 * arraysize(wasm::kFpParamRegisters)));
+ static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
+ arraysize(wasm::kFpParamRegisters),
+ "frame size mismatch");
+ __ sub(esp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(esp, offset), reg);
- offset += 16;
+ offset += kSimd128Size;
}
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the WASM instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
+ // Push the function index as second argument.
+ __ Push(edi);
+ // Load the correct CEntry builtin from the instance object.
+ __ mov(ecx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx);
+ // The entrypoint address is the return value.
__ mov(edi, kReturnRegister0);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
- offset -= 16;
+ offset -= kSimd128Size;
__ movdqu(reg, Operand(esp, offset));
}
DCHECK_EQ(0, offset);
- __ add(esp, Immediate(16 * arraysize(wasm::kFpParamRegisters)));
+ __ add(esp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
- if (reg == kWasmInstanceRegister) continue;
__ Pop(reg);
}
}
@@ -3013,8 +2661,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == eax);
+ const Register exponent = eax;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -3144,6 +2791,99 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ ret(0);
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&normal_sequence);
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&not_one_case);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(ecx, Immediate(PACKED_ELEMENTS));
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(HOLEY_ELEMENTS));
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(PACKED_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index f831cf12e1..12c1d60757 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -22,7 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ li(s2, ExternalReference::Create(address));
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -33,58 +33,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments excluding receiver
- // -- a1 : target
- // -- a3 : new.target
- // -- s2 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * agrc] : receiver
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // CEntry expects a0 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ Addu(a0, a0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(a0);
- __ Push(a0, a1, a3);
- __ SmiUntag(a0);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to s2
- // in Generate_adaptor.
- __ mov(a1, s2);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- PROTECT);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -106,44 +54,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // Tail call a stub.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : array function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(a2, t0);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
- Operand(zero_reg));
- __ GetObjectType(a2, t1, t0);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
- Operand(MAP_TYPE));
- }
-
- // a2 is the AllocationSite - here undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // If a3 (new target) is undefined, then this is the 'Call' case, so move
- // a1 (the constructor) to a3.
- Label call;
- __ Branch(&call, ne, a3, Operand(a2));
- __ mov(a3, a1);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -591,19 +504,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
+
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lhu(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lw(t1,
+ FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
+ __ Move(t2, zero_reg);
__ bind(&loop);
__ Subu(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Lsa(kScratchReg, t1, t2, kPointerSizeLog2);
+ __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Addu(t2, t2, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
}
@@ -621,8 +538,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lhu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -887,16 +804,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
- __ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(t0, &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
@@ -1010,36 +923,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ lw(t1, FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(t1, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mov(kInterpreterBytecodeArrayRegister, t1);
- __ lw(t1, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
- __ SmiUntag(t1);
- __ And(t1, t1, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ li(t0, Operand(debug_execution_mode));
- __ lb(t0, MemOperand(t0, kLeastSignificantByteInInt32Offset));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ Branch(&bytecode_array_loaded, eq, t0, Operand(t1));
-
- __ push(closure);
- __ push(feedback_vector);
- __ push(kInterpreterBytecodeArrayRegister);
- __ push(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ pop(feedback_vector);
- __ pop(closure);
- __ Branch(&bytecode_array_loaded);
}
@@ -1159,8 +1042,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1277,204 +1160,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ sw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ RecordWriteField(a1, JSFunction::kCodeOffset, a2, t0, kRAHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ li(scratch1, ExternalReference::builtins_address(masm->isolate()));
- // Avoid untagging the Smi.
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- STATIC_ASSERT(kSmiShiftSize == 0);
- __ Lsa(scratch1, scratch1, sfi_data, kPointerSizeLog2 - kSmiTagSize);
- __ lw(sfi_data, MemOperand(scratch1));
- __ Branch(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ lw(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ Branch(&check_is_exported_function_data, ne, data_type,
- Operand(BYTECODE_ARRAY_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ Branch(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ Branch(&check_is_fixed_array, ne, data_type,
- Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ lw(sfi_data, FieldMemOperand(
- sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ Branch(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
- Operand(FIXED_ARRAY_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ Branch(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ Branch(&check_is_function_template_info, ne, data_type,
- Operand(TUPLE2_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ Branch(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ Branch(&check_is_interpreter_data, ne, data_type,
- Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
- Operand(INTERPRETER_DATA_TYPE));
- }
- __ lw(sfi_data, FieldMemOperand(
- sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = a1;
- Register feedback_vector = a2;
-
- // Do we have a valid feedback vector?
- __ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = t0;
- __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, t1);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(t1, masm->CodeObject());
- __ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
-
- // Install the SFI's code entry.
- __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ mov(t3, entry); // Write barrier clobbers t3 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Jump(entry, Code::kHeaderSize - kHeapObjectTag);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = a1; // Must be preserved
- Register scratch0 = a2;
- Register scratch1 = t0;
-
- CHECK(scratch0 != a0 && scratch0 != a3 && scratch0 != a1);
- CHECK(scratch1 != a0 && scratch1 != a3 && scratch1 != a1);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ lw(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ lw(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ li(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ Lsa(scratch1, scratch0, scratch1, kPointerSizeLog2);
- __ lw(scratch1, MemOperand(scratch1));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ Branch(&deserialize_in_runtime, eq, scratch1, Operand(scratch0));
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ sw(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ Jump(target_builtin, Code::kHeaderSize - kHeapObjectTag);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1900,7 +1585,20 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- t0 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(a2);
+ if (masm->emit_debug_code()) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, t8, t8);
+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, t0, Operand(0));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
// Check for stack overflow.
{
@@ -1979,8 +1677,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lhu(t2, FieldMemOperand(
+ t2, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(t3, fp);
}
__ Branch(&arguments_done);
@@ -2114,8 +1812,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lhu(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2433,42 +2131,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : requested object size (untagged)
- // -- ra : return address
- // -----------------------------------
- __ SmiTag(a0);
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : requested object size (untagged)
- // -- ra : return address
- // -----------------------------------
- __ SmiTag(a0);
- __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(a0, a1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message_id as Smi
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2605,26 +2267,32 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(t0);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<a1, a2, a3>();
+ constexpr RegList gp_regs = Register::ListOf<a0, a1, a2, a3>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
- __ push(kWasmInstanceRegister);
+ // Pass instance and function index as an explicit arguments to the runtime
+ // function.
+ __ Push(kWasmInstanceRegister, t0);
+ // Load the correct CEntry builtin from the instance object.
+ __ lw(a2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, a2);
// Restore registers.
__ MultiPopFPU(fp_regs);
@@ -2933,8 +2601,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == a2);
+ const Register exponent = a2;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -3033,6 +2700,86 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lo, a0, Operand(1));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
+ RelocInfo::CODE_TARGET, hi, a0, Operand(1));
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument.
+ __ lw(kScratchReg, MemOperand(sp, 0));
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- a1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ SmiTst(a3, kScratchReg);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(a3, a3, t0);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
+ Operand(MAP_TYPE));
+ }
+
+ // Figure out the right elements kind.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(a3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 7186b3a7c3..a93c75b2fc 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -22,7 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ li(s2, ExternalReference::Create(address));
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -33,58 +33,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments excluding receiver
- // -- a1 : target
- // -- a3 : new.target
- // -- s2 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[8 * (argc - 1)] : first argument
- // -- sp[8 * agrc] : receiver
- // -----------------------------------
- __ AssertFunction(a1);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // CEntry expects a0 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ Daddu(a0, a0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(a0);
- __ Push(a0, a1, a3);
- __ SmiUntag(a0);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to s2
- // in Generate_adaptor.
- __ mov(a1, s2);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- PROTECT);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -106,44 +54,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // Tail call a stub.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : array function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(a2, a4);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
- Operand(zero_reg));
- __ GetObjectType(a2, t0, a4);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
- Operand(MAP_TYPE));
- }
-
- // a2 is the AllocationSite - here undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // If a3 (new target) is undefined, then this is the 'Call' case, so move
- // a1 (the constructor) to a3.
- Label call;
- __ Branch(&call, ne, a3, Operand(a2));
- __ mov(a3, a1);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -487,14 +400,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// values have already been copied into the context and these dummy values
// will never be used.
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lhu(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ld(t1,
+ FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
+ __ Move(t2, zero_reg);
__ bind(&loop);
__ Dsubu(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Dlsa(kScratchReg, t1, t2, kPointerSizeLog2);
+ __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Daddu(t2, t2, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
}
@@ -512,8 +431,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lhu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -884,16 +803,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
- __ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
@@ -1008,36 +923,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ Ld(a5, FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(a5, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mov(kInterpreterBytecodeArrayRegister, a5);
- __ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
- __ SmiUntag(a5);
- __ And(a5, a5, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ li(a4, Operand(debug_execution_mode));
- __ Lb(a4, MemOperand(a4, kLeastSignificantByteInInt32Offset));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ Branch(&bytecode_array_loaded, eq, a4, Operand(a5));
-
- __ push(closure);
- __ push(feedback_vector);
- __ push(kInterpreterBytecodeArrayRegister);
- __ push(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ pop(feedback_vector);
- __ pop(closure);
- __ Branch(&bytecode_array_loaded);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1156,8 +1041,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1225,9 +1110,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ Lw(
- kInterpreterBytecodeOffsetRegister,
- UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
@@ -1274,206 +1158,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ li(scratch1, ExternalReference::builtins_address(masm->isolate()));
- // Avoid untagging the Smi by merging the shift
- STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
- __ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
- __ Daddu(scratch1, scratch1, sfi_data);
- __ Ld(sfi_data, MemOperand(scratch1));
- __ Branch(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ Ld(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ Lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ Branch(&check_is_exported_function_data, ne, data_type,
- Operand(BYTECODE_ARRAY_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ Branch(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ Branch(&check_is_fixed_array, ne, data_type,
- Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ Ld(sfi_data, FieldMemOperand(
- sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ Branch(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
- Operand(FIXED_ARRAY_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ Branch(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ Branch(&check_is_function_template_info, ne, data_type,
- Operand(TUPLE2_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ Branch(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ Branch(&check_is_interpreter_data, ne, data_type,
- Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
- Operand(INTERPRETER_DATA_TYPE));
- }
- __ Ld(sfi_data, FieldMemOperand(
- sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = a1;
- Register feedback_vector = a2;
-
- // Do we have a valid feedback vector?
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = a4;
- __ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, t1);
-
- __ Move(t1, masm->CodeObject());
- __ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
-
- // Install the SFI's code entry.
- __ Sd(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ mov(t3, entry); // Write barrier clobbers t3 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, t3, a5,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = a1; // Must be preserved
- Register scratch0 = a2;
- Register scratch1 = t0;
-
- CHECK(scratch0 != a0 && scratch0 != a3 && scratch0 != a1);
- CHECK(scratch1 != a0 && scratch1 != a3 && scratch1 != a1);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ Ld(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ Ld(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ li(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ Dlsa(scratch1, scratch0, scratch1, kPointerSizeLog2);
- __ Ld(scratch1, MemOperand(scratch1));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ Branch(&deserialize_in_runtime, eq, scratch1, Operand(scratch0));
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ Sd(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ Daddu(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1633,9 +1317,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ Lw(a1, UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1882,8 +1566,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- // __ sll(a0, a0, kSmiTagSize);
- __ dsll32(a0, a0, 0);
+ __ SmiTag(a0);
__ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
__ Push(Smi::kZero); // Padding.
@@ -1916,7 +1599,20 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- a4 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(a2);
+ if (masm->emit_debug_code()) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, t8, t8);
+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, a4, Operand(zero_reg));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
Register args = a2;
Register len = a4;
@@ -2002,16 +1698,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ Ld(a7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ld(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(a7,
- FieldMemOperand(a7, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lhu(a7, FieldMemOperand(
+ a7, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a6, fp);
}
__ Branch(&arguments_done);
__ bind(&arguments_adaptor);
{
// Just get the length from the ArgumentsAdaptorFrame.
- __ Lw(a7, UntagSmiMemOperand(
- a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(a7,
+ MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
@@ -2137,8 +1833,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ Lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lhu(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION);
@@ -2169,7 +1865,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2216,7 +1912,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
@@ -2322,7 +2018,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2370,7 +2066,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
@@ -2450,42 +2146,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : requested object size (untagged)
- // -- ra : return address
- // -----------------------------------
- __ SmiTag(a0);
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : requested object size (untagged)
- // -- ra : return address
- // -----------------------------------
- __ SmiTag(a0);
- __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(a0, a1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message_id as Smi
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2624,26 +2284,33 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+ // Convert to Smi for the runtime call
+ __ SmiTag(t0);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<a1, a2, a3, a4, a5, a6, a7>();
+ constexpr RegList gp_regs =
+ Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
- __ push(kWasmInstanceRegister);
+ // Pass instance and function index as an explicit arguments to the runtime
+ // function.
+ __ Push(kWasmInstanceRegister, t0);
+ // Load the correct CEntry builtin from the instance object.
+ __ Ld(a2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The WASM instance is the second return value.
- __ mov(kWasmInstanceRegister, kReturnRegister1);
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, a2);
// Restore registers.
__ MultiPopFPU(fp_regs);
@@ -2953,8 +2620,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == a2);
+ const Register exponent = a2;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -3053,6 +2719,86 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lo, a0, Operand(1));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
+ RelocInfo::CODE_TARGET, hi, a0, Operand(1));
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument.
+ __ Ld(kScratchReg, MemOperand(sp, 0));
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- a1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ SmiTst(a3, kScratchReg);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(a3, a3, a4);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
+ Operand(MAP_TYPE));
+ }
+
+ // Figure out the right elements kind.
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(a3);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 3d6c397d6b..3ba3e99789 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -20,7 +20,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ Move(r15, ExternalReference::Create(address));
+ __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -31,58 +31,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments excluding receiver
- // -- r4 : target
- // -- r6 : new.target
- // -- r15 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * argc] : receiver
- // -----------------------------------
- __ AssertFunction(r4);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // CEntry expects r3 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ addi(r3, r3,
- Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(r3);
- __ Push(r3, r4, r6);
- __ SmiUntag(r3);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to r15
- // in Generate_adaptor.
- __ mr(r4, r15);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -105,41 +53,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// function.
// tail call a stub
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : array function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- __ TestIfSmi(r5, r0);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r5, r7, r8, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // r5 is the AllocationSite - here undefined.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- // If r6 (new target) is undefined, then this is the 'Call' case, so move
- // r4 (the constructor) to r6.
- Label call;
- __ cmp(r6, r5);
- __ bne(&call);
- __ mr(r6, r4);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -500,22 +415,27 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r3, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadHalfWord(
+ r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadP(r5, FieldMemOperand(
+ r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmpi(r3, Operand::Zero());
- __ beq(&done_loop);
- __ mtctr(r3);
+ __ cmpi(r6, Operand::Zero());
+ __ ble(&done_loop);
+
+ // setup r9 to first element address - kPointerSize
+ __ addi(r9, r5,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+ __ mtctr(r6);
__ bind(&loop);
+ __ LoadPU(ip, MemOperand(r9, kPointerSize));
__ push(ip);
__ bdnz(&loop);
+
__ bind(&done_loop);
}
@@ -910,18 +830,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
- __ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
- __ TestIfSmi(r7, r0);
- __ bne(&maybe_load_debug_bytecode_array, cr0);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ LoadWord(
@@ -1043,32 +958,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r3.
LeaveInterpreterFrame(masm, r5);
__ blr();
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ mr(kInterpreterBytecodeArrayRegister, ip);
- __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
- __ SmiUntag(ip);
- __ andi(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ mov(r7, Operand(debug_execution_mode));
- __ lwz(r7, MemOperand(r7));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ cmp(r7, ip);
- __ beq(&bytecode_array_loaded);
-
- __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
- __ b(&bytecode_array_loaded);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1187,8 +1076,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1306,215 +1195,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(r5, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ StoreP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
- __ RecordWriteField(r4, JSFunction::kCodeOffset, r5, r7, kLRHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
- __ SmiUntag(sfi_data, LeaveRC, kPointerSizeLog2);
- __ LoadPX(sfi_data, MemOperand(scratch1, sfi_data));
- __ b(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ LoadHalfWord(data_type,
- FieldMemOperand(data_type, Map::kInstanceTypeOffset), r0);
-
- // IsBytecodeArray: Interpret bytecode
- __ cmpi(data_type, Operand(BYTECODE_ARRAY_TYPE));
- __ bne(&check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ b(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ cmpi(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ bne(&check_is_fixed_array);
- __ LoadP(
- sfi_data,
- FieldMemOperand(sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ b(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ cmpi(data_type, Operand(FIXED_ARRAY_TYPE));
- __ bne(&check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ b(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ cmpi(data_type, Operand(TUPLE2_TYPE));
- __ bne(&check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ b(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ cmpi(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ bne(&check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ b(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ cmpi(data_type, Operand(INTERPRETER_DATA_TYPE));
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ LoadP(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = r4;
- Register feedback_vector = r5;
-
- // Do we have a valid feedback vector?
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = r7;
- __ LoadP(entry,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(entry,
- FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, r8);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(r8, masm->CodeObject());
- __ cmp(entry, r8);
- __ beq(&gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
- __ mr(r9, entry); // Write barrier clobbers ip below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r8,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = r4; // Must be preserved
- Register scratch0 = r5;
- Register scratch1 = r7;
-
- CHECK(scratch0 != r3 && scratch0 != r6 && scratch0 != r4);
- CHECK(scratch1 != r3 && scratch1 != r6 && scratch1 != r4);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ LoadP(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ LoadP(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2));
- __ LoadPX(scratch1, MemOperand(scratch0, scratch1));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ cmp(scratch1, scratch0);
- __ beq(&deserialize_in_runtime);
- }
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset),
- r0);
- __ mr(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r8,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ addi(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, target_builtin);
- __ Jump(ip);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1961,7 +1641,28 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- r6 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(r5);
+ Register scratch = ip;
+
+ if (masm->emit_debug_code()) {
+ // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(r5);
+ __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadHalfWord(scratch,
+ FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
+ __ beq(&ok);
+ __ cmpi(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ bne(&fail);
+ __ cmpi(r7, Operand::Zero());
+ __ beq(&ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2042,7 +1743,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
+ __ LoadHalfWord(
r8,
FieldMemOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ mr(r7, fp);
@@ -2179,7 +1880,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ LoadWordArith(
+ __ LoadHalfWord(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(r3);
ParameterCount expected(r5);
@@ -2460,42 +2161,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r4);
- __ Push(r4);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r4);
- __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(r4, r5);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : message_id as Smi
- // -- lr : return address
- // -----------------------------------
- __ push(r4);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -2507,10 +2172,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
+ __ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ beq(&dont_adapt_arguments);
__ cmp(r3, r5);
__ blt(&too_few);
- __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ beq(&dont_adapt_arguments);
{ // Enough parameters: actual >= expected
__ bind(&enough);
@@ -2630,28 +2295,35 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in r15 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r15, r15);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<r3, r4, r5, r6, r7, r8, r9>();
+ constexpr RegList gp_regs =
+ Register::ListOf<r3, r4, r5, r6, r7, r8, r9, r10>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<d1, d2, d3, d4, d5, d6, d7, d8>();
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
- __ Push(kWasmInstanceRegister);
+ // Pass instance and function index as explicit arguments to the runtime
+ // function.
+ __ Push(kWasmInstanceRegister, r15);
+ // Load the correct CEntry builtin from the instance object.
+ __ LoadP(r5, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r5);
+ // The entrypoint address is the return value.
__ mr(r11, kReturnRegister0);
- // The WASM instance is the second return value.
- __ mr(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
@@ -2858,6 +2530,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register result_reg = r3;
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
@@ -2970,8 +2644,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == r5);
+ const Register exponent = r5;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -3064,6 +2737,87 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ cmpli(r3, Operand(1));
+
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lt);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
+ RelocInfo::CODE_TARGET, gt);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ LoadP(r6, MemOperand(sp, 0));
+ __ cmpi(r6, Operand::Zero());
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne);
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- r4 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ TestIfSmi(r6, r0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r6, r6, r7, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r6);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmpi(r6, Operand(PACKED_ELEMENTS));
+ __ beq(&done);
+ __ cmpi(r6, Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmpi(r6, Operand(PACKED_ELEMENTS));
+ __ beq(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index dbcf633a43..c7e955f6e4 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -20,7 +20,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ Move(r7, ExternalReference::Create(address));
+ __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -31,64 +31,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments excluding receiver
- // -- r3 : target
- // -- r5 : new.target
- // -- r7 : entry point
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument
- // -- sp[4 * argc] : receiver
- // -----------------------------------
- __ AssertFunction(r3);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-
- // CEntry expects r2 to contain the number of arguments including the
- // receiver and the extra arguments.
- __ AddP(r2, r2,
- Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Insert extra arguments.
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ SmiTag(r2);
- __ Push(r2, r3, r5);
- __ SmiUntag(r2);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference. We have already loaded entry point to r7
- // in Generate_adaptor.
- __ LoadRR(r3, r7);
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the current native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
@@ -111,44 +53,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// function.
// tail call a stub
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : array function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r3);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ TestIfSmi(r4);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r4, r6, r7, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // r4 is the AllocationSite - here undefined.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- // If r5 (new target) is undefined, then this is the 'Call' case, so move
- // r3 (the constructor) to r5.
- Label call;
- __ CmpP(r5, r4);
- __ bne(&call);
- __ LoadRR(r5, r3);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -500,27 +406,36 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(
- r2, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadLogicalHalfWordP(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadP(r4, FieldMemOperand(
+ r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-#if V8_TARGET_ARCH_S390X
- __ CmpP(r2, Operand::Zero());
- __ beq(&done_loop);
-#else
- __ LoadAndTestP(r2, r2);
- __ beq(&done_loop);
-#endif
- __ LoadRR(r1, r2);
+ __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ SubP(sp, r5);
+
+ // ip = stack offset
+ // r5 = parameter array offset
+ __ LoadImmP(ip, Operand::Zero());
+ __ SubP(r5, Operand(kPointerSize));
+ __ blt(&done_loop);
+
+ __ lgfi(r1, Operand(-kPointerSize));
+
__ bind(&loop);
- __ push(ip);
- __ BranchOnCount(r1, &loop);
+
+ // parameter copy loop
+ __ LoadP(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
+ __ StoreP(r0, MemOperand(sp, ip));
+
+ // update offsets
+ __ lay(ip, MemOperand(ip, kPointerSize));
+
+ __ BranchRelativeOnIdxHighP(r5, r1, &loop);
+
__ bind(&done_loop);
}
@@ -528,7 +443,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, r5, r1);
+ GetSharedFunctionInfoBytecode(masm, r5, ip);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -924,18 +839,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
- __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ TestIfSmi(r6);
- __ bne(&maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ LoadW(r1, FieldMemOperand(feedback_vector,
@@ -1053,32 +963,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r2.
LeaveInterpreterFrame(masm, r4);
__ Ret();
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
-
- __ LoadRR(kInterpreterBytecodeArrayRegister, ip);
- __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
- __ SmiUntag(ip);
- __ AndP(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
-
- ExternalReference debug_execution_mode =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- __ mov(r6, Operand(debug_execution_mode));
- __ LoadW(r6, MemOperand(r6));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- __ CmpP(r6, ip);
- __ beq(&bytecode_array_loaded);
-
- __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
- __ b(&bytecode_array_loaded);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1197,8 +1081,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r2, r3, and r5 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1314,214 +1198,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(r4, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
- __ SmiUntag(sfi_data, kPointerSizeLog2);
- __ LoadP(sfi_data, MemOperand(scratch1, sfi_data));
- __ b(&done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
- __ LoadHalfWordP(data_type,
- FieldMemOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ CmpP(data_type, Operand(BYTECODE_ARRAY_TYPE));
- __ bne(&check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ b(&done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ CmpP(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ bne(&check_is_fixed_array);
- __ LoadP(
- sfi_data,
- FieldMemOperand(sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ b(&done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ CmpP(data_type, Operand(FIXED_ARRAY_TYPE));
- __ bne(&check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ b(&done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ CmpP(data_type, Operand(TUPLE2_TYPE));
- __ bne(&check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ b(&done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ CmpP(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
- __ bne(&check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ b(&done);
-
- // IsInterpreterData: Interpret bytecode
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ CmpP(data_type, Operand(INTERPRETER_DATA_TYPE));
- __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ LoadP(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : argument count (preserved for callee)
- // -- r5 : new target (preserved for callee)
- // -- r3 : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = r3;
- Register feedback_vector = r4;
-
- // Do we have a valid feedback vector?
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = r6;
- __ LoadP(entry,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(entry,
- FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, r7);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(r7, masm->CodeObject());
- __ CmpP(entry, r7);
- __ beq(&gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
- __ LoadRR(r8, entry); // Write barrier clobbers r8 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, r8, r7,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : argument count (preserved for callee)
- // -- r5 : new target (preserved for callee)
- // -- r3 : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = r3; // Must be preserved
- Register scratch0 = r4;
- Register scratch1 = r6;
-
- CHECK(scratch0 != r2 && scratch0 != r5 && scratch0 != r3);
- CHECK(scratch1 != r2 && scratch1 != r5 && scratch1 != r3);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ LoadP(scratch0,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ LoadP(scratch1,
- FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiUntag(scratch1);
- __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ ShiftLeftP(scratch1, scratch1, Operand(kPointerSizeLog2));
- __ LoadP(scratch1, MemOperand(scratch0, scratch1));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ CmpP(scratch1, scratch0);
- __ beq(&deserialize_in_runtime);
- }
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
- __ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, r8, r7,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ AddP(target_builtin, target_builtin,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ LoadRR(ip, target_builtin);
- __ Jump(ip);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1969,7 +1645,28 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- r5 : new.target (for [[Construct]])
// -----------------------------------
- __ AssertFixedArray(r4);
+ Register scratch = ip;
+
+ if (masm->emit_debug_code()) {
+ // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(r4);
+ __ LoadP(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadHalfWordP(scratch,
+ FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ CmpP(scratch, Operand(FIXED_ARRAY_TYPE));
+ __ beq(&ok);
+ __ CmpP(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ bne(&fail);
+ __ CmpP(r6, Operand::Zero());
+ __ beq(&ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2051,8 +1748,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(r7, FieldMemOperand(
- r7, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadLogicalHalfWordP(
+ r7,
+ FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
__ LoadRR(r6, fp);
}
__ b(&arguments_done);
@@ -2187,7 +1885,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- __ LoadW(
+ __ LoadLogicalHalfWordP(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(r2);
ParameterCount expected(r4);
@@ -2469,42 +2167,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r3);
- __ Push(r3);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : requested object size (untagged)
- // -- lr : return address
- // -----------------------------------
- __ SmiTag(r3);
- __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ Push(r3, r4);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : message_id as Smi
- // -- lr : return address
- // -----------------------------------
- __ push(r3);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
@@ -2516,10 +2178,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ CmpP(r2, r4);
+ __ tmll(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ b(Condition(1), &dont_adapt_arguments);
+ __ CmpLogicalP(r2, r4);
__ blt(&too_few);
- __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ beq(&dont_adapt_arguments);
{ // Enough parameters: actual >= expected
__ bind(&enough);
@@ -2638,8 +2300,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in r7 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r7, r7);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
@@ -2653,16 +2319,18 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
- __ Push(kWasmInstanceRegister);
+ // Pass instance and function index as explicit arguments to the runtime
+ // function.
+ __ Push(kWasmInstanceRegister, r7);
+ // Load the correct CEntry builtin from the instance object.
+ __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r4);
+ // The entrypoint address is the return value.
__ LoadRR(ip, r2);
- // The WASM instance is the second return value.
- __ LoadRR(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
@@ -2856,6 +2524,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register result_reg = r2;
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
@@ -2963,8 +2633,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == r4);
+ const Register exponent = r4;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -3054,6 +2723,87 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ CmpLogicalP(r2, Operand(1));
+
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET, lt);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
+ RelocInfo::CODE_TARGET, gt);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ LoadP(r5, MemOperand(sp, 0));
+ __ CmpP(r5, Operand::Zero());
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET, ne);
+ }
+
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- r3 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ __ TestIfSmi(r5);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r5, r5, r6, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r5);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ CmpP(r5, Operand(PACKED_ELEMENTS));
+ __ beq(&done);
+ __ CmpP(r5, Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ CmpP(r5, Operand(PACKED_ELEMENTS));
+ __ beq(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index c30b4e114f..ab0180e825 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -30,15 +30,24 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
AbstractCode::cast(code), name));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- OFStream os(trace_scope.file());
- os << "Builtin: " << name << "\n";
- code->Disassemble(name, os);
- os << "\n";
+ code->PrintBuiltinCode(isolate, name);
}
#endif
}
+AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
+ int32_t builtin_index) {
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ if (isolate->ShouldLoadConstantsFromRootList() &&
+ Builtins::IsIsolateIndependent(builtin_index) &&
+ isolate->heap()->memory_allocator()->code_range()->valid() &&
+ isolate->heap()->memory_allocator()->code_range()->size() <=
+ kMaxPCRelativeCodeRangeInMB * MB) {
+ options.use_pc_relative_calls_and_jumps = true;
+ }
+ return options;
+}
+
typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
@@ -71,7 +80,10 @@ Code* BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
- MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+
+ MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin_index),
+ buffer, buffer_size, CodeObjectRequired::kYes);
+ masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
generator(&masm);
CodeDesc desc;
@@ -91,7 +103,9 @@ Code* BuildAdaptor(Isolate* isolate, int32_t builtin_index,
CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
- MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin_index),
+ buffer, buffer_size, CodeObjectRequired::kYes);
+ masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
CodeDesc desc;
@@ -121,7 +135,8 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
isolate, &zone, argc_with_recv, Code::BUILTIN, name,
PoisoningMitigationLevel::kDontPoison, builtin_index);
generator(&state);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+ &state, BuiltinAssemblerOptions(isolate, builtin_index));
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -141,25 +156,29 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
- CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
+ CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
+ DCHECK_EQ(result_size, descriptor.GetReturnCount());
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(
isolate, &zone, descriptor, Code::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, result_size, 0, builtin_index);
+ PoisoningMitigationLevel::kDontPoison, 0, builtin_index);
generator(&state);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+ &state, BuiltinAssemblerOptions(isolate, builtin_index));
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
} // anonymous namespace
+// static
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, int index,
Code* code) {
DCHECK_EQ(index, code->builtin_index());
- builtins->builtins_[index] = code;
+ builtins->set_builtin(index, code);
}
+// static
void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
// Fill the builtins list with placeholders. References to these placeholder
// builtins are eventually replaced by the actual builtins. This is to
@@ -172,13 +191,16 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
}
}
+// static
void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
// Replace references from all code objects to placeholders.
Builtins* builtins = isolate->builtins();
DisallowHeapAllocation no_gc;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- static const int kRelocMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ static const int kRelocMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
HeapIterator iterator(isolate->heap());
while (HeapObject* obj = iterator.next()) {
if (!obj->IsCode()) continue;
@@ -186,11 +208,12 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
- if (RelocInfo::IsCodeTarget(rinfo->rmode())) {
+ if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
+ Builtins::IsIsolateIndependent(target->builtin_index()));
if (!target->is_builtin()) continue;
- Code* new_target =
- Code::cast(builtins->builtins_[target->builtin_index()]);
+ Code* new_target = builtins->builtin(target->builtin_index());
rinfo->set_target_address(new_target->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
@@ -199,10 +222,9 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!object->IsCode()) continue;
Code* target = Code::cast(object);
if (!target->is_builtin()) continue;
- Code* new_target =
- Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_object(new_target, UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ Code* new_target = builtins->builtin(target->builtin_index());
+ rinfo->set_target_object(isolate->heap(), new_target,
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
flush_icache = true;
}
@@ -213,6 +235,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
}
}
+// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
DCHECK(!builtins->initialized_);
@@ -237,7 +260,6 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
isolate, index, &Builtins::Generate_##Name, Argc, #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFC(Name, InterfaceDescriptor, result_size) \
- { InterfaceDescriptor##Descriptor descriptor(isolate); } \
code = BuildWithCodeStubAssemblerCS( \
isolate, index, &Builtins::Generate_##Name, \
CallDescriptors::InterfaceDescriptor, #Name, result_size); \
@@ -249,7 +271,6 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
CallDescriptors::Name, #Name, 1); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFH(Name, InterfaceDescriptor) \
- { InterfaceDescriptor##Descriptor descriptor(isolate); } \
/* Return size for IC builtins/handlers is always 1. */ \
code = BuildWithCodeStubAssemblerCS( \
isolate, index, &Builtins::Generate_##Name, \
@@ -274,16 +295,14 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
ReplacePlaceholders(isolate);
-#define SET_PROMISE_REJECTION_PREDICTION(Name) \
- Code::cast(builtins->builtins_[Builtins::k##Name]) \
- ->set_is_promise_rejection(true);
+#define SET_PROMISE_REJECTION_PREDICTION(Name) \
+ builtins->builtin(Builtins::k##Name)->set_is_promise_rejection(true);
BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(SET_PROMISE_REJECTION_PREDICTION)
#undef SET_PROMISE_REJECTION_PREDICTION
-#define SET_EXCEPTION_CAUGHT_PREDICTION(Name) \
- Code::cast(builtins->builtins_[Builtins::k##Name]) \
- ->set_is_exception_caught(true);
+#define SET_EXCEPTION_CAUGHT_PREDICTION(Name) \
+ builtins->builtin(Builtins::k##Name)->set_is_exception_caught(true);
BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
#undef SET_EXCEPTION_CAUGHT_PREDICTION
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 40e0bcbb2c..2d2a086de5 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -4,17 +4,68 @@
module typed_array {
extern runtime TypedArraySortFast(Context, Object): JSTypedArray;
+ extern macro ValidateTypedArray(
+ Context, Object, constexpr string): JSTypedArray;
- type MethodName;
- const kTypedArrayProtoSort: MethodName = '\"%TypedArray%.prototype.sort\"';
- extern macro ValidateTypedArray(Context, Object, MethodName): JSTypedArray;
+ extern macro LoadFixedTypedArrayElementAsTagged(
+ RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
+ extern macro StoreFixedTypedArrayElementFromTagged(
+ Context, FixedArrayBase, Smi, Object, constexpr ElementsKind,
+ constexpr ParameterMode);
- extern builtin TypedArrayLoadElementAsTagged(
- Context, JSTypedArray, Smi, Smi): Object;
- extern builtin TypedArrayStoreElementFromTagged(
- Context, JSTypedArray, Smi, Smi, Object);
+ type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
+ type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
- extern macro NumberIsNaN(Number): bool;
+ macro KindForArrayType<T : type>(): constexpr ElementsKind;
+ KindForArrayType<FixedUint8Array>(): constexpr ElementsKind {
+ return UINT8_ELEMENTS;
+ }
+ KindForArrayType<FixedInt8Array>(): constexpr ElementsKind {
+ return INT8_ELEMENTS;
+ }
+ KindForArrayType<FixedUint16Array>(): constexpr ElementsKind {
+ return UINT16_ELEMENTS;
+ }
+ KindForArrayType<FixedInt16Array>(): constexpr ElementsKind {
+ return INT16_ELEMENTS;
+ }
+ KindForArrayType<FixedUint32Array>(): constexpr ElementsKind {
+ return UINT32_ELEMENTS;
+ }
+ KindForArrayType<FixedInt32Array>(): constexpr ElementsKind {
+ return INT32_ELEMENTS;
+ }
+ KindForArrayType<FixedFloat32Array>(): constexpr ElementsKind {
+ return FLOAT32_ELEMENTS;
+ }
+ KindForArrayType<FixedFloat64Array>(): constexpr ElementsKind {
+ return FLOAT64_ELEMENTS;
+ }
+ KindForArrayType<FixedUint8ClampedArray>(): constexpr ElementsKind {
+ return UINT8_CLAMPED_ELEMENTS;
+ }
+ KindForArrayType<FixedBigUint64Array>(): constexpr ElementsKind {
+ return BIGUINT64_ELEMENTS;
+ }
+ KindForArrayType<FixedBigInt64Array>(): constexpr ElementsKind {
+ return BIGINT64_ELEMENTS;
+ }
+
+ builtin LoadFixedElement<T : type>(
+ context: Context, array: JSTypedArray, index: Smi): Object {
+ return LoadFixedTypedArrayElementAsTagged(
+ array.data_ptr, index, KindForArrayType<T>(), SMI_PARAMETERS);
+ }
+
+ builtin StoreFixedElement<T : type>(
+ context: Context, array: JSTypedArray, index: Smi,
+ value: Object): Object {
+ let elements: FixedTypedArrayBase =
+ unsafe_cast<FixedTypedArrayBase>(array.elements);
+ StoreFixedTypedArrayElementFromTagged(
+ context, elements, index, value, KindForArrayType<T>(), SMI_PARAMETERS);
+ return Undefined;
+ }
macro CallCompareWithDetachedCheck(
context: Context, array: JSTypedArray, comparefn: Callable, a: Object,
@@ -33,32 +84,10 @@ module typed_array {
return v;
}
- // Wrapped CSA macro for better readability. Ideally we want to map this
- // as the array operator "[]".
- // TODO(szuend): Change Load/Store macros so they use function pointers to
- // the correct builtins as soon as they are available in Torque.
- //
- // Currently the dispatch to the correct load/store instruction
- // is done during runtime in a builtin. This costs around 20%
- // performance in relevant benchmarks, but greatly reduces the
- // code size - compared to sort macro "copies" for each
- // ElementsKind that inline the correct load/store.
- macro Load(
- context: Context, array: JSTypedArray, kind: Smi, index: Smi): Object {
- return TypedArrayLoadElementAsTagged(context, array, kind, index);
- }
-
- // Wrapped array store CSA macro for better readability.
- macro Store(
- context: Context, array: JSTypedArray, kind: Smi, index: Smi,
- value: Object) {
- TypedArrayStoreElementFromTagged(context, array, kind, index, value);
- }
-
// InsertionSort is used for smaller arrays.
macro TypedArrayInsertionSort(
- context: Context, array: JSTypedArray, kind: Smi, from_arg: Smi,
- to_arg: Smi, comparefn: Callable)
+ context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi,
+ comparefn: Callable, Load: LoadFn, Store: StoreFn)
labels Detached {
let from: Smi = from_arg;
let to: Smi = to_arg;
@@ -66,25 +95,25 @@ module typed_array {
if (IsDetachedBuffer(array.buffer)) goto Detached;
for (let i: Smi = from + 1; i < to; ++i) {
- let element: Object = Load(context, array, kind, i);
+ let element: Object = Load(context, array, i);
let j: Smi = i - 1;
for (; j >= from; --j) {
- let tmp: Object = Load(context, array, kind, j);
+ let tmp: Object = Load(context, array, j);
let order: Number = CallCompareWithDetachedCheck(
context, array, comparefn, tmp, element) otherwise Detached;
if (order > 0) {
- Store(context, array, kind, j + 1, tmp);
+ Store(context, array, j + 1, tmp);
} else {
break;
}
}
- Store(context, array, kind, j + 1, element);
+ Store(context, array, j + 1, element);
}
}
macro TypedArrayQuickSortImpl(
- context: Context, array: JSTypedArray, kind: Smi, from_arg: Smi,
- to_arg: Smi, comparefn: Callable)
+ context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi,
+ comparefn: Callable, Load: LoadFn, Store: StoreFn)
labels Detached {
let from: Smi = from_arg;
let to: Smi = to_arg;
@@ -94,7 +123,8 @@ module typed_array {
// TODO(szuend): Investigate InsertionSort removal.
// Currently it does not make any difference when the
// benchmarks are run locally.
- TypedArrayInsertionSort(context, array, kind, from, to, comparefn)
+ TypedArrayInsertionSort(
+ context, array, from, to, comparefn, Load, Store)
otherwise Detached;
break;
}
@@ -106,9 +136,9 @@ module typed_array {
if (IsDetachedBuffer(array.buffer)) goto Detached;
// Find a pivot as the median of first, last and middle element.
- let v0: Object = Load(context, array, kind, from);
- let v1: Object = Load(context, array, kind, to - 1);
- let v2: Object = Load(context, array, kind, third_index);
+ let v0: Object = Load(context, array, from);
+ let v1: Object = Load(context, array, to - 1);
+ let v2: Object = Load(context, array, third_index);
let c01: Number = CallCompareWithDetachedCheck(
context, array, comparefn, v0, v1) otherwise Detached;
@@ -140,28 +170,28 @@ module typed_array {
}
// v0 <= v1 <= v2.
- Store(context, array, kind, from, v0);
- Store(context, array, kind, to - 1, v2);
+ Store(context, array, from, v0);
+ Store(context, array, to - 1, v2);
let pivot: Object = v1;
let low_end: Smi = from + 1; // Upper bound of elems lower than pivot.
let high_start: Smi = to - 1; // Lower bound of elems greater than pivot.
- let low_end_value: Object = Load(context, array, kind, low_end);
- Store(context, array, kind, third_index, low_end_value);
- Store(context, array, kind, low_end, pivot);
+ let low_end_value: Object = Load(context, array, low_end);
+ Store(context, array, third_index, low_end_value);
+ Store(context, array, low_end, pivot);
// From low_end to idx are elements equal to pivot.
// From idx to high_start are elements that haven"t been compared yet.
for (let idx: Smi = low_end + 1; idx < high_start; idx++) {
- let element: Object = Load(context, array, kind, idx);
+ let element: Object = Load(context, array, idx);
let order: Number = CallCompareWithDetachedCheck(
context, array, comparefn, element, pivot) otherwise Detached;
if (order < 0) {
- low_end_value = Load(context, array, kind, low_end);
- Store(context, array, kind, idx, low_end_value);
- Store(context, array, kind, low_end, element);
+ low_end_value = Load(context, array, low_end);
+ Store(context, array, idx, low_end_value);
+ Store(context, array, low_end, element);
low_end++;
} else if (order > 0) {
let break_for: bool = false;
@@ -173,7 +203,7 @@ module typed_array {
break;
}
- let top_elem: Object = Load(context, array, kind, high_start);
+ let top_elem: Object = Load(context, array, high_start);
order = CallCompareWithDetachedCheck(
context, array, comparefn, top_elem, pivot) otherwise Detached;
}
@@ -182,36 +212,38 @@ module typed_array {
break;
}
- let high_start_value: Object = Load(context, array, kind, high_start);
- Store(context, array, kind, idx, high_start_value);
- Store(context, array, kind, high_start, element);
+ let high_start_value: Object = Load(context, array, high_start);
+ Store(context, array, idx, high_start_value);
+ Store(context, array, high_start, element);
if (order < 0) {
- element = Load(context, array, kind, idx);
+ element = Load(context, array, idx);
- low_end_value = Load(context, array, kind, low_end);
- Store(context, array, kind, idx, low_end_value);
- Store(context, array, kind, low_end, element);
+ low_end_value = Load(context, array, low_end);
+ Store(context, array, idx, low_end_value);
+ Store(context, array, low_end, element);
low_end++;
}
}
}
if ((to - high_start) < (low_end - from)) {
- TypedArrayQuickSort(context, array, kind, high_start, to, comparefn);
+ TypedArrayQuickSort(
+ context, array, high_start, to, comparefn, Load, Store);
to = low_end;
} else {
- TypedArrayQuickSort(context, array, kind, from, low_end, comparefn);
+ TypedArrayQuickSort(
+ context, array, from, low_end, comparefn, Load, Store);
from = high_start;
}
}
}
builtin TypedArrayQuickSort(
- context: Context, array: JSTypedArray, kind: Smi, from: Smi, to: Smi,
- comparefn: Callable): JSTypedArray {
+ context: Context, array: JSTypedArray, from: Smi, to: Smi,
+ comparefn: Callable, Load: LoadFn, Store: StoreFn): JSTypedArray {
try {
- TypedArrayQuickSortImpl(context, array, kind, from, to, comparefn)
+ TypedArrayQuickSortImpl(context, array, from, to, comparefn, Load, Store)
otherwise Detached;
}
label Detached {
@@ -237,7 +269,7 @@ module typed_array {
// 3. Let buffer be ? ValidateTypedArray(obj).
// ValidateTypedArray currently returns the array, not the ViewBuffer.
let array: JSTypedArray =
- ValidateTypedArray(context, obj, kTypedArrayProtoSort);
+ ValidateTypedArray(context, obj, '%TypedArray%.prototype.sort');
// Default sorting is done in C++ using std::sort
if (comparefn_obj == Undefined) {
@@ -250,8 +282,55 @@ module typed_array {
try {
let comparefn: Callable =
cast<Callable>(comparefn_obj) otherwise CastError;
- let elements_kind: Smi = convert<Smi>(array.elements_kind);
- TypedArrayQuickSort(context, array, elements_kind, 0, len, comparefn);
+ let loadfn: LoadFn;
+ let storefn: StoreFn;
+
+ let elements_kind: ElementsKind = array.elements_kind;
+
+ if (IsElementsKindGreaterThan(elements_kind, UINT32_ELEMENTS)) {
+ if (elements_kind == INT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt32Array>;
+ storefn = StoreFixedElement<FixedInt32Array>;
+ } else if (elements_kind == FLOAT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedFloat32Array>;
+ storefn = StoreFixedElement<FixedFloat32Array>;
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedFloat64Array>;
+ storefn = StoreFixedElement<FixedFloat64Array>;
+ } else if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint8ClampedArray>;
+ storefn = StoreFixedElement<FixedUint8ClampedArray>;
+ } else if (elements_kind == BIGUINT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedBigUint64Array>;
+ storefn = StoreFixedElement<FixedBigUint64Array>;
+ } else if (elements_kind == BIGINT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedBigInt64Array>;
+ storefn = StoreFixedElement<FixedBigInt64Array>;
+ } else {
+ unreachable;
+ }
+ } else {
+ if (elements_kind == UINT8_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint8Array>;
+ storefn = StoreFixedElement<FixedUint8Array>;
+ } else if (elements_kind == INT8_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt8Array>;
+ storefn = StoreFixedElement<FixedInt8Array>;
+ } else if (elements_kind == UINT16_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint16Array>;
+ storefn = StoreFixedElement<FixedUint16Array>;
+ } else if (elements_kind == INT16_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt16Array>;
+ storefn = StoreFixedElement<FixedInt16Array>;
+ } else if (elements_kind == UINT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint32Array>;
+ storefn = StoreFixedElement<FixedUint32Array>;
+ } else {
+ unreachable;
+ }
+ }
+
+ TypedArrayQuickSort(context, array, 0, len, comparefn, loadfn, storefn);
}
label CastError {
unreachable;
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index c721787093..438b577af6 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -21,7 +21,8 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- __ LoadAddress(rbx, ExternalReference::Create(address));
+ __ LoadAddress(kJavaScriptCallExtraArg1Register,
+ ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -32,64 +33,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
-namespace {
-
-void AdaptorWithExitFrameType(MacroAssembler* masm,
- Builtins::ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rbx : entry point
- // -- rdi : target
- // -- rdx : new.target
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -- ...
- // -- rsp[8 * argc] : first argument
- // -- rsp[8 * (argc + 1)] : receiver
- // -----------------------------------
- __ AssertFunction(rdi);
-
- // The logic contained here is mirrored for TurboFan inlining in
- // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // CEntry expects rax to contain the number of arguments including the
- // receiver and the extra arguments.
- __ addp(rax, Immediate(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
-
- // Unconditionally insert argc, target and new target as extra arguments. They
- // will be used by stack frame iterators when constructing the stack trace.
- __ PopReturnAddressTo(kScratchRegister);
- __ Integer32ToSmi(rax, rax);
- __ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
- __ Push(rax);
- __ SmiToInteger32(rax, rax);
- __ Push(rdi);
- __ Push(rdx);
- __ PushReturnAddressFrom(kScratchRegister);
-
- // Jump to the C entry runtime stub directly here instead of using
- // JumpToExternalReference because rbx is loaded by Generate_adaptor.
- Handle<Code> code =
- CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-} // namespace
-
-void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, EXIT);
-}
-
-void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
- AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -100,7 +43,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the number of arguments to the callee.
- __ Integer32ToSmi(rax, rax);
+ __ SmiTag(rax, rax);
__ Push(rax);
// Push a copy of the target function and the new target.
__ Push(rdi);
@@ -115,7 +58,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ SmiUntag(rax, rax);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
@@ -137,7 +80,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ Integer32ToSmi(rcx, rax);
+ __ SmiTag(rcx, rax);
__ Push(rsi);
__ Push(rcx);
@@ -208,7 +151,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
// Preserve the incoming parameters on the stack.
- __ Integer32ToSmi(rcx, rax);
+ __ SmiTag(rcx, rax);
__ Push(rsi);
__ Push(rcx);
__ Push(rdi);
@@ -271,8 +214,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ movp(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
- __ SmiToInteger32(rax,
- Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -606,20 +548,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- rsp[0] : generator receiver
// -----------------------------------
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
+ // Copy the function arguments from the generator object's register file.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movl(rcx,
- FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzxwq(
+ rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
+
+ __ movp(rbx,
+ FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
+
{
Label done_loop, loop;
+ __ Set(r9, 0);
+
__ bind(&loop);
- __ subl(rcx, Immediate(1));
- __ j(carry, &done_loop, Label::kNear);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ cmpl(r9, rcx);
+ __ j(greater_equal, &done_loop, Label::kNear);
+ __ Push(FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ addl(r9, Immediate(1));
__ jmp(&loop);
+
__ bind(&done_loop);
}
@@ -636,7 +583,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rax, FieldOperand(
+ __ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
@@ -910,17 +857,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
- __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
- &maybe_load_debug_bytecode_array);
- __ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ incl(
@@ -947,7 +890,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push bytecode array and Smi tagged bytecode offset.
__ Push(kInterpreterBytecodeArrayRegister);
- __ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
+ __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
__ Push(rcx);
// Allocate the local and temporary register file on the stack.
@@ -1017,8 +960,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
@@ -1033,39 +976,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
-
- // Load debug copy of the bytecode array if it exists.
- // kInterpreterBytecodeArrayRegister is already loaded with
- // SharedFunctionInfo::kFunctionDataOffset.
- __ bind(&maybe_load_debug_bytecode_array);
- __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
- __ movp(kScratchRegister,
- FieldOperand(rcx, DebugInfo::kDebugBytecodeArrayOffset));
- __ JumpIfRoot(kScratchRegister, Heap::kUndefinedValueRootIndex,
- &bytecode_array_loaded);
-
- __ movp(kInterpreterBytecodeArrayRegister, kScratchRegister);
- __ SmiToInteger32(rax, FieldOperand(rcx, DebugInfo::kFlagsOffset));
- __ andb(rax, Immediate(DebugInfo::kDebugExecutionMode));
- STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
- static_cast<int>(DebugInfo::kSideEffects));
- ExternalReference debug_execution_mode_address =
- ExternalReference::debug_execution_mode_address(masm->isolate());
- Operand debug_execution_mode =
- masm->ExternalOperand(debug_execution_mode_address);
- __ cmpb(rax, debug_execution_mode);
- __ j(equal, &bytecode_array_loaded);
-
- __ Push(closure);
- __ Push(feedback_vector);
- __ Push(kInterpreterBytecodeArrayRegister);
- __ Push(closure);
- __ CallRuntime(Runtime::kDebugApplyInstrumentation);
- __ Pop(kInterpreterBytecodeArrayRegister);
- __ Pop(feedback_vector);
- __ Pop(closure);
- __ jmp(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1189,8 +1099,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// context at this point).
__ AssertFunction(rdi);
// Jump to the constructor function (rax, rbx, rdx passed on).
- ArrayConstructorStub array_constructor_stub(masm->isolate());
- __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor (rax, rdx, rdi passed on).
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1260,8 +1170,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
@@ -1278,8 +1188,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
@@ -1292,7 +1202,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
&if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
- __ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
+ __ SmiTag(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
@@ -1306,207 +1216,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
-// builtin does not set the code field in the JS function. If there isn't then
-// we do not need this builtin and can jump directly to CompileLazy.
-void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to CompileLazy.
- __ Move(rcx, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ movp(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
- __ RecordWriteField(rdi, JSFunction::kCodeOffset, rcx, r15, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Jump to compile lazy.
- Generate_CompileLazy(masm);
-}
-
-static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
- Register scratch1) {
- // Figure out the SFI's code object.
- Label done;
- Label check_is_bytecode_array;
- Label check_is_exported_function_data;
- Label check_is_fixed_array;
- Label check_is_pre_parsed_scope_data;
- Label check_is_function_template_info;
- Label check_is_interpreter_data;
-
- Register data_type = scratch1;
-
- // IsSmi: Is builtin
- __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
- __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
- SmiIndex index = masm->SmiToIndex(sfi_data, sfi_data, kPointerSizeLog2);
- __ movp(sfi_data, Operand(scratch1, index.reg, index.scale, 0));
- __ j(always, &done);
-
- // Get map for subsequent checks.
- __ bind(&check_is_bytecode_array);
- __ movp(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
- __ movw(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
-
- // IsBytecodeArray: Interpret bytecode
- __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
- __ j(not_equal, &check_is_exported_function_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
- __ j(always, &done);
-
- // IsWasmExportedFunctionData: Use the wrapper code
- __ bind(&check_is_exported_function_data);
- __ cmpw(data_type, Immediate(WASM_EXPORTED_FUNCTION_DATA_TYPE));
- __ j(not_equal, &check_is_fixed_array);
- __ movp(sfi_data,
- FieldOperand(sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
- __ j(always, &done);
-
- // IsFixedArray: Instantiate using AsmWasmData
- __ bind(&check_is_fixed_array);
- __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
- __ j(not_equal, &check_is_pre_parsed_scope_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
- __ j(always, &done);
-
- // IsPreParsedScopeData: Compile lazy
- __ bind(&check_is_pre_parsed_scope_data);
- __ cmpw(data_type, Immediate(TUPLE2_TYPE));
- __ j(not_equal, &check_is_function_template_info);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
- __ j(always, &done);
-
- // IsFunctionTemplateInfo: API call
- __ bind(&check_is_function_template_info);
- __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
- __ j(not_equal, &check_is_interpreter_data);
- __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
- __ j(always, &done);
-
- // IsInterpreterData: Interpret bytecode with unique interpreter
- __ bind(&check_is_interpreter_data);
- if (FLAG_debug_code) {
- __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
- __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
- }
- __ movp(
- sfi_data,
- FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
-
- __ bind(&done);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
- // First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime;
-
- Register closure = rdi;
- Register feedback_vector = rbx;
-
- // Do we have a valid feedback vector?
- __ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime);
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
-
- // We found no optimized code. Infer the code object needed for the SFI.
- Register entry = rcx;
- __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoCode(masm, entry, rbx);
-
- // If code entry points to anything other than CompileLazy, install that.
- __ Move(rbx, masm->CodeObject());
- __ cmpp(entry, rbx);
- __ j(equal, &gotta_call_runtime);
-
- // Install the SFI's code entry.
- __ movp(FieldOperand(closure, JSFunction::kCodeOffset), entry);
- __ movp(r14, entry); // Write barrier clobbers r14 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, r14, r15,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
- __ jmp(entry);
-
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
-
- Label deserialize_in_runtime;
-
- Register target = rdi; // Must be preserved
- Register scratch0 = rbx;
- Register scratch1 = r12;
-
- CHECK(scratch0 != rax && scratch0 != rdx && scratch0 != rdi);
- CHECK(scratch1 != rax && scratch1 != rdx && scratch1 != rdi);
- CHECK(scratch0 != scratch1);
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
-
- __ AssertFunction(target);
- __ movp(scratch0,
- FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ movp(scratch1,
- FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
- __ AssertSmi(scratch1);
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy to correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- {
- // Load the code object at builtins_table[builtin_id] into scratch1.
-
- __ SmiToInteger32(scratch1, scratch1);
- __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
- __ movp(scratch1, Operand(scratch0, scratch1, times_pointer_size, 0));
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
-
- __ Move(scratch0, masm->CodeObject());
- __ cmpp(scratch1, scratch0);
- __ j(equal, &deserialize_in_runtime);
- }
-
- {
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
-
- Register target_builtin = scratch1;
-
- __ movp(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
- __ movp(r14, target_builtin); // Write barrier clobbers r14 below.
- __ RecordWriteField(target, JSFunction::kCodeOffset, r14, r15,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // All copying is done. Jump to the deserialized code object.
-
- __ leap(target_builtin, FieldOperand(target_builtin, Code::kHeaderSize));
- __ jmp(target_builtin);
- }
-
- __ bind(&deserialize_in_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1519,7 +1228,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Preserve argument count for later compare.
__ movp(rcx, rax);
// Push the number of arguments to the callee.
- __ Integer32ToSmi(rax, rax);
+ __ SmiTag(rax, rax);
__ Push(rax);
// Push a copy of the target function and the new target.
__ Push(rdi);
@@ -1556,7 +1265,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Drop(2);
__ Pop(rcx);
- __ SmiToInteger32(rcx, rcx);
+ __ SmiUntag(rcx, rcx);
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
@@ -1570,7 +1279,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ SmiUntag(rax, rax);
}
// On failure, tail call back to regular js by re-calling the function
// which has be reset to the compile lazy builtin.
@@ -1597,7 +1306,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int code = config->GetAllocatableGeneralCode(i);
__ popq(Register::from_code(code));
if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
- __ SmiToInteger32(Register::from_code(code), Register::from_code(code));
+ __ SmiUntag(Register::from_code(code), Register::from_code(code));
}
}
__ movq(
@@ -1890,45 +1599,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // tail call a stub
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : array function
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // rbx is the AllocationSite - here undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- // If rdx (new target) is undefined, then this is the 'Call' case, so move
- // rdi (the constructor) to rdx.
- Label call;
- __ cmpp(rdx, rbx);
- __ j(not_equal, &call);
- __ movp(rdx, rdi);
-
- // Run the native code for the Array function called as a normal function.
- __ bind(&call);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1944,7 +1617,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
- __ Integer32ToSmi(r8, rax);
+ __ SmiTag(r8, rax);
__ Push(r8);
__ Push(Immediate(0)); // Padding.
@@ -1965,48 +1638,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : requested object size (untagged)
- // -- rsp[0] : return address
- // -----------------------------------
- __ Integer32ToSmi(rdx, rdx);
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : requested object size (untagged)
- // -- rsp[0] : return address
- // -----------------------------------
- __ Integer32ToSmi(rdx, rdx);
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : message_id as Smi
- // -- rsp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
@@ -2020,10 +1651,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(counters->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmpp(rax, rbx);
- __ j(less, &too_few);
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
+ __ cmpp(rax, rbx);
+ __ j(less, &too_few);
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
@@ -2125,7 +1756,24 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- rdx : new.target (for [[Construct]])
// -- rsp[0] : return address
// -----------------------------------
- __ AssertFixedArray(rbx);
+ if (masm->emit_debug_code()) {
+ // Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
+ Label ok, fail;
+ __ AssertNotSmi(rbx);
+ Register map = r9;
+ __ movp(map, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CmpInstanceType(map, FIXED_ARRAY_TYPE);
+ __ j(equal, &ok);
+ __ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
+ __ j(not_equal, &fail);
+ __ cmpl(rcx, Immediate(0));
+ __ j(equal, &ok);
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
// Check for stack overflow.
{
@@ -2210,15 +1858,15 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ movl(r8,
- FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzxwq(
+ r8, FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx, rbp);
}
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
{
- __ SmiToInteger32(
- r8, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r8,
+ Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
@@ -2324,7 +1972,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// TODO(bmeurer): Inline the allocation here to avoid building the frame
// in the fast case? (fall back to AllocateInNewSpace?)
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
+ __ SmiTag(rax, rax);
__ Push(rax);
__ Push(rdi);
__ movp(rax, rcx);
@@ -2335,7 +1983,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movp(rcx, rax);
__ Pop(rdi);
__ Pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ SmiUntag(rax, rax);
}
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
@@ -2351,7 +1999,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rsi : the function context.
// -----------------------------------
- __ movsxlq(
+ __ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(rax);
ParameterCount expected(rbx);
@@ -2379,7 +2027,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
{
@@ -2431,7 +2079,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
__ decl(rbx);
__ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
@@ -2662,10 +2310,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
- __ SmiToInteger32(rbx,
- Operand(rbx, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ SmiUntag(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
__ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
@@ -2686,43 +2333,55 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was pushed to the stack by the caller as int32.
+ __ Pop(r11);
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r11, r11);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
+ static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
+ arraysize(wasm::kGpParamRegisters),
+ "frame size mismatch");
for (Register reg : wasm::kGpParamRegisters) {
- if (reg == kWasmInstanceRegister) continue;
__ Push(reg);
}
- __ subp(rsp, Immediate(16 * arraysize(wasm::kFpParamRegisters)));
+ static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
+ arraysize(wasm::kFpParamRegisters),
+ "frame size mismatch");
+ __ subp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(rsp, offset), reg);
- offset += 16;
+ offset += kSimd128Size;
}
- // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the WASM instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
+ // Push the function index as second argument.
+ __ Push(r11);
+ // Load the correct CEntry builtin from the instance object.
+ __ movp(rcx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
- __ CallRuntime(Runtime::kWasmCompileLazy);
- // The entrypoint address is the first return value.
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, rcx);
+ // The entrypoint address is the return value.
__ movq(r11, kReturnRegister0);
- // The WASM instance is the second return value.
- __ movq(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
- offset -= 16;
+ offset -= kSimd128Size;
__ movdqu(reg, Operand(rsp, offset));
}
DCHECK_EQ(0, offset);
- __ addp(rsp, Immediate(16 * arraysize(wasm::kFpParamRegisters)));
+ __ addp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
- if (reg == kWasmInstanceRegister) continue;
__ Pop(reg);
}
}
@@ -2962,8 +2621,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(exponent == rdx);
+ const Register exponent = rdx;
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -3091,6 +2749,101 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ ret(0);
}
+namespace {
+
+void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ testp(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(rcx, args.GetArgumentOperand(0));
+ __ testp(rcx, rcx);
+ __ j(zero, &normal_sequence);
+
+ __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
+ masm->isolate(), GetHoleyElementsKind(kind))
+ .code(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&normal_sequence);
+ __ Jump(
+ CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
+ .code(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&not_one_case);
+ Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+} // namespace
+
+void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a nullptr and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(rcx);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
+ __ j(equal, &done);
+ __ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index a5cb8ad9ca..64ca681416 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -96,7 +96,7 @@ class V8_EXPORT_PRIVATE Cancelable {
protected:
bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
bool IsRunning() { return status_.Value() == kRunning; }
- intptr_t CancelAttempts() { return cancel_counter_.Value(); }
+ intptr_t CancelAttempts() { return cancel_counter_; }
private:
// Identifies the state a cancelable task is in:
@@ -116,7 +116,7 @@ class V8_EXPORT_PRIVATE Cancelable {
if (status_.TrySetValue(kWaiting, kCanceled)) {
return true;
}
- cancel_counter_.Increment(1);
+ cancel_counter_++;
return false;
}
@@ -127,7 +127,7 @@ class V8_EXPORT_PRIVATE Cancelable {
// The counter is incremented for failing tries to cancel a task. This can be
// used by the task itself as an indication how often external entities tried
// to abort it.
- base::AtomicNumber<intptr_t> cancel_counter_;
+ std::atomic<intptr_t> cancel_counter_;
friend class CancelableTaskManager;
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 5194628912..09cd5a62e0 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -83,13 +83,12 @@ class CodeEventListener {
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
- virtual void CodeMoveEvent(AbstractCode* from, AbstractCode* to) = 0;
+ virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
virtual void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) = 0;
- enum DeoptKind { kSoft, kLazy, kEager };
- virtual void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+ virtual void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) = 0;
virtual bool is_listening_to_code_events() { return false; }
@@ -155,7 +154,7 @@ class CodeEventDispatcher {
void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+ void CodeMoveEvent(AbstractCode* from, Address to) {
CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
}
void SharedFunctionInfoMoveEvent(Address from, Address to) {
@@ -165,7 +164,7 @@ class CodeEventDispatcher {
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared) {
CODE_EVENT_DISPATCH(CodeDisableOptEvent(code, shared));
}
- void CodeDeoptEvent(Code* code, CodeEventListener::DeoptKind kind, Address pc,
+ void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) {
CODE_EVENT_DISPATCH(CodeDeoptEvent(code, kind, pc, fp_to_sp_delta));
}
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 4479d33fb8..b6eb03f81b 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -18,7 +18,7 @@ namespace {
template <typename Stub>
Callable make_callable(Stub& stub) {
typedef typename Stub::Descriptor Descriptor;
- return Callable(stub.GetCode(), Descriptor(stub.isolate()));
+ return Callable(stub.GetCode(), Descriptor{});
}
} // namespace
@@ -70,14 +70,24 @@ Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
- CallApiGetterStub stub(isolate);
- return make_callable(stub);
+ return Callable(BUILTIN_CODE(isolate, CallApiGetter), ApiGetterDescriptor{});
}
// static
Callable CodeFactory::CallApiCallback(Isolate* isolate, int argc) {
- CallApiCallbackStub stub(isolate, argc);
- return make_callable(stub);
+ switch (argc) {
+ case 0:
+ return Callable(BUILTIN_CODE(isolate, CallApiCallback_Argc0),
+ ApiCallbackDescriptor{});
+ case 1:
+ return Callable(BUILTIN_CODE(isolate, CallApiCallback_Argc1),
+ ApiCallbackDescriptor{});
+ default: {
+ CallApiCallbackStub stub(isolate, argc);
+ return make_callable(stub);
+ }
+ }
+ UNREACHABLE();
}
// static
@@ -86,7 +96,7 @@ Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
typeof_mode == NOT_INSIDE_TYPEOF
? BUILTIN_CODE(isolate, LoadGlobalICTrampoline)
: BUILTIN_CODE(isolate, LoadGlobalICInsideTypeofTrampoline),
- LoadGlobalDescriptor(isolate));
+ LoadGlobalDescriptor{});
}
// static
@@ -95,21 +105,19 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
return Callable(typeof_mode == NOT_INSIDE_TYPEOF
? BUILTIN_CODE(isolate, LoadGlobalIC)
: BUILTIN_CODE(isolate, LoadGlobalICInsideTypeof),
- LoadGlobalWithVectorDescriptor(isolate));
+ LoadGlobalWithVectorDescriptor{});
}
Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreICTrampoline),
- StoreDescriptor(isolate));
+ return Callable(BUILTIN_CODE(isolate, StoreICTrampoline), StoreDescriptor{});
}
Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreIC),
- StoreWithVectorDescriptor(isolate));
+ return Callable(BUILTIN_CODE(isolate, StoreIC), StoreWithVectorDescriptor{});
}
// static
@@ -147,14 +155,14 @@ Callable CodeFactory::BinaryOperation(Isolate* isolate, Operation op) {
Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
ToPrimitiveHint hint) {
return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
- TypeConversionDescriptor(isolate));
+ TypeConversionDescriptor{});
}
// static
Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate,
OrdinaryToPrimitiveHint hint) {
return Callable(isolate->builtins()->OrdinaryToPrimitive(hint),
- TypeConversionDescriptor(isolate));
+ TypeConversionDescriptor{});
}
// static
@@ -185,110 +193,107 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
// static
Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ResumeGeneratorTrampoline),
- ResumeGeneratorDescriptor(isolate));
+ ResumeGeneratorDescriptor{});
}
// static
Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, FrameDropperTrampoline),
- FrameDropperTrampolineDescriptor(isolate));
+ FrameDropperTrampolineDescriptor{});
}
// static
Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, HandleDebuggerStatement),
- ContextOnlyDescriptor(isolate));
+ ContextOnlyDescriptor{});
}
// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
return Callable(isolate->builtins()->NewFunctionContext(scope_type),
- FastNewFunctionContextDescriptor(isolate));
+ FastNewFunctionContextDescriptor{});
}
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ArgumentsAdaptorTrampoline),
- ArgumentAdaptorDescriptor(isolate));
+ ArgumentAdaptorDescriptor{});
}
// static
Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
- return Callable(isolate->builtins()->Call(mode),
- CallTrampolineDescriptor(isolate));
+ return Callable(isolate->builtins()->Call(mode), CallTrampolineDescriptor{});
}
// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, CallWithArrayLike),
- CallWithArrayLikeDescriptor(isolate));
+ CallWithArrayLikeDescriptor{});
}
// static
Callable CodeFactory::CallWithSpread(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, CallWithSpread),
- CallWithSpreadDescriptor(isolate));
+ CallWithSpreadDescriptor{});
}
// static
Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
return Callable(isolate->builtins()->CallFunction(mode),
- CallTrampolineDescriptor(isolate));
+ CallTrampolineDescriptor{});
}
// static
Callable CodeFactory::CallVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallVarargs),
- CallVarargsDescriptor(isolate));
+ return Callable(BUILTIN_CODE(isolate, CallVarargs), CallVarargsDescriptor{});
}
// static
Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, CallForwardVarargs),
- CallForwardVarargsDescriptor(isolate));
+ CallForwardVarargsDescriptor{});
}
// static
Callable CodeFactory::CallFunctionForwardVarargs(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, CallFunctionForwardVarargs),
- CallForwardVarargsDescriptor(isolate));
+ CallForwardVarargsDescriptor{});
}
// static
Callable CodeFactory::Construct(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, Construct),
- ConstructTrampolineDescriptor(isolate));
+ return Callable(BUILTIN_CODE(isolate, Construct), JSTrampolineDescriptor{});
}
// static
Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ConstructWithSpread),
- ConstructWithSpreadDescriptor(isolate));
+ ConstructWithSpreadDescriptor{});
}
// static
Callable CodeFactory::ConstructFunction(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ConstructFunction),
- ConstructTrampolineDescriptor(isolate));
+ JSTrampolineDescriptor{});
}
// static
Callable CodeFactory::ConstructVarargs(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ConstructVarargs),
- ConstructVarargsDescriptor(isolate));
+ ConstructVarargsDescriptor{});
}
// static
Callable CodeFactory::ConstructForwardVarargs(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ConstructForwardVarargs),
- ConstructForwardVarargsDescriptor(isolate));
+ ConstructForwardVarargsDescriptor{});
}
// static
Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ConstructFunctionForwardVarargs),
- ConstructForwardVarargsDescriptor(isolate));
+ ConstructForwardVarargsDescriptor{});
}
// static
@@ -297,14 +302,14 @@ Callable CodeFactory::InterpreterPushArgsThenCall(
InterpreterPushArgsMode mode) {
return Callable(
isolate->builtins()->InterpreterPushArgsThenCall(receiver_mode, mode),
- InterpreterPushArgsThenCallDescriptor(isolate));
+ InterpreterPushArgsThenCallDescriptor{});
}
// static
Callable CodeFactory::InterpreterPushArgsThenConstruct(
Isolate* isolate, InterpreterPushArgsMode mode) {
return Callable(isolate->builtins()->InterpreterPushArgsThenConstruct(mode),
- InterpreterPushArgsThenConstructDescriptor(isolate));
+ InterpreterPushArgsThenConstructDescriptor{});
}
// static
@@ -313,61 +318,123 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// save fpregs too.
Handle<Code> code = CodeFactory::CEntry(isolate, result_size, kDontSaveFPRegs,
kArgvInRegister);
- return Callable(code, InterpreterCEntryDescriptor(isolate));
+ if (result_size == 1) {
+ return Callable(code, InterpreterCEntry1Descriptor{});
+ } else {
+ DCHECK_EQ(result_size, 2);
+ return Callable(code, InterpreterCEntry2Descriptor{});
+ }
}
// static
Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, InterpreterOnStackReplacement),
- ContextOnlyDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayConstructor(Isolate* isolate) {
- ArrayConstructorStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ArrayPop(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ArrayPop), BuiltinDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayShift(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ArrayShift),
- BuiltinDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ExtractFastJSArray(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ExtractFastJSArray),
- ExtractFastJSArrayDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::CloneFastJSArray(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CloneFastJSArray),
- CloneFastJSArrayDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayPush(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ArrayPush), BuiltinDescriptor(isolate));
+ ContextOnlyDescriptor{});
+}
+
+// static
+Callable CodeFactory::ArrayNoArgumentConstructor(
+ Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode) {
+#define CASE(kind_caps, kind_camel, mode_camel) \
+ case kind_caps: \
+ return Callable( \
+ BUILTIN_CODE(isolate, \
+ ArrayNoArgumentConstructor_##kind_camel##_##mode_camel), \
+ ArrayNoArgumentConstructorDescriptor{})
+ if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
+ DCHECK(IsSmiElementsKind(kind));
+ switch (kind) {
+ CASE(PACKED_SMI_ELEMENTS, PackedSmi, DontOverride);
+ CASE(HOLEY_SMI_ELEMENTS, HoleySmi, DontOverride);
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(override_mode == DISABLE_ALLOCATION_SITES ||
+ !AllocationSite::ShouldTrack(kind));
+ switch (kind) {
+ CASE(PACKED_SMI_ELEMENTS, PackedSmi, DisableAllocationSites);
+ CASE(HOLEY_SMI_ELEMENTS, HoleySmi, DisableAllocationSites);
+ CASE(PACKED_ELEMENTS, Packed, DisableAllocationSites);
+ CASE(HOLEY_ELEMENTS, Holey, DisableAllocationSites);
+ CASE(PACKED_DOUBLE_ELEMENTS, PackedDouble, DisableAllocationSites);
+ CASE(HOLEY_DOUBLE_ELEMENTS, HoleyDouble, DisableAllocationSites);
+ default:
+ UNREACHABLE();
+ }
+ }
+#undef CASE
+}
+
+// static
+Callable CodeFactory::ArraySingleArgumentConstructor(
+ Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode) {
+#define CASE(kind_caps, kind_camel, mode_camel) \
+ case kind_caps: \
+ return Callable( \
+ BUILTIN_CODE( \
+ isolate, \
+ ArraySingleArgumentConstructor_##kind_camel##_##mode_camel), \
+ ArraySingleArgumentConstructorDescriptor{})
+ if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
+ DCHECK(IsSmiElementsKind(kind));
+ switch (kind) {
+ CASE(PACKED_SMI_ELEMENTS, PackedSmi, DontOverride);
+ CASE(HOLEY_SMI_ELEMENTS, HoleySmi, DontOverride);
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(override_mode == DISABLE_ALLOCATION_SITES ||
+ !AllocationSite::ShouldTrack(kind));
+ switch (kind) {
+ CASE(PACKED_SMI_ELEMENTS, PackedSmi, DisableAllocationSites);
+ CASE(HOLEY_SMI_ELEMENTS, HoleySmi, DisableAllocationSites);
+ CASE(PACKED_ELEMENTS, Packed, DisableAllocationSites);
+ CASE(HOLEY_ELEMENTS, Holey, DisableAllocationSites);
+ CASE(PACKED_DOUBLE_ELEMENTS, PackedDouble, DisableAllocationSites);
+ CASE(HOLEY_DOUBLE_ELEMENTS, HoleyDouble, DisableAllocationSites);
+ default:
+ UNREACHABLE();
+ }
+ }
+#undef CASE
}
// static
-Callable CodeFactory::FunctionPrototypeBind(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, FunctionPrototypeBind),
- BuiltinDescriptor(isolate));
+Callable CodeFactory::InternalArrayNoArgumentConstructor(Isolate* isolate,
+ ElementsKind kind) {
+ switch (kind) {
+ case PACKED_ELEMENTS:
+ return Callable(
+ BUILTIN_CODE(isolate, InternalArrayNoArgumentConstructor_Packed),
+ ArrayNoArgumentConstructorDescriptor{});
+ case HOLEY_ELEMENTS:
+ return Callable(
+ BUILTIN_CODE(isolate, InternalArrayNoArgumentConstructor_Holey),
+ ArrayNoArgumentConstructorDescriptor{});
+ default:
+ UNREACHABLE();
+ }
}
// static
-Callable CodeFactory::TransitionElementsKind(Isolate* isolate,
- ElementsKind from, ElementsKind to,
- bool is_jsarray) {
- TransitionElementsKindStub stub(isolate, from, to, is_jsarray);
- return make_callable(stub);
+Callable CodeFactory::InternalArraySingleArgumentConstructor(
+ Isolate* isolate, ElementsKind kind) {
+ switch (kind) {
+ case PACKED_ELEMENTS:
+ return Callable(
+ BUILTIN_CODE(isolate, InternalArraySingleArgumentConstructor_Packed),
+ ArraySingleArgumentConstructorDescriptor{});
+ case HOLEY_ELEMENTS:
+ return Callable(
+ BUILTIN_CODE(isolate, InternalArraySingleArgumentConstructor_Holey),
+ ArraySingleArgumentConstructorDescriptor{});
+ default:
+ UNREACHABLE();
+ }
}
} // namespace internal
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 20ad2ac1d7..3ff37695e2 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -15,6 +15,12 @@
namespace v8 {
namespace internal {
+// For ArrayNoArgumentConstructor and ArraySingleArgumentConstructor.
+enum AllocationSiteOverrideMode {
+ DONT_OVERRIDE,
+ DISABLE_ALLOCATION_SITES,
+};
+
class V8_EXPORT_PRIVATE CodeFactory final {
public:
// CEntry has var-args semantics (all the arguments are passed on the
@@ -85,15 +91,17 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
- static Callable ArrayConstructor(Isolate* isolate);
- static Callable ArrayPop(Isolate* isolate);
- static Callable ArrayPush(Isolate* isolate);
- static Callable ArrayShift(Isolate* isolate);
- static Callable ExtractFastJSArray(Isolate* isolate);
- static Callable CloneFastJSArray(Isolate* isolate);
- static Callable FunctionPrototypeBind(Isolate* isolate);
- static Callable TransitionElementsKind(Isolate* isolate, ElementsKind from,
- ElementsKind to, bool is_jsarray);
+ static Callable ArrayNoArgumentConstructor(
+ Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode);
+ static Callable ArraySingleArgumentConstructor(
+ Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode);
+
+ static Callable InternalArrayNoArgumentConstructor(Isolate* isolate,
+ ElementsKind kind);
+ static Callable InternalArraySingleArgumentConstructor(Isolate* isolate,
+ ElementsKind kind);
};
} // namespace internal
diff --git a/deps/v8/src/code-reference.h b/deps/v8/src/code-reference.h
index 189ab52769..cb4b25a621 100644
--- a/deps/v8/src/code-reference.h
+++ b/deps/v8/src/code-reference.h
@@ -35,6 +35,16 @@ class CodeReference {
return kind_ == JS ? js_code_.is_null() : wasm_code_ == nullptr;
}
+ Handle<Code> as_js_code() const {
+ DCHECK_EQ(JS, kind_);
+ return js_code_;
+ }
+
+ const wasm::WasmCode* as_wasm_code() const {
+ DCHECK_EQ(WASM, kind_);
+ return wasm_code_;
+ }
+
private:
enum { JS, WASM } kind_;
union {
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 9a51017899..d39d841fbd 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -8,6 +8,7 @@
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/descriptor-array.h"
#include "src/objects/ordered-hash-table-inl.h"
namespace v8 {
@@ -16,6 +17,8 @@ namespace internal {
using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
+template <class T>
+using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state) {
@@ -203,7 +206,7 @@ TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
SmiConstant(false_value));
}
-Node* CodeStubAssembler::NoContextConstant() {
+TNode<Object> CodeStubAssembler::NoContextConstant() {
return SmiConstant(Context::kNoContext);
}
@@ -215,7 +218,18 @@ Node* CodeStubAssembler::NoContextConstant() {
*std::declval<Heap>().rootAccessorName())>::type>( \
LoadRoot(Heap::k##rootIndexName##RootIndex)); \
}
-HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
+HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
+#undef HEAP_CONSTANT_ACCESSOR
+
+#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
+ compiler::TNode<std::remove_reference<decltype( \
+ *std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
+ CodeStubAssembler::name##Constant() { \
+ return UncheckedCast<std::remove_reference<decltype( \
+ *std::declval<ReadOnlyRoots>().rootAccessorName())>::type>( \
+ LoadRoot(Heap::k##rootIndexName##RootIndex)); \
+ }
+HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
@@ -227,11 +241,41 @@ HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
SloppyTNode<Object> value) { \
return WordNotEqual(value, name##Constant()); \
}
-HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
+HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST
-TNode<Int32T> CodeStubAssembler::HashSeed() {
- return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
+TNode<Int64T> CodeStubAssembler::HashSeed() {
+ DCHECK(Is64());
+ TNode<HeapObject> hash_seed_root =
+ TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
+ return TNode<Int64T>::UncheckedCast(LoadObjectField(
+ hash_seed_root, ByteArray::kHeaderSize, MachineType::Int64()));
+}
+
+TNode<Int32T> CodeStubAssembler::HashSeedHigh() {
+ DCHECK(!Is64());
+#ifdef V8_TARGET_BIG_ENDIAN
+ static int kOffset = 0;
+#else
+ static int kOffset = kInt32Size;
+#endif
+ TNode<HeapObject> hash_seed_root =
+ TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
+ return TNode<Int32T>::UncheckedCast(LoadObjectField(
+ hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
+}
+
+TNode<Int32T> CodeStubAssembler::HashSeedLow() {
+ DCHECK(!Is64());
+#ifdef V8_TARGET_BIG_ENDIAN
+ static int kOffset = kInt32Size;
+#else
+ static int kOffset = 0;
+#endif
+ TNode<HeapObject> hash_seed_root =
+ TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
+ return TNode<Int32T>::UncheckedCast(LoadObjectField(
+ hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
}
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
@@ -523,14 +567,27 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_x.value());
}
+TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
+ if (SmiValuesAre31Bits() && kPointerSize == kInt64Size) {
+ // Check that the Smi value is properly sign-extended.
+ TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
+ return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
+ }
+ return Int32TrueConstant();
+}
+
Node* CodeStubAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
- return BitcastWordToTaggedSigned(
- WordShl(value_intptr, SmiShiftBitsConstant()));
+ TNode<Smi> smi =
+ BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
+#if V8_COMPRESS_POINTERS
+ CSA_ASSERT(this, IsValidSmi(smi));
+#endif
+ return smi;
}
TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
@@ -550,16 +607,23 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
- return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
+ TNode<Smi> smi =
+ BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
+#if V8_COMPRESS_POINTERS
+ CSA_ASSERT(this, IsValidSmi(smi));
+#endif
+ return smi;
}
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
+#if V8_COMPRESS_POINTERS
+ CSA_ASSERT(this, IsValidSmi(value));
+#endif
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
- return UncheckedCast<IntPtrT>(
- WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
+ return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
@@ -581,22 +645,44 @@ TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) {
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
- TNode<PairT<IntPtrT, BoolT>> pair =
- IntPtrAddWithOverflow(BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
- TNode<BoolT> overflow = Projection<1>(pair);
- GotoIf(overflow, if_overflow);
- TNode<IntPtrT> result = Projection<0>(pair);
- return BitcastWordToTaggedSigned(result);
+ if (SmiValuesAre32Bits()) {
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(
+ BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ TNode<IntPtrT> result = Projection<0>(pair);
+ return BitcastWordToTaggedSigned(result);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ TNode<PairT<Int32T, BoolT>> pair =
+ Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ TNode<Int32T> result = Projection<0>(pair);
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result));
+ }
}
TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
- TNode<PairT<IntPtrT, BoolT>> pair =
- IntPtrSubWithOverflow(BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
- TNode<BoolT> overflow = Projection<1>(pair);
- GotoIf(overflow, if_overflow);
- TNode<IntPtrT> result = Projection<0>(pair);
- return BitcastWordToTaggedSigned(result);
+ if (SmiValuesAre32Bits()) {
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
+ BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ TNode<IntPtrT> result = Projection<0>(pair);
+ return BitcastWordToTaggedSigned(result);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ TNode<PairT<Int32T, BoolT>> pair =
+ Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ TNode<Int32T> result = Projection<0>(pair);
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result));
+ }
}
TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
@@ -891,8 +977,8 @@ void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
TNode<FixedDoubleArray> array, TNode<Smi> index, Label* if_hole) {
- return TNode<Float64T>::UncheckedCast(LoadFixedDoubleArrayElement(
- array, index, MachineType::Float64(), 0, SMI_PARAMETERS, if_hole));
+ return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
+ SMI_PARAMETERS, if_hole);
}
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
@@ -1292,6 +1378,12 @@ Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
+TNode<JSFunction> CodeStubAssembler::LoadTargetFromFrame() {
+ DCHECK(IsJSFunctionCall());
+ return CAST(LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer()));
+}
+
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
@@ -1312,7 +1404,7 @@ Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
SloppyTNode<HeapObject> object, int offset) {
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
@@ -1326,7 +1418,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
@@ -1339,7 +1431,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
@@ -1356,7 +1448,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
int index = root_index * kPointerSize;
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
@@ -1369,7 +1461,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
}
Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
int zero_offset = offset + kPointerSize / 2;
int payload_offset = offset;
#if V8_TARGET_LITTLE_ENDIAN
@@ -1453,7 +1545,7 @@ TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
SloppyTNode<JSArray> array) {
TNode<Object> length = LoadJSArrayLength(array);
- CSA_ASSERT(this, IsFastElementsKind(LoadMapElementsKind(LoadMap(array))));
+ CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(array)));
// JSArray length is always a positive Smi for fast arrays.
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
return UncheckedCast<Smi>(length);
@@ -1486,6 +1578,11 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
+TNode<Smi> CodeStubAssembler::LoadTypedArrayLength(
+ TNode<JSTypedArray> typed_array) {
+ return CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
+}
+
TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
@@ -1515,6 +1612,11 @@ TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
return Signed(DecodeWord32<Map::ElementsKindBits>(bit_field2));
}
+TNode<Int32T> CodeStubAssembler::LoadElementsKind(
+ SloppyTNode<HeapObject> object) {
+ return LoadMapElementsKind(LoadMap(object));
+}
+
TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
@@ -1622,7 +1724,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
&if_property_array);
- Branch(InstanceTypeEqual(properties_instance_type, HASH_TABLE_TYPE),
+ Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE),
&if_property_dictionary, &if_fixed_array);
BIND(&if_fixed_array);
@@ -1745,11 +1847,11 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_weak);
BIND(&inner_if_smi);
- *extracted = ToObject(maybe_object);
+ *extracted = CAST(maybe_object);
Goto(if_smi);
BIND(&inner_if_strong);
- *extracted = ToObject(maybe_object);
+ *extracted = CAST(maybe_object);
Goto(if_strong);
}
@@ -1760,9 +1862,9 @@ TNode<BoolT> CodeStubAssembler::IsStrongHeapObject(TNode<MaybeObject> value) {
}
TNode<HeapObject> CodeStubAssembler::ToStrongHeapObject(
- TNode<MaybeObject> value) {
- CSA_ASSERT(this, IsStrongHeapObject(value));
- return ReinterpretCast<HeapObject>(value);
+ TNode<MaybeObject> value, Label* if_not_strong) {
+ GotoIfNot(IsStrongHeapObject(value), if_not_strong);
+ return CAST(value);
}
TNode<BoolT> CodeStubAssembler::IsWeakOrClearedHeapObject(
@@ -1798,17 +1900,24 @@ TNode<HeapObject> CodeStubAssembler::ToWeakHeapObject(TNode<MaybeObject> value,
return ToWeakHeapObject(value);
}
-TNode<BoolT> CodeStubAssembler::IsObject(TNode<MaybeObject> value) {
- return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(value),
- IntPtrConstant(kHeapObjectTagMask)),
- IntPtrConstant(kWeakHeapObjectTag));
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value) {
+ return WordEqual(WordAnd(BitcastMaybeObjectToWord(object),
+ IntPtrConstant(~kWeakHeapObjectMask)),
+ BitcastTaggedToWord(value));
+}
+
+TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value) {
+ return WordEqual(BitcastMaybeObjectToWord(object),
+ BitcastTaggedToWord(value));
}
-TNode<Object> CodeStubAssembler::ToObject(TNode<MaybeObject> value) {
- // TODO(marja): Make CAST work (with the appropriate check); replace this with
- // CAST.
- CSA_ASSERT(this, IsObject(value));
- return ReinterpretCast<Object>(value);
+TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value) {
+ return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object),
+ IntPtrConstant(~kWeakHeapObjectMask)),
+ BitcastTaggedToWord(value));
}
TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
@@ -1820,9 +1929,9 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
SloppyTNode<HeapObject> array, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode,
LoadSensitivity needs_poisoning) {
- CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
- ParameterToIntPtr(index_node, parameter_mode),
- IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+ ParameterToIntPtr(index_node, parameter_mode),
+ IntPtrConstant(0)));
DCHECK_EQ(additional_offset % kPointerSize, 0);
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
@@ -1832,7 +1941,7 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
PropertyArray::kLengthAndHashOffset);
// Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers
- CSA_SLOW_ASSERT(
+ CSA_ASSERT(
this,
IsOffsetInBounds(
offset,
@@ -1858,13 +1967,14 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
// This function is currently used for non-FixedArrays (e.g., PropertyArrays)
// and thus the reasonable assert IsFixedArraySubclass(object) is
- // untrue. TODO(marja): Fix.
+ // not always true. TODO(marja): Fix.
+ CSA_SLOW_ASSERT(
+ this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
TNode<MaybeObject> element =
LoadArrayElement(object, FixedArray::kHeaderSize, index_node,
additional_offset, parameter_mode, needs_poisoning);
- CSA_ASSERT(this, IsObject(element));
- return ToObject(element);
+ return CAST(element);
}
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
@@ -1874,9 +1984,9 @@ TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
STATIC_ASSERT(PropertyArray::kHeaderSize == FixedArray::kHeaderSize);
- return ToObject(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
- additional_offset, parameter_mode,
- needs_poisoning));
+ return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
+ additional_offset, parameter_mode,
+ needs_poisoning));
}
TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
@@ -2152,7 +2262,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
DCHECK_EQ(additional_offset % kPointerSize, 0);
int endian_correction = 0;
#if V8_TARGET_LITTLE_ENDIAN
- if (Is64()) endian_correction = kPointerSize / 2;
+ if (SmiValuesAre32Bits()) endian_correction = kPointerSize / 2;
#endif
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag +
endian_correction;
@@ -2161,12 +2271,12 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
// Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers
- CSA_SLOW_ASSERT(
- this, IsOffsetInBounds(
- offset,
- LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset),
- FixedArray::kHeaderSize + endian_correction));
- if (Is64()) {
+ CSA_ASSERT(this,
+ IsOffsetInBounds(
+ offset,
+ LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset),
+ FixedArray::kHeaderSize + endian_correction));
+ if (SmiValuesAre32Bits()) {
return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
} else {
return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
@@ -2189,26 +2299,26 @@ TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
additional_offset, parameter_mode, needs_poisoning);
}
-Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
- Node* object, Node* index_node, MachineType machine_type,
- int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
+TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
+ SloppyTNode<FixedDoubleArray> object, Node* index_node,
+ MachineType machine_type, int additional_offset,
+ ParameterMode parameter_mode, Label* if_hole) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
DCHECK_EQ(additional_offset % kPointerSize, 0);
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(index_node, HOLEY_DOUBLE_ELEMENTS,
- parameter_mode, header_size);
- CSA_SLOW_ASSERT(
- this,
- IsOffsetInBounds(offset, LoadAndUntagFixedArrayBaseLength(object),
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size);
+ CSA_ASSERT(this, IsOffsetInBounds(
+ offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
-Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
- Label* if_hole,
- MachineType machine_type) {
+TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
+ SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
+ MachineType machine_type) {
if (if_hole) {
// TODO(ishell): Compare only the upper part for the hole once the
// compiler is able to fold addition of already complex |offset| with
@@ -2226,9 +2336,9 @@ Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
}
if (machine_type.IsNone()) {
// This means the actual value is not needed.
- return nullptr;
+ return TNode<Float64T>();
}
- return Load(machine_type, base, offset);
+ return UncheckedCast<Float64T>(Load(machine_type, base, offset));
}
TNode<Object> CodeStubAssembler::LoadContextElement(
@@ -2360,12 +2470,25 @@ Node* CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(Node* shared) {
return var_result.value();
}
+void CodeStubAssembler::StoreObjectByteNoWriteBarrier(TNode<HeapObject> object,
+ int offset,
+ TNode<Word32T> value) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, object,
+ IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value) {
StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
MachineRepresentation::kFloat64);
}
+void CodeStubAssembler::StoreMutableHeapNumberValue(
+ SloppyTNode<MutableHeapNumber> object, SloppyTNode<Float64T> value) {
+ StoreObjectFieldNoWriteBarrier(object, MutableHeapNumber::kValueOffset, value,
+ MachineRepresentation::kFloat64);
+}
+
Node* CodeStubAssembler::StoreObjectField(
Node* object, int offset, Node* value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
@@ -2441,10 +2564,7 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(
- this,
- Word32Or(IsHashTable(object),
- Word32Or(IsFixedArray(object),
- Word32Or(IsPropertyArray(object), IsContext(object)))));
+ this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
@@ -2454,12 +2574,28 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- // TODO(cbruni): Enable check once we have TNodes in this method. Currently
- // the bounds check will fail for PropertyArray due to the different length
- // encoding.
- // CSA_ASSERT(this,
- // IsOffsetInBounds(offset, LoadAndUntagFixedArrayBaseLength(object),
- // FixedArray::kHeaderSize));
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset ==
+ PropertyArray::kLengthAndHashOffset);
+ // Check that index_node + additional_offset <= object.length.
+ // TODO(cbruni): Use proper LoadXXLength helpers
+ CSA_ASSERT(
+ this,
+ IsOffsetInBounds(
+ offset,
+ Select<IntPtrT>(
+ IsPropertyArray(object),
+ [=] {
+ TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
+ object, PropertyArray::kLengthAndHashOffset);
+ return TNode<IntPtrT>::UncheckedCast(
+ DecodeWord<PropertyArray::LengthField>(length_and_hash));
+ },
+ [=] {
+ return LoadAndUntagObjectField(object,
+ FixedArrayBase::kLengthOffset);
+ }),
+ FixedArray::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
@@ -2521,14 +2657,14 @@ void CodeStubAssembler::EnsureArrayLengthWritable(TNode<Map> map,
int length_index = JSArray::kLengthDescriptorIndex;
#ifdef DEBUG
- TNode<Name> maybe_length = CAST(LoadFixedArrayElement(
+ TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(length_index)));
CSA_ASSERT(this,
WordEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)));
#endif
- TNode<Int32T> details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorArray::ToDetailsIndex(length_index));
+ TNode<Uint32T> details = LoadDetailsByKeyIndex(
+ descriptors, IntPtrConstant(DescriptorArray::ToKeyIndex(length_index)));
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
bailout);
}
@@ -2687,22 +2823,34 @@ Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
}
}
-TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
+TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
Node* result = Allocate(HeapNumber::kSize, kNone);
- Heap::RootListIndex heap_map_index =
- mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
- : Heap::kMutableHeapNumberMapRootIndex;
+ Heap::RootListIndex heap_map_index = Heap::kHeapNumberMapRootIndex;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
- SloppyTNode<Float64T> value, MutableMode mode) {
- TNode<HeapNumber> result = AllocateHeapNumber(mode);
+ SloppyTNode<Float64T> value) {
+ TNode<HeapNumber> result = AllocateHeapNumber();
StoreHeapNumberValue(result, value);
return result;
}
+TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumber() {
+ Node* result = Allocate(MutableHeapNumber::kSize, kNone);
+ Heap::RootListIndex heap_map_index = Heap::kMutableHeapNumberMapRootIndex;
+ StoreMapNoWriteBarrier(result, heap_map_index);
+ return UncheckedCast<MutableHeapNumber>(result);
+}
+
+TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumberWithValue(
+ SloppyTNode<Float64T> value) {
+ TNode<MutableHeapNumber> result = AllocateMutableHeapNumber();
+ StoreMutableHeapNumberValue(result, value);
+ return result;
+}
+
TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
TNode<BigInt> result = AllocateRawBigInt(length);
StoreBigIntBitfield(result, WordShl(length, BigInt::LengthBits::kShift));
@@ -3108,11 +3256,11 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
// Allocate the table and add the proper map.
const ElementsKind elements_kind = HOLEY_ELEMENTS;
- Node* const length_intptr = IntPtrConstant(kFixedArrayLength);
- Node* const fixed_array_map = LoadRoot(
- static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex()));
- Node* const table =
- AllocateFixedArray(elements_kind, length_intptr, INTPTR_PARAMETERS,
+ TNode<IntPtrT> length_intptr = IntPtrConstant(kFixedArrayLength);
+ TNode<Map> fixed_array_map = CAST(LoadRoot(
+ static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex())));
+ TNode<FixedArray> table =
+ AllocateFixedArray(elements_kind, length_intptr,
kAllowLargeObjectAllocation, fixed_array_map);
// Initialize the OrderedHashTable fields.
@@ -3125,7 +3273,7 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
SmiConstant(kBucketCount), barrier_mode);
// Fill the buckets with kNotFound.
- Node* const not_found = SmiConstant(CollectionType::kNotFound);
+ TNode<Smi> not_found = SmiConstant(CollectionType::kNotFound);
STATIC_ASSERT(CollectionType::kHashTableStartIndex ==
CollectionType::kNumberOfBucketsIndex + 1);
STATIC_ASSERT((CollectionType::kHashTableStartIndex + kBucketCount) ==
@@ -3149,6 +3297,87 @@ template Node* CodeStubAssembler::AllocateOrderedHashTable<OrderedHashMap>();
template Node* CodeStubAssembler::AllocateOrderedHashTable<OrderedHashSet>();
template <typename CollectionType>
+TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
+ TNode<IntPtrT> capacity) {
+ CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+ CSA_ASSERT(this, IntPtrLessThan(
+ capacity, IntPtrConstant(CollectionType::kMaxCapacity)));
+
+ TNode<IntPtrT> data_table_start_offset =
+ IntPtrConstant(CollectionType::kDataTableStartOffset);
+
+ TNode<IntPtrT> data_table_size = IntPtrMul(
+ capacity, IntPtrConstant(CollectionType::kEntrySize * kPointerSize));
+
+ TNode<Int32T> hash_table_size =
+ Int32Div(TruncateIntPtrToInt32(capacity),
+ Int32Constant(CollectionType::kLoadFactor));
+
+ TNode<IntPtrT> hash_table_start_offset =
+ IntPtrAdd(data_table_start_offset, data_table_size);
+
+ TNode<IntPtrT> hash_table_and_chain_table_size =
+ IntPtrAdd(ChangeInt32ToIntPtr(hash_table_size), capacity);
+
+ TNode<IntPtrT> total_size =
+ IntPtrAdd(hash_table_start_offset, hash_table_and_chain_table_size);
+
+ TNode<IntPtrT> total_size_word_aligned =
+ IntPtrAdd(total_size, IntPtrConstant(kPointerSize - 1));
+ total_size_word_aligned = ChangeInt32ToIntPtr(
+ Int32Div(TruncateIntPtrToInt32(total_size_word_aligned),
+ Int32Constant(kPointerSize)));
+ total_size_word_aligned =
+ UncheckedCast<IntPtrT>(TimesPointerSize(total_size_word_aligned));
+
+ // Allocate the table and add the proper map.
+ TNode<Map> small_ordered_hash_map = CAST(LoadRoot(
+ static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex())));
+ TNode<Object> table_obj = CAST(AllocateInNewSpace(total_size_word_aligned));
+ StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
+ TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
+
+ // Initialize the SmallOrderedHashTable fields.
+ StoreObjectByteNoWriteBarrier(
+ table, CollectionType::kNumberOfBucketsOffset,
+ Word32And(Int32Constant(0xFF), hash_table_size));
+ StoreObjectByteNoWriteBarrier(table, CollectionType::kNumberOfElementsOffset,
+ Int32Constant(0));
+ StoreObjectByteNoWriteBarrier(
+ table, CollectionType::kNumberOfDeletedElementsOffset, Int32Constant(0));
+
+ TNode<IntPtrT> table_address =
+ IntPtrSub(BitcastTaggedToWord(table), IntPtrConstant(kHeapObjectTag));
+ TNode<IntPtrT> hash_table_start_address =
+ IntPtrAdd(table_address, hash_table_start_offset);
+
+ // Initialize the HashTable part.
+ Node* memset = ExternalConstant(ExternalReference::libc_memset_function());
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memset,
+ hash_table_start_address, IntPtrConstant(0xFF),
+ hash_table_and_chain_table_size);
+
+ // Initialize the DataTable part.
+ TNode<HeapObject> filler = TheHoleConstant();
+ TNode<WordT> data_table_start_address =
+ IntPtrAdd(table_address, data_table_start_offset);
+ TNode<WordT> data_table_end_address =
+ IntPtrAdd(data_table_start_address, data_table_size);
+ StoreFieldsNoWriteBarrier(data_table_start_address, data_table_end_address,
+ filler);
+
+ return table;
+}
+
+template TNode<SmallOrderedHashMap>
+CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
+ TNode<IntPtrT> capacity);
+template TNode<SmallOrderedHashSet>
+CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
+ TNode<IntPtrT> capacity);
+
+template <typename CollectionType>
void CodeStubAssembler::FindOrderedHashTableEntry(
Node* table, Node* hash,
std::function<void(Node*, Label*, Label*)> key_compare,
@@ -3275,7 +3504,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
Heap::kEmptyFixedArrayRootIndex);
} else {
CSA_ASSERT(this, Word32Or(Word32Or(IsPropertyArray(properties),
- IsDictionary(properties)),
+ IsNameDictionary(properties)),
IsEmptyFixedArray(properties)));
StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOrHashOffset,
properties);
@@ -4079,7 +4308,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
ElementsKind from_kind,
ElementsKind to_kind,
Label* if_hole) {
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
if (IsDoubleElementsKind(from_kind)) {
Node* value =
LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
@@ -4192,12 +4421,14 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
if (FLAG_allocation_site_pretenuring) {
- TNode<Smi> count = CAST(LoadObjectField(
- allocation_site, AllocationSite::kPretenureCreateCountOffset));
- TNode<Smi> incremented_count = SmiAdd(count, SmiConstant(1));
- StoreObjectFieldNoWriteBarrier(allocation_site,
- AllocationSite::kPretenureCreateCountOffset,
- incremented_count);
+ TNode<Int32T> count = UncheckedCast<Int32T>(LoadObjectField(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset,
+ MachineType::Int32()));
+
+ TNode<Int32T> incremented_count = Int32Add(count, Int32Constant(1));
+ StoreObjectFieldNoWriteBarrier(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset,
+ incremented_count, MachineRepresentation::kWord32);
}
Comment("]");
}
@@ -4401,11 +4632,12 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
TVARIABLE(Number, var_result);
BIND(&if_valueisint32);
{
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
TNode<Smi> result = SmiTag(ChangeInt32ToIntPtr(value32));
var_result = result;
Goto(&if_join);
} else {
+ DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
TNode<BoolT> overflow = Projection<1>(pair);
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
@@ -4414,8 +4646,9 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Goto(&if_valueisheapnumber);
BIND(&if_notoverflow);
{
- TNode<IntPtrT> result = ChangeInt32ToIntPtr(Projection<0>(pair));
- var_result = BitcastWordToTaggedSigned(result);
+ TNode<Smi> result =
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
+ var_result = result;
Goto(&if_join);
}
}
@@ -4431,9 +4664,10 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
SloppyTNode<Int32T> value) {
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
return SmiTag(ChangeInt32ToIntPtr(value));
}
+ DCHECK(SmiValuesAre31Bits());
TVARIABLE(Number, var_result);
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value, value);
TNode<BoolT> overflow = Projection<1>(pair);
@@ -4449,8 +4683,10 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
Goto(&if_join);
BIND(&if_notoverflow);
{
- TNode<Smi> result =
- BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
+ TNode<IntPtrT> almost_tagged_value =
+ ChangeInt32ToIntPtr(Projection<0>(pair));
+ TNode<Smi> result;
+ result = BitcastWordToTaggedSigned(almost_tagged_value);
var_result = result;
}
Goto(&if_join);
@@ -4469,10 +4705,11 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
BIND(&if_not_overflow);
{
- if (Is64()) {
+ if (SmiValuesAre32Bits()) {
var_result =
SmiTag(ReinterpretCast<IntPtrT>(ChangeUint32ToUint64(value)));
} else {
+ DCHECK(SmiValuesAre31Bits());
// If tagging {value} results in an overflow, we need to use a HeapNumber
// to represent it.
// TODO(tebbi): This overflow can never happen.
@@ -4481,9 +4718,9 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, &if_overflow);
- TNode<Smi> result =
- BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
- var_result = result;
+ TNode<IntPtrT> almost_tagged_value =
+ ChangeInt32ToIntPtr(Projection<0>(pair));
+ var_result = BitcastWordToTaggedSigned(almost_tagged_value);
}
}
Goto(&if_join);
@@ -4584,7 +4821,7 @@ TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
return result.value();
}
-Node* CodeStubAssembler::TimesPointerSize(Node* value) {
+SloppyTNode<WordT> CodeStubAssembler::TimesPointerSize(Node* value) {
return WordShl(value, IntPtrConstant(kPointerSizeLog2));
}
@@ -5160,7 +5397,7 @@ TNode<BoolT> CodeStubAssembler::IsAccessorPair(SloppyTNode<HeapObject> object) {
TNode<BoolT> CodeStubAssembler::IsAllocationSite(
SloppyTNode<HeapObject> object) {
- return IsAllocationSiteMap(LoadMap(object));
+ return IsAllocationSiteInstanceType(LoadInstanceType(object));
}
TNode<BoolT> CodeStubAssembler::IsAnyHeapNumber(
@@ -5244,18 +5481,32 @@ TNode<BoolT> CodeStubAssembler::IsFixedDoubleArray(
}
TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, HASH_TABLE_TYPE);
+ Node* instance_type = LoadInstanceType(object);
+ return UncheckedCast<BoolT>(
+ Word32And(Int32GreaterThanOrEqual(instance_type,
+ Int32Constant(FIRST_HASH_TABLE_TYPE)),
+ Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_HASH_TABLE_TYPE))));
}
-TNode<BoolT> CodeStubAssembler::IsDictionary(SloppyTNode<HeapObject> object) {
- return UncheckedCast<BoolT>(
- Word32Or(IsHashTable(object), IsNumberDictionary(object)));
+TNode<BoolT> CodeStubAssembler::IsEphemeronHashTable(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, EPHEMERON_HASH_TABLE_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsNameDictionary(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, NAME_DICTIONARY_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, GLOBAL_DICTIONARY_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsNumberDictionary(
SloppyTNode<HeapObject> object) {
- return WordEqual(LoadMap(object),
- LoadRoot(Heap::kNumberDictionaryMapRootIndex));
+ return HasInstanceType(object, NUMBER_DICTIONARY_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(
@@ -5268,6 +5519,11 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsAllocationSiteInstanceType(
+ SloppyTNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, ALLOCATION_SITE_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
@@ -5285,6 +5541,10 @@ TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(
return HasInstanceType(object, JS_ARRAY_BUFFER_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
+ return HasInstanceType(object, JS_DATA_VIEW_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsFixedTypedArray(
SloppyTNode<HeapObject> object) {
TNode<Int32T> instance_type = LoadInstanceType(object);
@@ -5337,43 +5597,95 @@ TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
}
TNode<BoolT> CodeStubAssembler::IsNumberPositive(SloppyTNode<Number> number) {
- TNode<Float64T> float_zero = Float64Constant(0.);
return Select<BoolT>(TaggedIsSmi(number),
[=] { return TaggedIsPositiveSmi(number); },
- [=] {
- TNode<Float64T> v = LoadHeapNumberValue(CAST(number));
- return Float64GreaterThanOrEqual(v, float_zero);
- });
+ [=] { return IsHeapNumberPositive(CAST(number)); });
}
-TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(SloppyTNode<Number> number) {
- TVARIABLE(BoolT, var_result, Int32TrueConstant());
+// TODO(cbruni): Use TNode<HeapNumber> instead of custom name.
+TNode<BoolT> CodeStubAssembler::IsHeapNumberPositive(TNode<HeapNumber> number) {
+ TNode<Float64T> value = LoadHeapNumberValue(number);
+ TNode<Float64T> float_zero = Float64Constant(0.);
+ return Float64GreaterThanOrEqual(value, float_zero);
+}
- Label check_upper_bound(this), check_is_integer(this), out(this),
- return_false(this);
+TNode<BoolT> CodeStubAssembler::IsNumberNonNegativeSafeInteger(
+ TNode<Number> number) {
+ return Select<BoolT>(
+ // TODO(cbruni): Introduce TaggedIsNonNegateSmi to avoid confusion.
+ TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
+ [=] {
+ TNode<HeapNumber> heap_number = CAST(number);
+ return Select<BoolT>(IsInteger(heap_number),
+ [=] { return IsHeapNumberPositive(heap_number); },
+ [=] { return Int32FalseConstant(); });
+ });
+}
- GotoIfNumberGreaterThanOrEqual(number, NumberConstant(0), &check_upper_bound);
- Goto(&return_false);
+TNode<BoolT> CodeStubAssembler::IsSafeInteger(TNode<Object> number) {
+ return Select<BoolT>(
+ TaggedIsSmi(number), [=] { return Int32TrueConstant(); },
+ [=] {
+ return Select<BoolT>(
+ IsHeapNumber(CAST(number)),
+ [=] { return IsSafeInteger(UncheckedCast<HeapNumber>(number)); },
+ [=] { return Int32FalseConstant(); });
+ });
+}
- BIND(&check_upper_bound);
- GotoIfNumberGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
- &return_false);
- Goto(&check_is_integer);
+TNode<BoolT> CodeStubAssembler::IsSafeInteger(TNode<HeapNumber> number) {
+ // Load the actual value of {number}.
+ TNode<Float64T> number_value = LoadHeapNumberValue(number);
+ // Truncate the value of {number} to an integer (or an infinity).
+ TNode<Float64T> integer = Float64Trunc(number_value);
- BIND(&check_is_integer);
- GotoIf(TaggedIsSmi(number), &out);
- // Check that the HeapNumber is a valid uint32
- TNode<Float64T> value = LoadHeapNumberValue(CAST(number));
- TNode<Uint32T> int_value = ChangeFloat64ToUint32(value);
- GotoIf(Float64Equal(value, ChangeUint32ToFloat64(int_value)), &out);
- Goto(&return_false);
+ return Select<BoolT>(
+ // Check if {number}s value matches the integer (ruling out the
+ // infinities).
+ Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
+ [=] {
+ // Check if the {integer} value is in safe integer range.
+ return Float64LessThanOrEqual(Float64Abs(integer),
+ Float64Constant(kMaxSafeInteger));
+ },
+ [=] { return Int32FalseConstant(); });
+}
- BIND(&return_false);
- var_result = Int32FalseConstant();
- Goto(&out);
+TNode<BoolT> CodeStubAssembler::IsInteger(TNode<Object> number) {
+ return Select<BoolT>(
+ TaggedIsSmi(number), [=] { return Int32TrueConstant(); },
+ [=] {
+ return Select<BoolT>(
+ IsHeapNumber(CAST(number)),
+ [=] { return IsInteger(UncheckedCast<HeapNumber>(number)); },
+ [=] { return Int32FalseConstant(); });
+ });
+}
- BIND(&out);
- return var_result.value();
+TNode<BoolT> CodeStubAssembler::IsInteger(TNode<HeapNumber> number) {
+ TNode<Float64T> number_value = LoadHeapNumberValue(number);
+ // Truncate the value of {number} to an integer (or an infinity).
+ TNode<Float64T> integer = Float64Trunc(number_value);
+ // Check if {number}s value matches the integer (ruling out the infinities).
+ return Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0));
+}
+
+TNode<BoolT> CodeStubAssembler::IsHeapNumberUint32(TNode<HeapNumber> number) {
+ // Check that the HeapNumber is a valid uint32
+ return Select<BoolT>(
+ IsHeapNumberPositive(number),
+ [=] {
+ TNode<Float64T> value = LoadHeapNumberValue(number);
+ TNode<Uint32T> int_value = ChangeFloat64ToUint32(value);
+ return Float64Equal(value, ChangeUint32ToFloat64(int_value));
+ },
+ [=] { return Int32FalseConstant(); });
+}
+
+TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(TNode<Number> number) {
+ return Select<BoolT>(TaggedIsSmi(number),
+ [=] { return TaggedIsPositiveSmi(number); },
+ [=] { return IsHeapNumberUint32(CAST(number)); });
}
Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
@@ -5920,8 +6232,7 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
// If new length is greater than String::kMaxLength, goto runtime to
// throw. Note: we also need to invalidate the string length protector, so
// can't just throw here directly.
- GotoIf(SmiGreaterThan(new_length, SmiConstant(String::kMaxLength)),
- &runtime);
+ GotoIf(SmiAbove(new_length, SmiConstant(String::kMaxLength)), &runtime);
TVARIABLE(String, var_left, left);
TVARIABLE(String, var_right, right);
@@ -6816,10 +7127,11 @@ TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift)));
}
-Node* CodeStubAssembler::UpdateWord(Node* word, Node* value, uint32_t shift,
- uint32_t mask) {
- Node* encoded_value = WordShl(value, static_cast<int>(shift));
- Node* inverted_mask = IntPtrConstant(~static_cast<intptr_t>(mask));
+TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
+ TNode<WordT> value, uint32_t shift,
+ uint32_t mask) {
+ TNode<WordT> encoded_value = WordShl(value, static_cast<int>(shift));
+ TNode<IntPtrT> inverted_mask = IntPtrConstant(~static_cast<intptr_t>(mask));
// Ensure the {value} fits fully in the mask.
CSA_ASSERT(this, WordEqual(WordAnd(encoded_value, inverted_mask),
IntPtrConstant(0)));
@@ -6936,8 +7248,11 @@ void CodeStubAssembler::TryInternalizeString(
CSA_SLOW_ASSERT(this, IsString(string));
Node* function =
ExternalConstant(ExternalReference::try_internalize_string_function());
- Node* result = CallCFunction1(MachineType::AnyTagged(),
- MachineType::AnyTagged(), function, string);
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* result =
+ CallCFunction2(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), function, isolate_ptr, string);
Label internalized(this);
GotoIf(TaggedIsNotSmi(result), &internalized);
Node* word_result = SmiUntag(result);
@@ -6962,6 +7277,32 @@ TNode<IntPtrT> CodeStubAssembler::EntryToIndex(TNode<IntPtrT> entry,
field_index));
}
+TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
+ TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
+ const int kKeyToDetailsOffset =
+ (DescriptorArray::kEntryDetailsIndex - DescriptorArray::kEntryKeyIndex) *
+ kPointerSize;
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ container, WeakFixedArray::kHeaderSize, key_index, kKeyToDetailsOffset));
+}
+
+TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
+ TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
+ const int kKeyToValueOffset =
+ (DescriptorArray::kEntryValueIndex - DescriptorArray::kEntryKeyIndex) *
+ kPointerSize;
+ return CAST(
+ LoadWeakFixedArrayElement(container, key_index, kKeyToValueOffset));
+}
+
+TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByKeyIndex(
+ TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
+ const int kKeyToValueOffset =
+ (DescriptorArray::kEntryValueIndex - DescriptorArray::kEntryKeyIndex) *
+ kPointerSize;
+ return LoadWeakFixedArrayElement(container, key_index, kKeyToValueOffset);
+}
+
template TNode<IntPtrT> CodeStubAssembler::EntryToIndex<NameDictionary>(
TNode<IntPtrT>, int);
template TNode<IntPtrT> CodeStubAssembler::EntryToIndex<GlobalDictionary>(
@@ -7122,7 +7463,14 @@ void CodeStubAssembler::NumberDictionaryLookup(
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<NumberDictionary>(dictionary));
TNode<WordT> mask = IntPtrSub(capacity, IntPtrConstant(1));
- TNode<Int32T> int32_seed = HashSeed();
+ TNode<Int32T> int32_seed;
+
+ if (Is64()) {
+ int32_seed = TruncateInt64ToInt32(HashSeed());
+ } else {
+ int32_seed = HashSeedLow();
+ }
+
TNode<WordT> hash =
ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed));
Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
@@ -7175,6 +7523,51 @@ void CodeStubAssembler::NumberDictionaryLookup(
}
}
+TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement(
+ TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
+ Label* not_data, Label* if_hole) {
+ TVARIABLE(IntPtrT, var_entry);
+ Label if_found(this);
+ NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry,
+ if_hole);
+ BIND(&if_found);
+
+ // Check that the value is a data property.
+ TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value());
+ TNode<Uint32T> details =
+ LoadDetailsByKeyIndex<NumberDictionary>(dictionary, index);
+ TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
+ // TODO(jkummerow): Support accessors without missing?
+ GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
+ // Finally, load the value.
+ return LoadValueByKeyIndex<NumberDictionary>(dictionary, index);
+}
+
+void CodeStubAssembler::BasicStoreNumberDictionaryElement(
+ TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
+ TNode<Object> value, Label* not_data, Label* if_hole, Label* read_only) {
+ TVARIABLE(IntPtrT, var_entry);
+ Label if_found(this);
+ NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry,
+ if_hole);
+ BIND(&if_found);
+
+ // Check that the value is a data property.
+ TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value());
+ TNode<Uint32T> details =
+ LoadDetailsByKeyIndex<NumberDictionary>(dictionary, index);
+ TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
+ // TODO(jkummerow): Support accessors without missing?
+ GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
+
+ // Check that the property is writeable.
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ read_only);
+
+ // Finally, store the value.
+ StoreValueByKeyIndex<NumberDictionary>(dictionary, index, value);
+}
+
template <class Dictionary>
void CodeStubAssembler::FindInsertionEntry(TNode<Dictionary> dictionary,
TNode<Name> key,
@@ -7285,8 +7678,8 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found) {
static_assert(std::is_base_of<FixedArray, Array>::value ||
- std::is_base_of<TransitionArray, Array>::value,
- "T must be a descendant of FixedArray or a TransitionArray");
+ std::is_base_of<WeakFixedArray, Array>::value,
+ "T must be a descendant of FixedArray or a WeakFixedArray");
Comment("LookupLinear");
TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
TNode<IntPtrT> factor = IntPtrConstant(Array::kEntrySize);
@@ -7298,9 +7691,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
[=](SloppyTNode<IntPtrT> name_index) {
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize, name_index);
- CSA_ASSERT(this, IsStrongHeapObject(element));
- TNode<Name> candidate_name =
- CAST(ToStrongHeapObject(element));
+ TNode<Name> candidate_name = CAST(element);
*var_name_index = name_index;
GotoIf(WordEqual(candidate_name, unique_name), if_found);
},
@@ -7311,8 +7702,9 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
template <>
TNode<Uint32T> CodeStubAssembler::NumberOfEntries<DescriptorArray>(
TNode<DescriptorArray> descriptors) {
- return Unsigned(LoadAndUntagToWord32FixedArrayElement(
- descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex)));
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ descriptors, WeakFixedArray::kHeaderSize,
+ IntPtrConstant(DescriptorArray::kDescriptorLengthIndex)));
}
template <>
@@ -7366,14 +7758,13 @@ template <typename Array>
TNode<Name> CodeStubAssembler::GetKey(TNode<Array> array,
TNode<Uint32T> entry_index) {
static_assert(std::is_base_of<FixedArray, Array>::value ||
- std::is_base_of<TransitionArray, Array>::value,
+ std::is_base_of<WeakFixedArray, Array>::value,
"T must be a descendant of FixedArray or a TransitionArray");
const int key_offset = Array::ToKeyIndex(0) * kPointerSize;
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize,
EntryIndexToIndex<Array>(entry_index), key_offset);
- CSA_ASSERT(this, IsStrongHeapObject(element));
- return CAST(ToStrongHeapObject(element));
+ return CAST(element);
}
template TNode<Name> CodeStubAssembler::GetKey<DescriptorArray>(
@@ -7384,9 +7775,9 @@ template TNode<Name> CodeStubAssembler::GetKey<TransitionArray>(
TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- return Unsigned(LoadAndUntagToWord32FixedArrayElement(
- descriptors, EntryIndexToIndex<DescriptorArray>(descriptor_number),
- details_offset));
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ descriptors, WeakFixedArray::kHeaderSize,
+ EntryIndexToIndex<DescriptorArray>(descriptor_number), details_offset));
}
template <typename Array>
@@ -7639,27 +8030,23 @@ Node* CodeStubAssembler::GetMethod(Node* context, Node* object,
return method;
}
-void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
- Node* descriptors,
- Node* name_index,
- Variable* var_details,
- Variable* var_value) {
+void CodeStubAssembler::LoadPropertyFromFastObject(
+ Node* object, Node* map, TNode<DescriptorArray> descriptors,
+ Node* name_index, Variable* var_details, Variable* var_value) {
DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Node* details =
- LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+ LoadDetailsByKeyIndex(descriptors, UncheckedCast<IntPtrT>(name_index));
var_details->Bind(details);
LoadPropertyFromFastObject(object, map, descriptors, name_index, details,
var_value);
}
-void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
- Node* descriptors,
- Node* name_index,
- Node* details,
- Variable* var_value) {
+void CodeStubAssembler::LoadPropertyFromFastObject(
+ Node* object, Node* map, TNode<DescriptorArray> descriptors,
+ Node* name_index, Node* details, Variable* var_value) {
Comment("[ LoadPropertyFromFastObject");
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
@@ -7742,7 +8129,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
BIND(&if_in_descriptor);
{
var_value->Bind(
- LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index));
+ LoadValueByKeyIndex(descriptors, UncheckedCast<IntPtrT>(name_index)));
Goto(&done);
}
BIND(&done);
@@ -7755,7 +8142,7 @@ void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
Variable* var_details,
Variable* var_value) {
Comment("LoadPropertyFromNameDictionary");
- CSA_ASSERT(this, IsDictionary(dictionary));
+ CSA_ASSERT(this, IsNameDictionary(dictionary));
var_details->Bind(
LoadDetailsByKeyIndex<NameDictionary>(dictionary, name_index));
@@ -7770,7 +8157,7 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Variable* var_value,
Label* if_deleted) {
Comment("[ LoadPropertyFromGlobalDictionary");
- CSA_ASSERT(this, IsDictionary(dictionary));
+ CSA_ASSERT(this, IsGlobalDictionary(dictionary));
Node* property_cell = LoadFixedArrayElement(dictionary, name_index);
CSA_ASSERT(this, IsPropertyCell(property_cell));
@@ -7926,7 +8313,7 @@ void CodeStubAssembler::TryGetOwnProperty(
&var_entry, if_not_found, if_bailout);
BIND(&if_found_fast);
{
- Node* descriptors = var_meta_storage.value();
+ TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
Node* name_index = var_entry.value();
LoadPropertyFromFastObject(object, map, descriptors, name_index,
@@ -8086,8 +8473,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), if_absent);
- Node* length = TryToIntptr(
- LoadObjectField(object, JSTypedArray::kLengthOffset), if_bailout);
+ Node* length = SmiUntag(LoadTypedArrayLength(CAST(object)));
Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
}
BIND(&if_oob);
@@ -8417,16 +8803,21 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
return IntPtrLessThanOrEqual(offset, last_offset);
}
-Node* CodeStubAssembler::LoadFeedbackVector(Node* closure) {
- Node* feedback_cell =
- LoadObjectField(closure, JSFunction::kFeedbackCellOffset);
- CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
- return LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
+TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVector(
+ SloppyTNode<JSFunction> closure, Label* if_undefined) {
+ TNode<FeedbackCell> feedback_cell =
+ CAST(LoadObjectField(closure, JSFunction::kFeedbackCellOffset));
+ TNode<Object> maybe_vector =
+ LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
+ if (if_undefined) {
+ GotoIf(IsUndefined(maybe_vector), if_undefined);
+ }
+ return CAST(maybe_vector);
}
-Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
- Node* function =
- LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
+TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
+ TNode<JSFunction> function =
+ CAST(LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset));
return LoadFeedbackVector(function);
}
@@ -8437,8 +8828,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
// our new feedback in place.
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<Smi> previous_feedback = CAST(ToObject(feedback_element));
+ TNode<Smi> previous_feedback = CAST(feedback_element);
TNode<Smi> combined_feedback = SmiOr(previous_feedback, CAST(feedback));
Label end(this);
@@ -8747,6 +9137,9 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
case FLOAT64_ELEMENTS:
rep = MachineRepresentation::kFloat64;
break;
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ return ToBigInt(context, input);
default:
UNREACHABLE();
}
@@ -8909,10 +9302,24 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Label done(this);
+
+ // IntegerIndexedElementSet converts value to a Number/BigInt prior to the
+ // bounds check.
+ value = PrepareValueForWriteToTypedArray(CAST(value), elements_kind,
+ CAST(context));
+
+ // There must be no allocations between the buffer load and
+ // and the actual store to backing store, because GC may decide that
+ // the buffer is not alive or move the elements.
+ // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
+
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+
// Bounds check.
- Node* length = TaggedToParameter(
- CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)),
- parameter_mode);
+ Node* length =
+ TaggedToParameter(LoadTypedArrayLength(CAST(object)), parameter_mode);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Skip the store if we write beyond the length or
@@ -8923,24 +9330,17 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
- TNode<Object> value_obj = UncheckedCast<Object>(value);
if (elements_kind == BIGINT64_ELEMENTS ||
elements_kind == BIGUINT64_ELEMENTS) {
- EmitBigTypedArrayElementStore(CAST(object), CAST(elements), intptr_key,
- value_obj, CAST(context), bailout);
+ TNode<BigInt> bigint_value = UncheckedCast<BigInt>(value);
+
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(CAST(elements));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ intptr_key, BIGINT64_ELEMENTS, INTPTR_PARAMETERS, 0);
+ EmitBigTypedArrayElementStore(CAST(elements), backing_store, offset,
+ bigint_value);
} else {
- value = PrepareValueForWriteToTypedArray(value_obj, elements_kind,
- CAST(context));
-
- // There must be no allocations between the buffer load and
- // and the actual store to backing store, because GC may decide that
- // the buffer is not alive or move the elements.
- // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
-
- // Check if buffer has been neutered.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), bailout);
-
Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
StoreElement(backing_store, elements_kind, intptr_key, value,
parameter_mode);
@@ -9114,30 +9514,32 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
Label no_memento_found(this);
Label top_check(this), map_check(this);
- Node* new_space_top_address = ExternalConstant(
+ TNode<ExternalReference> new_space_top_address = ExternalConstant(
ExternalReference::new_space_allocation_top_address(isolate()));
const int kMementoMapOffset = JSArray::kSize;
const int kMementoLastWordOffset =
kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
- Node* object_word = BitcastTaggedToWord(object);
- Node* object_page = PageFromAddress(object_word);
+ TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
+ TNode<IntPtrT> object_page = PageFromAddress(object_word);
{
- Node* page_flags = Load(MachineType::IntPtr(), object_page,
- IntPtrConstant(Page::kFlagsOffset));
+ TNode<IntPtrT> page_flags =
+ UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), object_page,
+ IntPtrConstant(Page::kFlagsOffset)));
GotoIf(WordEqual(WordAnd(page_flags,
IntPtrConstant(MemoryChunk::kIsInNewSpaceMask)),
IntPtrConstant(0)),
&no_memento_found);
}
- Node* memento_last_word = IntPtrAdd(
+ TNode<IntPtrT> memento_last_word = IntPtrAdd(
object_word, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
- Node* memento_last_word_page = PageFromAddress(memento_last_word);
+ TNode<IntPtrT> memento_last_word_page = PageFromAddress(memento_last_word);
- Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
- Node* new_space_top_page = PageFromAddress(new_space_top);
+ TNode<IntPtrT> new_space_top = UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), new_space_top_address));
+ TNode<IntPtrT> new_space_top_page = PageFromAddress(new_space_top);
// If the object is in new space, we need to check whether respective
// potential memento object is on the same page as the current top.
@@ -9160,7 +9562,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
// Memento map check.
BIND(&map_check);
{
- Node* memento_map = LoadObjectField(object, kMementoMapOffset);
+ TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
Branch(
WordEqual(memento_map, LoadRoot(Heap::kAllocationMementoMapRootIndex)),
memento_found, &no_memento_found);
@@ -9169,41 +9571,44 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
Comment("] TrapAllocationMemento");
}
-Node* CodeStubAssembler::PageFromAddress(Node* address) {
+TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
}
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
- Node* feedback_vector, Node* slot) {
- Node* size = IntPtrConstant(AllocationSite::kSize);
+ SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) {
+ TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
Node* site = Allocate(size, CodeStubAssembler::kPretenured);
- StoreMapNoWriteBarrier(site, Heap::kAllocationSiteMapRootIndex);
+ StoreMapNoWriteBarrier(site, Heap::kAllocationSiteWithWeakNextMapRootIndex);
// Should match AllocationSite::Initialize.
- Node* field = UpdateWord<AllocationSite::ElementsKindBits>(
+ TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
IntPtrConstant(0), IntPtrConstant(GetInitialFastElementsKind()));
StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kTransitionInfoOrBoilerplateOffset, SmiTag(field));
+ site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
+ SmiTag(Signed(field)));
// Unlike literals, constructed arrays don't have nested sites
- Node* zero = SmiConstant(0);
+ TNode<Smi> zero = SmiConstant(0);
StoreObjectFieldNoWriteBarrier(site, AllocationSite::kNestedSiteOffset, zero);
// Pretenuring calculation field.
StoreObjectFieldNoWriteBarrier(site, AllocationSite::kPretenureDataOffset,
- zero);
+ Int32Constant(0),
+ MachineRepresentation::kWord32);
// Pretenuring memento creation count field.
StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kPretenureCreateCountOffset, zero);
+ site, AllocationSite::kPretenureCreateCountOffset, Int32Constant(0),
+ MachineRepresentation::kWord32);
// Store an empty fixed array for the code dependency.
StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
Heap::kEmptyFixedArrayRootIndex);
// Link the object to the allocation site list
- Node* site_list = ExternalConstant(
+ TNode<ExternalReference> site_list = ExternalConstant(
ExternalReference::allocation_sites_list_address(isolate()));
- Node* next_site = LoadBufferObject(site_list, 0);
+ TNode<Object> next_site = CAST(LoadBufferObject(site_list, 0));
// TODO(mvstanton): This is a store to a weak pointer, which we may want to
// mark as such in order to skip the write barrier, once we have a unified
@@ -9218,20 +9623,41 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
return CAST(site);
}
-Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
- Node* slot,
- Node* value) {
- Node* size = IntPtrConstant(WeakCell::kSize);
- Node* cell = Allocate(size, CodeStubAssembler::kPretenured);
+TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector(
+ SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot,
+ TNode<HeapObject> value) {
+ TNode<MaybeObject> weak_value = MakeWeak(value);
+ StoreFeedbackVectorSlot(feedback_vector, slot, weak_value);
+ return weak_value;
+}
+
+TNode<BoolT> CodeStubAssembler::NotHasBoilerplate(
+ TNode<Object> maybe_literal_site) {
+ return TaggedIsSmi(maybe_literal_site);
+}
+
+TNode<Smi> CodeStubAssembler::LoadTransitionInfo(
+ TNode<AllocationSite> allocation_site) {
+ TNode<Smi> transition_info = CAST(LoadObjectField(
+ allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset));
+ return transition_info;
+}
- // Initialize the WeakCell.
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakCellMapRootIndex));
- StoreMapNoWriteBarrier(cell, Heap::kWeakCellMapRootIndex);
- StoreObjectField(cell, WeakCell::kValueOffset, value);
+TNode<JSObject> CodeStubAssembler::LoadBoilerplate(
+ TNode<AllocationSite> allocation_site) {
+ TNode<JSObject> boilerplate = CAST(LoadObjectField(
+ allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset));
+ return boilerplate;
+}
- // Store the WeakCell in the feedback vector.
- StoreFeedbackVectorSlot(feedback_vector, slot, cell);
- return cell;
+TNode<Int32T> CodeStubAssembler::LoadElementsKind(
+ TNode<AllocationSite> allocation_site) {
+ TNode<Smi> transition_info = LoadTransitionInfo(allocation_site);
+ TNode<Int32T> elements_kind =
+ Signed(DecodeWord32<AllocationSite::ElementsKindBits>(
+ SmiToInt32(transition_info)));
+ CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ return elements_kind;
}
Node* CodeStubAssembler::BuildFastLoop(
@@ -11021,7 +11447,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
//
// Call to Function.prototype[@@hasInstance] directly.
Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance),
- CallTrampolineDescriptor(isolate()));
+ CallTrampolineDescriptor{});
Node* result = CallJS(builtin, context, inst_of_handler, callable, object);
var_result.Bind(result);
Goto(&return_result);
@@ -11335,6 +11761,18 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
}
+TNode<JSArrayBuffer> CodeStubAssembler::LoadArrayBufferViewBuffer(
+ TNode<JSArrayBufferView> array_buffer_view) {
+ return LoadObjectField<JSArrayBuffer>(array_buffer_view,
+ JSArrayBufferView::kBufferOffset);
+}
+
+TNode<RawPtrT> CodeStubAssembler::LoadArrayBufferBackingStore(
+ TNode<JSArrayBuffer> array_buffer) {
+ return LoadObjectField<RawPtrT>(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
+}
+
CodeStubArguments::CodeStubArguments(
CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
@@ -11358,6 +11796,13 @@ TNode<Object> CodeStubArguments::GetReceiver() const {
assembler_->IntPtrConstant(kPointerSize)));
}
+void CodeStubArguments::SetReceiver(TNode<Object> object) const {
+ DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
+ assembler_->StoreNoWriteBarrier(MachineRepresentation::kTagged, arguments_,
+ assembler_->IntPtrConstant(kPointerSize),
+ object);
+}
+
TNode<RawPtr<Object>> CodeStubArguments::AtIndexPtr(
Node* index, CodeStubAssembler::ParameterMode mode) const {
typedef compiler::Node Node;
@@ -11471,6 +11916,15 @@ Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
Int32Constant(LAST_FAST_ELEMENTS_KIND));
}
+TNode<BoolT> CodeStubAssembler::IsDoubleElementsKind(
+ TNode<Int32T> elements_kind) {
+ STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
+ STATIC_ASSERT((PACKED_DOUBLE_ELEMENTS & 1) == 0);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + 1 == HOLEY_DOUBLE_ELEMENTS);
+ return Word32Equal(Word32Shr(elements_kind, Int32Constant(1)),
+ Int32Constant(PACKED_DOUBLE_ELEMENTS / 2));
+}
+
Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND);
@@ -11505,13 +11959,28 @@ Node* CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
-Node* CodeStubAssembler::IsPromiseHookEnabledOrDebugIsActive() {
- Node* const promise_hook_or_debug_is_active =
+Node* CodeStubAssembler::IsPromiseHookEnabled() {
+ Node* const promise_hook = Load(
+ MachineType::Pointer(),
+ ExternalConstant(ExternalReference::promise_hook_address(isolate())));
+ return WordNotEqual(promise_hook, IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::HasAsyncEventDelegate() {
+ Node* const async_event_delegate =
+ Load(MachineType::Pointer(),
+ ExternalConstant(
+ ExternalReference::async_event_delegate_address(isolate())));
+ return WordNotEqual(async_event_delegate, IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate() {
+ Node* const promise_hook_or_async_event_delegate =
Load(MachineType::Uint8(),
ExternalConstant(
- ExternalReference::promise_hook_or_debug_is_active_address(
+ ExternalReference::promise_hook_or_async_event_delegate_address(
isolate())));
- return Word32NotEqual(promise_hook_or_debug_is_active, Int32Constant(0));
+ return Word32NotEqual(promise_hook_or_async_event_delegate, Int32Constant(0));
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
@@ -11532,17 +12001,21 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
}
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info) {
+ SloppyTNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TYPED_VARIABLE_DEF(Code, sfi_code, this);
+ TVARIABLE(Code, sfi_code);
Label done(this);
Label check_instance_type(this);
// IsSmi: Is builtin
GotoIf(TaggedIsNotSmi(sfi_data), &check_instance_type);
+ if (if_compile_lazy) {
+ GotoIf(SmiEqual(CAST(sfi_data), SmiConstant(Builtins::kCompileLazy)),
+ if_compile_lazy);
+ }
sfi_code = LoadBuiltin(CAST(sfi_data));
Goto(&done);
@@ -11551,18 +12024,24 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<Int32T> data_type = LoadInstanceType(CAST(sfi_data));
int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
- WASM_EXPORTED_FUNCTION_DATA_TYPE, FIXED_ARRAY_TYPE,
- TUPLE2_TYPE, FUNCTION_TEMPLATE_INFO_TYPE};
+ WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ FIXED_ARRAY_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
+ UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
+ FUNCTION_TEMPLATE_INFO_TYPE};
Label check_is_bytecode_array(this);
Label check_is_exported_function_data(this);
Label check_is_fixed_array(this);
- Label check_is_pre_parsed_scope_data(this);
+ Label check_is_uncompiled_data_without_pre_parsed_scope(this);
+ Label check_is_uncompiled_data_with_pre_parsed_scope(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
- Label* case_labels[] = {
- &check_is_bytecode_array, &check_is_exported_function_data,
- &check_is_fixed_array, &check_is_pre_parsed_scope_data,
- &check_is_function_template_info};
+ Label* case_labels[] = {&check_is_bytecode_array,
+ &check_is_exported_function_data,
+ &check_is_fixed_array,
+ &check_is_uncompiled_data_without_pre_parsed_scope,
+ &check_is_uncompiled_data_with_pre_parsed_scope,
+ &check_is_function_template_info};
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
Switch(data_type, &check_is_interpreter_data, case_values, case_labels,
arraysize(case_labels));
@@ -11585,11 +12064,14 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
Goto(&done);
- // IsPreParsedScopeData: Compile lazy
- BIND(&check_is_pre_parsed_scope_data);
+ // IsUncompiledDataWithPreParsedScope | IsUncompiledDataWithoutPreParsedScope:
+ // Compile lazy
+ BIND(&check_is_uncompiled_data_with_pre_parsed_scope);
+ Goto(&check_is_uncompiled_data_without_pre_parsed_scope);
+ BIND(&check_is_uncompiled_data_without_pre_parsed_scope);
DCHECK(!Builtins::IsLazy(Builtins::kCompileLazy));
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
- Goto(&done);
+ Goto(if_compile_lazy ? if_compile_lazy : &done);
// IsFunctionTemplateInfo: API call
BIND(&check_is_function_template_info);
@@ -11738,11 +12220,6 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(CodeStubArguments* args,
return args->GetOptionalArgumentValue(index);
}
-TNode<Object> CodeStubAssembler::GetArgumentValueSmiIndex(
- CodeStubArguments* args, TNode<Smi> index) {
- return args->GetOptionalArgumentValue(SmiUntag(index));
-}
-
void CodeStubAssembler::Print(const char* s) {
std::string formatted(s);
formatted += "\n";
@@ -11762,22 +12239,24 @@ void CodeStubAssembler::Print(const char* prefix, Node* tagged_value) {
CallRuntime(Runtime::kDebugPrint, NoContextConstant(), tagged_value);
}
-void CodeStubAssembler::PerformStackCheck(Node* context) {
+void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
Label ok(this), stack_check_interrupt(this, Label::kDeferred);
- Node* sp = LoadStackPointer();
- Node* stack_limit = Load(
+ // The instruction sequence below is carefully crafted to hit our pattern
+ // matcher for stack checks within instruction selection.
+ // See StackCheckMatcher::Matched and JSGenericLowering::LowerJSStackCheck.
+
+ TNode<UintPtrT> sp = UncheckedCast<UintPtrT>(LoadStackPointer());
+ TNode<UintPtrT> stack_limit = UncheckedCast<UintPtrT>(Load(
MachineType::Pointer(),
- ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- Node* interrupt = UintPtrLessThan(sp, stack_limit);
+ ExternalConstant(ExternalReference::address_of_stack_limit(isolate()))));
+ TNode<BoolT> sp_within_limit = UintPtrLessThan(stack_limit, sp);
- Branch(interrupt, &stack_check_interrupt, &ok);
+ Branch(sp_within_limit, &ok, &stack_check_interrupt);
BIND(&stack_check_interrupt);
- {
- CallRuntime(Runtime::kStackGuard, context);
- Goto(&ok);
- }
+ CallRuntime(Runtime::kStackGuard, context);
+ Goto(&ok);
BIND(&ok);
}
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 35341b65d5..3d7859f064 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -11,6 +11,7 @@
#include "src/compiler/code-assembler.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/roots.h"
namespace v8 {
namespace internal {
@@ -23,57 +24,74 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_CONSTANT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
- V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(CodeMap, code_map, CodeMap) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(iterator_symbol, iterator_symbol, IteratorSymbol) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(MetaMap, meta_map, MetaMap) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
- V(NanValue, nan_value, Nan) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(NullValue, null_value, Null) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(PromiseSpeciesProtector, promise_species_protector, \
- PromiseSpeciesProtector) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(Tuple3Map, tuple3_map, Tuple3Map) \
- V(TypedArraySpeciesProtector, typed_array_species_protector, \
- TypedArraySpeciesProtector) \
- V(UndefinedValue, undefined_value, Undefined) \
- V(WeakCellMap, weak_cell_map, WeakCellMap) \
+#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(PromiseSpeciesProtector, promise_species_protector, \
+ PromiseSpeciesProtector) \
+ V(TypedArraySpeciesProtector, typed_array_species_protector, \
+ TypedArraySpeciesProtector) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map)
+
+#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
+ V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
+ V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
+ V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
+ AllocationSiteWithoutWeakNextMap) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(CodeMap, code_map, CodeMap) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(FalseValue, false_value, False) \
+ V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(HeapNumberMap, heap_number_map, HeapNumberMap) \
+ V(iterator_symbol, iterator_symbol, IteratorSymbol) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(MetaMap, meta_map, MetaMap) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(NanValue, nan_value, Nan) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(NullValue, null_value, Null) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
+ V(SymbolMap, symbol_map, SymbolMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
+ V(TrueValue, true_value, True) \
+ V(Tuple2Map, tuple2_map, Tuple2Map) \
+ V(Tuple3Map, tuple3_map, Tuple3Map) \
+ V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
+ ArrayBoilerplateDescriptionMap) \
+ V(UncompiledDataWithoutPreParsedScopeMap, \
+ uncompiled_data_without_pre_parsed_scope_map, \
+ UncompiledDataWithoutPreParsedScopeMap) \
+ V(UncompiledDataWithPreParsedScopeMap, \
+ uncompiled_data_with_pre_parsed_scope_map, \
+ UncompiledDataWithPreParsedScopeMap) \
+ V(UndefinedValue, undefined_value, Undefined) \
+ V(WeakCellMap, weak_cell_map, WeakCellMap) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
+#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
+ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
+
// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
// here to simplify use in other generated builtins.
struct IteratorRecord {
@@ -85,6 +103,119 @@ struct IteratorRecord {
compiler::TNode<Object> next;
};
+#define CSA_CHECK(csa, x) \
+ (csa)->Check( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
+ }, \
+ #x, __FILE__, __LINE__)
+
+#ifdef DEBUG
+// Add stringified versions to the given values, except the first. That is,
+// transform
+// x, a, b, c, d, e, f
+// to
+// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
+//
+// __VA_ARGS__ is ignored to allow the caller to pass through too many
+// parameters, and the first element is ignored to support having no extra
+// values without empty __VA_ARGS__ (which cause all sorts of problems with
+// extra commas).
+#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
+ v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
+
+// Stringify the given variable number of arguments. The arguments are trimmed
+// to 5 if there are too many, and padded with nullptr if there are not enough.
+#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
+ CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
+ nullptr, nullptr)
+
+#define CSA_ASSERT_GET_FIRST(x, ...) (x)
+#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
+
+// CSA_ASSERT(csa, <condition>, <extra values to print...>)
+
+// We have to jump through some hoops to allow <extra values to print...> to be
+// empty.
+#define CSA_ASSERT(csa, ...) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<compiler::SloppyTNode<Word32T>>( \
+ EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
+ }, \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
+ CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+
+// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
+// <extra values to print...>)
+
+#define CSA_ASSERT_BRANCH(csa, ...) \
+ (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
+ __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ compiler::Node* const argc = \
+ (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
+ "argc")
+
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
+ CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
+
+#define CSA_DEBUG_INFO(name) \
+ { #name, __FILE__, __LINE__ }
+#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
+#define VARIABLE(name, ...) \
+ Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
+#define VARIABLE_CONSTRUCTOR(name, ...) \
+ name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
+#define TYPED_VARIABLE_DEF(type, name, ...) \
+ TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
+#else // DEBUG
+#define CSA_ASSERT(csa, ...) ((void)0)
+#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
+#define BIND(label) Bind(label)
+#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
+#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
+#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
+#endif // DEBUG
+
+#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
+
+#ifdef ENABLE_SLOW_DCHECKS
+#define CSA_SLOW_ASSERT(csa, ...) \
+ if (FLAG_enable_slow_asserts) { \
+ CSA_ASSERT(csa, __VA_ARGS__); \
+ }
+#else
+#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
+#endif
+
+class int31_t {
+ public:
+ int31_t() : value_(0) {}
+ int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
+
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
@@ -115,6 +246,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
typedef base::Flags<AllocationFlag> AllocationFlags;
enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
+
// On 32-bit platforms, there is a slight performance advantage to doing all
// of the array offset/index arithmetic with SMIs, since it's possible
// to save a few tag/untag operations without paying an extra expense when
@@ -163,6 +295,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<Smi>(value);
}
+ TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
+ GotoIfNot(IsNumber(value), fail);
+ return UncheckedCast<Number>(value);
+ }
+
TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsSmi(value), fail);
return UncheckedCast<HeapObject>(value);
@@ -175,6 +312,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<JSArray>(heap_object);
}
+ TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
+ TNode<Object> value, Label* fail) {
+ GotoIf(TaggedIsSmi(value), fail);
+ TNode<HeapObject> heap_object = CAST(value);
+ GotoIfNot(IsFastJSArray(heap_object, context), fail);
+ return UncheckedCast<JSArray>(heap_object);
+ }
+
+ TNode<JSDataView> TaggedToJSDataView(TNode<Object> value, Label* fail) {
+ GotoIf(TaggedIsSmi(value), fail);
+ TNode<HeapObject> heap_object = CAST(value);
+ GotoIfNot(IsJSDataView(heap_object), fail);
+ return UncheckedCast<JSDataView>(heap_object);
+ }
+
TNode<JSReceiver> TaggedToCallable(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsSmi(value), fail);
TNode<HeapObject> result = UncheckedCast<HeapObject>(value);
@@ -182,6 +334,69 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return CAST(result);
}
+ TNode<HeapNumber> UnsafeCastNumberToHeapNumber(TNode<Number> p_n) {
+ return CAST(p_n);
+ }
+
+ TNode<FixedArrayBase> UnsafeCastObjectToFixedArrayBase(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<FixedArray> UnsafeCastObjectToFixedArray(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<FixedDoubleArray> UnsafeCastObjectToFixedDoubleArray(
+ TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<HeapNumber> UnsafeCastObjectToHeapNumber(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<HeapObject> UnsafeCastObjectToCallable(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<Smi> UnsafeCastObjectToSmi(TNode<Object> p_o) { return CAST(p_o); }
+
+ TNode<Number> UnsafeCastObjectToNumber(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<HeapObject> UnsafeCastObjectToHeapObject(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<JSArray> UnsafeCastObjectToJSArray(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<FixedTypedArrayBase> UnsafeCastObjectToFixedTypedArrayBase(
+ TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<Object> UnsafeCastObjectToCompareBuiltinFn(TNode<Object> p_o) {
+ return p_o;
+ }
+
+ TNode<NumberDictionary> UnsafeCastObjectToNumberDictionary(
+ TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<JSReceiver> UnsafeCastObjectToJSReceiver(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<JSObject> UnsafeCastObjectToJSObject(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
+ TNode<Map> UnsafeCastObjectToMap(TNode<Object> p_o) { return CAST(p_o); }
+
Node* MatchesParameterMode(Node* value, ParameterMode mode);
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
@@ -207,26 +422,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SmiAboveOrEqual)
#undef PARAMETER_BINOP
- Node* NoContextConstant();
+ TNode<Object> NoContextConstant();
+
+#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
+ compiler::TNode<std::remove_reference<decltype( \
+ *std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
+ name##Constant();
+ HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
+#undef HEAP_CONSTANT_ACCESSOR
+
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type> \
name##Constant();
- HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
+ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
TNode<BoolT> Is##name(SloppyTNode<Object> value); \
TNode<BoolT> IsNot##name(SloppyTNode<Object> value);
- HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
+ HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
- TNode<Int32T> HashSeed();
+ TNode<Int64T> HashSeed();
+ TNode<Int32T> HashSeedHigh();
+ TNode<Int32T> HashSeedLow();
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- TNode<BoolT> BoolConstant(bool value) {
- return value ? Int32TrueConstant() : Int32FalseConstant();
- }
TNode<Smi> LanguageModeConstant(LanguageMode mode) {
return SmiConstant(static_cast<int>(mode));
}
@@ -277,15 +499,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName) \
- TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- return BitcastWordToTaggedSigned( \
- IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
- }
- SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd)
- SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub)
- SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd)
- SMI_ARITHMETIC_BINOP(SmiOr, WordOr)
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return BitcastWordToTaggedSigned( \
+ IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
+ Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \
+ } \
+ }
+ SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
+ SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
+ SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
+ SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
#undef SMI_ARITHMETIC_BINOP
TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -319,19 +552,32 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
}
-#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName) \
- TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
- }
- SMI_COMPARISON_OP(SmiEqual, WordEqual)
- SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual)
- SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan)
- SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual)
- SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan)
- SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan)
- SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual)
- SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan)
- SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual)
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \
+ } \
+ }
+ SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
+ SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
+ SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan, Uint32GreaterThan)
+ SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual,
+ Uint32GreaterThanOrEqual)
+ SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan, Uint32LessThan)
+ SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan, Int32LessThan)
+ SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual,
+ Int32LessThanOrEqual)
+ SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan, Int32GreaterThan)
+ SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
+ Int32GreaterThanOrEqual)
#undef SMI_COMPARISON_OP
TNode<Smi> SmiMax(TNode<Smi> a, TNode<Smi> b);
TNode<Smi> SmiMin(TNode<Smi> a, TNode<Smi> b);
@@ -521,6 +767,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadFromParentFrame(int offset,
MachineType rep = MachineType::AnyTagged());
+ // Load target function from the current JS frame.
+ // This is an alternative way of getting the target function in addition to
+ // Parameter(Descriptor::kJSTarget). The latter should be used near the
+ // beginning of builtin code while the target value is still in the register
+ // and the former should be used in slow paths in order to reduce register
+ // pressure on the fast path.
+ TNode<JSFunction> LoadTargetFromFrame();
+
// Load an object pointer from a buffer that isn't in the heap.
Node* LoadBufferObject(Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
@@ -601,6 +855,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
SloppyTNode<WeakFixedArray> array);
+ // Load the length of a JSTypedArray instance.
+ TNode<Smi> LoadTypedArrayLength(TNode<JSTypedArray> typed_array);
// Load the bit field of a Map.
TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
// Load bit field 2 of a map.
@@ -611,6 +867,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
// Load the ElementsKind of a map.
TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
+ TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> map);
// Load the instance descriptors of a map.
TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
// Load the prototype of a map.
@@ -681,7 +938,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsStrongHeapObject(TNode<Object> value) {
return IsStrongHeapObject(ReinterpretCast<MaybeObject>(value));
}
- TNode<HeapObject> ToStrongHeapObject(TNode<MaybeObject> value);
+ TNode<HeapObject> ToStrongHeapObject(TNode<MaybeObject> value,
+ Label* if_not_strong);
TNode<BoolT> IsWeakOrClearedHeapObject(TNode<MaybeObject> value);
TNode<BoolT> IsClearedWeakHeapObject(TNode<MaybeObject> value);
@@ -693,13 +951,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<HeapObject> ToWeakHeapObject(TNode<MaybeObject> value,
Label* if_cleared);
- // IsObject == true when the MaybeObject is a strong HeapObject or a smi.
- TNode<BoolT> IsObject(TNode<MaybeObject> value);
- // This variant is for overzealous checking.
- TNode<BoolT> IsObject(TNode<Object> value) {
- return IsObject(ReinterpretCast<MaybeObject>(value));
- }
- TNode<Object> ToObject(TNode<MaybeObject> value);
+ TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value);
+ TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value);
+ TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object,
+ TNode<Object> value);
TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
@@ -779,12 +1036,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
// Load an array element from a FixedDoubleArray.
- Node* LoadFixedDoubleArrayElement(
- Node* object, Node* index, MachineType machine_type,
- int additional_offset = 0,
+ TNode<Float64T> LoadFixedDoubleArrayElement(
+ SloppyTNode<FixedDoubleArray> object, Node* index,
+ MachineType machine_type, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
Label* if_hole = nullptr);
+ Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
+ TNode<Smi> index) {
+ return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
+ SMI_PARAMETERS);
+ }
+
// Load a feedback slot from a FeedbackVector.
TNode<MaybeObject> LoadFeedbackVectorSlot(
Node* object, Node* index, int additional_offset = 0,
@@ -798,8 +1061,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load Float64 value by |base| + |offset| address. If the value is a double
// hole then jump to |if_hole|. If |machine_type| is None then only the hole
// check is generated.
- Node* LoadDoubleWithHoleCheck(
- Node* base, Node* offset, Label* if_hole,
+ TNode<Float64T> LoadDoubleWithHoleCheck(
+ SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
TNode<FixedTypedArrayBase> typed_array);
@@ -849,9 +1112,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadSharedFunctionInfoBytecodeArray(Node* shared);
+ void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
+ TNode<Word32T> value);
+
// Store the floating point value of a HeapNumber.
void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value);
+ void StoreMutableHeapNumberValue(SloppyTNode<MutableHeapNumber> object,
+ SloppyTNode<Float64T> value);
// Store a field to an object on the heap.
Node* StoreObjectField(Node* object, int offset, Node* value);
Node* StoreObjectField(Node* object, Node* offset, Node* value);
@@ -937,10 +1205,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Allocate a HeapNumber without initializing its value.
- TNode<HeapNumber> AllocateHeapNumber(MutableMode mode = IMMUTABLE);
+ TNode<HeapNumber> AllocateHeapNumber();
// Allocate a HeapNumber with a specific value.
- TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value,
- MutableMode mode = IMMUTABLE);
+ TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value);
+ TNode<HeapNumber> AllocateHeapNumberWithValue(double value) {
+ return AllocateHeapNumberWithValue(Float64Constant(value));
+ }
+
+ // Allocate a MutableHeapNumber with a specific value.
+ TNode<MutableHeapNumber> AllocateMutableHeapNumberWithValue(
+ SloppyTNode<Float64T> value);
// Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
// Does not initialize the digits.
@@ -953,11 +1227,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<WordT> LoadBigIntBitfield(TNode<BigInt> bigint);
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
- TNode<HeapNumber> AllocateHeapNumberWithValue(double value,
- MutableMode mode = IMMUTABLE) {
- return AllocateHeapNumberWithValue(Float64Constant(value), mode);
- }
-
// Allocate a SeqOneByteString with the given length.
TNode<String> AllocateSeqOneByteString(int length,
AllocationFlags flags = kNone);
@@ -1028,6 +1297,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
std::function<void(Node*, Label*, Label*)> key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found);
+ template <typename CollectionType>
+ TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
+
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
void InitializeStructBody(Node* object, Node* map, Node* size,
int start_offset = Struct::kHeaderSize);
@@ -1089,15 +1361,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
AllocationFlags flags = kNone,
SloppyTNode<Map> fixed_array_map = nullptr);
- TNode<FixedArray> AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
- AllocationFlags flags = kNone) {
- return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags);
- }
-
- TNode<FixedArray> AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
- TNode<Map> map,
- AllocationFlags flags = kNone) {
- return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags, map);
+ TNode<FixedArray> AllocateFixedArray(
+ ElementsKind kind, TNode<IntPtrT> capacity, AllocationFlags flags,
+ SloppyTNode<Map> fixed_array_map = nullptr) {
+ return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags,
+ fixed_array_map);
}
Node* AllocatePropertyArray(Node* capacity,
@@ -1332,7 +1600,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Variable* var_numeric,
Variable* var_feedback);
- Node* TimesPointerSize(Node* value);
+ SloppyTNode<WordT> TimesPointerSize(Node* value);
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
@@ -1385,7 +1653,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
- TNode<BoolT> IsDictionary(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
@@ -1406,15 +1675,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsFixedTypedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
@@ -1490,19 +1762,43 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// within Smi range.
TNode<BoolT> IsNumberNormalized(SloppyTNode<Number> number);
TNode<BoolT> IsNumberPositive(SloppyTNode<Number> number);
+ TNode<BoolT> IsHeapNumberPositive(TNode<HeapNumber> number);
+
+ // True iff {number} is non-negative and less or equal than 2**53-1.
+ TNode<BoolT> IsNumberNonNegativeSafeInteger(TNode<Number> number);
+
+ // True iff {number} represents an integer value.
+ TNode<BoolT> IsInteger(TNode<Object> number);
+ TNode<BoolT> IsInteger(TNode<HeapNumber> number);
+
+ // True iff abs({number}) <= 2**53 -1
+ TNode<BoolT> IsSafeInteger(TNode<Object> number);
+ TNode<BoolT> IsSafeInteger(TNode<HeapNumber> number);
+
+ // True iff {number} represents a valid uint32t value.
+ TNode<BoolT> IsHeapNumberUint32(TNode<HeapNumber> number);
+
// True iff {number} is a positive number and a valid array index in the range
// [0, 2^32-1).
- TNode<BoolT> IsNumberArrayIndex(SloppyTNode<Number> number);
+ TNode<BoolT> IsNumberArrayIndex(TNode<Number> number);
Node* FixedArraySizeDoesntFitInNewSpace(
Node* element_count, int base_size = FixedArray::kHeaderSize,
ParameterMode mode = INTPTR_PARAMETERS);
// ElementsKind helpers:
+ TNode<BoolT> ElementsKindEqual(TNode<Int32T> a, TNode<Int32T> b) {
+ return Word32Equal(a, b);
+ }
+ bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
Node* IsFastElementsKind(Node* elements_kind);
bool IsFastElementsKind(ElementsKind kind) {
return v8::internal::IsFastElementsKind(kind);
}
+ TNode<BoolT> IsDoubleElementsKind(TNode<Int32T> elements_kind);
+ bool IsDoubleElementsKind(ElementsKind kind) {
+ return v8::internal::IsDoubleElementsKind(kind);
+ }
Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
Node* IsFastSmiElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
@@ -1517,7 +1813,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<String> StringFromSingleCharCode(TNode<Int32T> code);
// Return a new string object which holds a substring containing the range
- // [from,to[ of string. |from| and |to| are expected to be tagged.
+ // [from,to[ of string.
TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
TNode<IntPtrT> to);
@@ -1658,13 +1954,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
- Node* UpdateWord(Node* word, Node* value) {
+ TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value) {
return UpdateWord(word, value, BitField::kShift, BitField::kMask);
}
// Returns a node that contains the updated {value} inside {word} starting
// at {shift} and fitting in {mask}.
- Node* UpdateWord(Node* word, Node* value, uint32_t shift, uint32_t mask);
+ TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value, uint32_t shift,
+ uint32_t mask);
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
@@ -1776,6 +2073,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Returns an untagged int32.
template <class ContainerType>
TNode<Uint32T> LoadDetailsByKeyIndex(Node* container, Node* key_index) {
+ static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
+ "Use the non-templatized version for DescriptorArray");
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
@@ -1787,6 +2086,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Returns a tagged value.
template <class ContainerType>
TNode<Object> LoadValueByKeyIndex(Node* container, Node* key_index) {
+ static_assert(!std::is_same<ContainerType, DescriptorArray>::value,
+ "Use the non-templatized version for DescriptorArray");
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
@@ -1794,6 +2095,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
LoadFixedArrayElement(container, key_index, kKeyToValueOffset));
}
+ TNode<Uint32T> LoadDetailsByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+ TNode<Object> LoadValueByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+ TNode<MaybeObject> LoadFieldTypeByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+
// Stores the details for the entry with the given key_index.
// |details| must be a Smi.
template <class ContainerType>
@@ -1891,6 +2199,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TVariable<IntPtrT>* var_entry,
Label* if_not_found);
+ TNode<Object> BasicLoadNumberDictionaryElement(
+ TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
+ Label* not_data, Label* if_hole);
+ void BasicStoreNumberDictionaryElement(TNode<NumberDictionary> dictionary,
+ TNode<IntPtrT> intptr_index,
+ TNode<Object> value, Label* not_data,
+ Label* if_hole, Label* read_only);
+
template <class Dictionary>
void FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key,
TVariable<IntPtrT>* var_key_index);
@@ -1934,37 +2250,37 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Object> GetProperty(SloppyTNode<Context> context,
SloppyTNode<Object> receiver,
SloppyTNode<Object> name) {
- return UncheckedCast<Object>(
- CallStub(Builtins::CallableFor(isolate(), Builtins::kGetProperty),
- context, receiver, name));
+ return CallStub(Builtins::CallableFor(isolate(), Builtins::kGetProperty),
+ context, receiver, name);
}
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
template <class... TArgs>
- TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
+ TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return UncheckedCast<Object>(
- CallStub(Builtins::CallableFor(isolate(), id), context, args...));
+ return CallStub<Object>(Builtins::CallableFor(isolate(), id), context,
+ args...);
}
template <class... TArgs>
- TNode<Object> TailCallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
- TArgs... args) {
+ void TailCallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
+ TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return UncheckedCast<Object>(
- TailCallStub(Builtins::CallableFor(isolate(), id), context, args...));
+ return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
- void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ void LoadPropertyFromFastObject(Node* object, Node* map,
+ TNode<DescriptorArray> descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
- void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ void LoadPropertyFromFastObject(Node* object, Node* map,
+ TNode<DescriptorArray> descriptors,
Node* name_index, Node* details,
Variable* var_value);
@@ -2054,10 +2370,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
// Load type feedback vector from the stub caller's frame.
- Node* LoadFeedbackVectorForStub();
+ TNode<FeedbackVector> LoadFeedbackVectorForStub();
// Load type feedback vector for the given closure.
- Node* LoadFeedbackVector(Node* closure);
+ TNode<FeedbackVector> LoadFeedbackVector(SloppyTNode<JSFunction> closure,
+ Label* if_undefined = nullptr);
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
@@ -2140,16 +2457,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void TrapAllocationMemento(Node* object, Label* memento_found);
- Node* PageFromAddress(Node* address);
+ TNode<IntPtrT> PageFromAddress(TNode<IntPtrT> address);
- // Create a new weak cell with a specified value and install it into a
- // feedback vector.
- Node* CreateWeakCellInFeedbackVector(Node* feedback_vector, Node* slot,
- Node* value);
+ // Store a weak in-place reference into the FeedbackVector.
+ TNode<MaybeObject> StoreWeakReferenceInFeedbackVector(
+ SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot,
+ TNode<HeapObject> value);
// Create a new AllocationSite and install it into a feedback vector.
TNode<AllocationSite> CreateAllocationSiteInFeedbackVector(
- Node* feedback_vector, Node* slot);
+ SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot);
+
+ // TODO(ishell, cbruni): Change to HasBoilerplate.
+ TNode<BoolT> NotHasBoilerplate(TNode<Object> maybe_literal_site);
+ TNode<Smi> LoadTransitionInfo(TNode<AllocationSite> allocation_site);
+ TNode<JSObject> LoadBoilerplate(TNode<AllocationSite> allocation_site);
+ TNode<Int32T> LoadElementsKind(TNode<AllocationSite> allocation_site);
enum class IndexAdvanceMode { kPre, kPost };
@@ -2288,6 +2611,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// TypedArray/ArrayBuffer helpers
Node* IsDetachedBuffer(Node* buffer);
+ TNode<JSArrayBuffer> LoadArrayBufferViewBuffer(
+ TNode<JSArrayBufferView> array_buffer_view);
+ TNode<RawPtrT> LoadArrayBufferBackingStore(TNode<JSArrayBuffer> array_buffer);
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
@@ -2301,14 +2627,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
+ // If |if_compile_lazy| is provided then the execution will go to the given
+ // label in case of an CompileLazy code object.
TNode<Code> GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info);
+ SloppyTNode<SharedFunctionInfo> shared_info,
+ Label* if_compile_lazy = nullptr);
Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
Node* context);
// Promise helpers
- Node* IsPromiseHookEnabledOrDebugIsActive();
+ Node* IsPromiseHookEnabled();
+ Node* HasAsyncEventDelegate();
+ Node* IsPromiseHookEnabledOrHasAsyncEventDelegate();
// Helpers for StackFrame markers.
Node* MarkerIsFrameType(Node* marker_or_function,
@@ -2323,8 +2654,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<IntPtrT> GetArgumentsLength(CodeStubArguments* args);
TNode<Object> GetArgumentValue(CodeStubArguments* args, TNode<IntPtrT> index);
- TNode<Object> GetArgumentValueSmiIndex(CodeStubArguments* args,
- TNode<Smi> index);
// Support for printf-style debugging
void Print(const char* s);
@@ -2353,7 +2682,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
bool ConstexprBoolNot(bool value) { return !value; }
- void PerformStackCheck(Node* context);
+ bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
+
+ void PerformStackCheck(TNode<Context> context);
protected:
// Implements DescriptorArray::Search().
@@ -2443,6 +2774,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* allocation_site,
Node* size_in_bytes);
+ TNode<BoolT> IsValidSmi(TNode<Smi> smi);
Node* SmiShiftBitsConstant();
// Emits keyed sloppy arguments load if the |value| is nullptr or store
@@ -2458,6 +2790,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> length, TNode<String> first,
TNode<String> second, AllocationFlags flags);
+ // Allocate a MutableHeapNumber without initializing its value.
+ TNode<MutableHeapNumber> AllocateMutableHeapNumber();
+
Node* SelectImpl(TNode<BoolT> condition, const NodeGenerator& true_body,
const NodeGenerator& false_body, MachineRepresentation rep);
@@ -2521,6 +2856,10 @@ class CodeStubArguments {
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
TNode<Object> GetReceiver() const;
+ // Replaces receiver argument on the expression stack. Should be used only
+ // for manipulating arguments in trampoline builtins before tail calling
+ // further with passing all the JS arguments as is.
+ void SetReceiver(TNode<Object> object) const;
TNode<RawPtr<Object>> AtIndexPtr(
Node* index, CodeStubAssembler::ParameterMode mode =
@@ -2558,8 +2897,9 @@ class CodeStubArguments {
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) {
+ Node* last = nullptr,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
@@ -2567,8 +2907,9 @@ class CodeStubArguments {
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* last = nullptr,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS);
void PopAndReturn(Node* value);
@@ -2632,100 +2973,6 @@ class ToDirectStringAssembler : public CodeStubAssembler {
const Flags flags_;
};
-#define CSA_CHECK(csa, x) \
- (csa)->Check( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
- }, \
- #x, __FILE__, __LINE__)
-
-#ifdef DEBUG
-// Add stringified versions to the given values, except the first. That is,
-// transform
-// x, a, b, c, d, e, f
-// to
-// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
-//
-// __VA_ARGS__ is ignored to allow the caller to pass through too many
-// parameters, and the first element is ignored to support having no extra
-// values without empty __VA_ARGS__ (which cause all sorts of problems with
-// extra commas).
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
- v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
-
-// Stringify the given variable number of arguments. The arguments are trimmed
-// to 5 if there are too many, and padded with nullptr if there are not enough.
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
- nullptr, nullptr)
-
-#define CSA_ASSERT_GET_FIRST(x, ...) (x)
-#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
-
-// CSA_ASSERT(csa, <condition>, <extra values to print...>)
-
-// We have to jump through some hoops to allow <extra values to print...> to be
-// empty.
-#define CSA_ASSERT(csa, ...) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>( \
- EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
- }, \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-
-// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
-// <extra values to print...>)
-
-#define CSA_ASSERT_BRANCH(csa, ...) \
- (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
- __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- compiler::Node* const argc = \
- (csa)->Parameter(Descriptor::kActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- SmiFromInt32((csa)->Parameter(Descriptor::kActualArgumentsCount)), \
- "argc")
-
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
- CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
-
-#define CSA_DEBUG_INFO(name) \
- { #name, __FILE__, __LINE__ }
-#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
-#define VARIABLE(name, ...) \
- Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
-#define VARIABLE_CONSTRUCTOR(name, ...) \
- name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
-#define TYPED_VARIABLE_DEF(type, name, ...) \
- TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
-#else // DEBUG
-#define CSA_ASSERT(csa, ...) ((void)0)
-#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
-#define BIND(label) Bind(label)
-#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
-#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
-#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
-#endif // DEBUG
-
-#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
-
-#ifdef ENABLE_SLOW_DCHECKS
-#define CSA_SLOW_ASSERT(csa, ...) \
- if (FLAG_enable_slow_asserts) { \
- CSA_ASSERT(csa, __VA_ARGS__); \
- }
-#else
-#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
-#endif
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index bf1a3181ab..adca79ac8f 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -13,6 +13,7 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/code-stubs-utils.h"
+#include "src/code-tracer.h"
#include "src/counters.h"
#include "src/gdb-jit.h"
#include "src/heap/heap-inl.h"
@@ -36,9 +37,7 @@ CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(kNullAddress),
miss_handler_(),
- has_miss_handler_(false) {
- stub->InitializeDescriptor(this);
-}
+ has_miss_handler_(false) {}
CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
: isolate_(isolate),
@@ -90,17 +89,17 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->raw_instruction_size());
#ifdef DEBUG
- code->VerifyEmbeddedObjects();
+ code->VerifyEmbeddedObjects(isolate());
#endif
}
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
- Handle<SimpleNumberDictionary> dict(heap->code_stubs());
- int entry = dict->FindEntry(GetKey());
+ Handle<SimpleNumberDictionary> dict(heap->code_stubs(), isolate());
+ int entry = dict->FindEntry(isolate(), GetKey());
DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
- dict = SimpleNumberDictionary::DeleteEntry(dict, entry);
+ dict = SimpleNumberDictionary::DeleteEntry(isolate(), dict, entry);
heap->SetRootCodeStubs(*dict);
}
@@ -108,15 +107,16 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), nullptr, 256, CodeObjectRequired::kYes);
+ // TODO(yangguo): remove this once we can serialize IC stubs.
+ AssemblerOptions options = AssemblerOptions::Default(isolate(), true);
+ MacroAssembler masm(isolate(), options, nullptr, 256,
+ CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
// Generate the code for the stub.
- // TODO(yangguo): remove this once we can serialize IC stubs.
- masm.enable_serializer();
NoCurrentFrameScope scope(&masm);
Generate(&masm);
}
@@ -141,7 +141,7 @@ Handle<Code> CodeStub::GetCode() {
Code* code;
if (FindCodeInCache(&code)) {
DCHECK(code->is_stub());
- return handle(code);
+ return handle(code, isolate_);
}
{
@@ -167,7 +167,7 @@ Handle<Code> CodeStub::GetCode() {
// Update the dictionary and the root in Heap.
Handle<SimpleNumberDictionary> dict = SimpleNumberDictionary::Set(
- handle(heap->code_stubs()), GetKey(), new_object);
+ isolate(), handle(heap->code_stubs(), isolate_), GetKey(), new_object);
heap->SetRootCodeStubs(*dict);
code = *new_object;
}
@@ -177,7 +177,7 @@ Handle<Code> CodeStub::GetCode() {
return Handle<Code>(code, isolate());
}
-CodeStub::Major CodeStub::GetMajorKey(Code* code_stub) {
+CodeStub::Major CodeStub::GetMajorKey(const Code* code_stub) {
return MajorKeyFromKey(code_stub->stub_key());
}
@@ -231,7 +231,6 @@ static void InitializeDescriptorDispatchedCall(CodeStub* stub,
void** value_out) {
CodeStubDescriptor* descriptor_out =
reinterpret_cast<CodeStubDescriptor*>(value_out);
- stub->InitializeDescriptor(descriptor_out);
descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
}
@@ -263,9 +262,10 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeAssemblerState state(
isolate(), &zone, descriptor, Code::STUB, name,
- PoisoningMitigationLevel::kDontPoison, 1, GetKey());
+ PoisoningMitigationLevel::kDontPoison, GetKey());
GenerateAssembly(&state);
- return compiler::CodeAssembler::GenerateCode(&state);
+ return compiler::CodeAssembler::GenerateCode(
+ &state, AssemblerOptions::Default(isolate()));
}
TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
@@ -305,23 +305,6 @@ TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
}
}
-TF_STUB(TransitionElementsKindStub, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* object = Parameter(Descriptor::kObject);
- Node* new_map = Parameter(Descriptor::kMap);
-
- Label bailout(this);
- TransitionElementsKind(object, new_map, stub->from_kind(), stub->to_kind(),
- stub->is_jsarray(), &bailout);
- Return(object);
-
- BIND(&bailout);
- {
- Comment("Call runtime");
- TailCallRuntime(Runtime::kTransitionElementsKind, context, object, new_map);
- }
-}
-
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(KeyedLoadSloppyArgumentsStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -475,140 +458,8 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
entry_hook(function, stack_pointer);
}
-TF_STUB(ArrayNoArgumentConstructorStub, CodeStubAssembler) {
- ElementsKind elements_kind = stub->elements_kind();
- Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
- JSFunction::kContextOffset);
- bool track_allocation_site =
- AllocationSite::ShouldTrack(elements_kind) &&
- stub->override_mode() != DISABLE_ALLOCATION_SITES;
- Node* allocation_site =
- track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
- Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- Node* array =
- AllocateJSArray(elements_kind, array_map,
- IntPtrConstant(JSArray::kPreallocatedArrayElements),
- SmiConstant(0), allocation_site);
- Return(array);
-}
-
-TF_STUB(InternalArrayNoArgumentConstructorStub, CodeStubAssembler) {
- Node* array_map = LoadObjectField(Parameter(Descriptor::kFunction),
- JSFunction::kPrototypeOrInitialMapOffset);
- Node* array = AllocateJSArray(
- stub->elements_kind(), array_map,
- IntPtrConstant(JSArray::kPreallocatedArrayElements), SmiConstant(0));
- Return(array);
-}
-
-class ArrayConstructorAssembler : public CodeStubAssembler {
- public:
- typedef compiler::Node Node;
-
- explicit ArrayConstructorAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- void GenerateConstructor(Node* context, Node* array_function, Node* array_map,
- Node* array_size, Node* allocation_site,
- ElementsKind elements_kind, AllocationSiteMode mode);
-};
-
-void ArrayConstructorAssembler::GenerateConstructor(
- Node* context, Node* array_function, Node* array_map, Node* array_size,
- Node* allocation_site, ElementsKind elements_kind,
- AllocationSiteMode mode) {
- Label ok(this);
- Label smi_size(this);
- Label small_smi_size(this);
- Label call_runtime(this, Label::kDeferred);
-
- Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime);
-
- BIND(&smi_size);
-
- if (IsFastPackedElementsKind(elements_kind)) {
- Label abort(this, Label::kDeferred);
- Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort);
-
- BIND(&abort);
- Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
- TailCallRuntime(Runtime::kAbort, context, reason);
- } else {
- int element_size =
- IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
- int max_fast_elements =
- (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
- AllocationMemento::kSize) /
- element_size;
- Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)),
- &call_runtime, &small_smi_size);
- }
-
- BIND(&small_smi_size);
- {
- Node* array = AllocateJSArray(
- elements_kind, array_map, array_size, array_size,
- mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
- CodeStubAssembler::SMI_PARAMETERS);
- Return(array);
- }
-
- BIND(&call_runtime);
- {
- TailCallRuntime(Runtime::kNewArray, context, array_function, array_size,
- array_function, allocation_site);
- }
-}
-
-TF_STUB(ArraySingleArgumentConstructorStub, ArrayConstructorAssembler) {
- ElementsKind elements_kind = stub->elements_kind();
- Node* context = Parameter(Descriptor::kContext);
- Node* function = Parameter(Descriptor::kFunction);
- Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
- Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE;
- if (stub->override_mode() == DONT_OVERRIDE) {
- mode = AllocationSite::ShouldTrack(elements_kind)
- ? TRACK_ALLOCATION_SITE
- : DONT_TRACK_ALLOCATION_SITE;
- }
-
- Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
- Node* allocation_site = Parameter(Descriptor::kAllocationSite);
-
- GenerateConstructor(context, function, array_map, array_size, allocation_site,
- elements_kind, mode);
-}
-
-TF_STUB(InternalArraySingleArgumentConstructorStub, ArrayConstructorAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* function = Parameter(Descriptor::kFunction);
- Node* array_map =
- LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
- Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
- Node* allocation_site = UndefinedConstant();
-
- GenerateConstructor(context, function, array_map, array_size, allocation_site,
- stub->elements_kind(), DONT_TRACK_ALLOCATION_SITE);
-}
-
-ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
-InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
-CommonArrayConstructorStub::CommonArrayConstructorStub(
- Isolate* isolate, ElementsKind kind,
- AllocationSiteOverrideMode override_mode)
- : TurboFanCodeStub(isolate) {
- // It only makes sense to override local allocation site behavior
- // if there is a difference between the global allocation site policy
- // for an ElementsKind and the desired usage of the stub.
- DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
- AllocationSite::ShouldTrack(kind));
- set_sub_minor_key(ElementsKindBits::encode(kind) |
- AllocationSiteOverrideModeBits::encode(override_mode));
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ StoreFastElementStub::GenerateAheadOfTime(isolate);
}
} // namespace internal
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index cdc9c9878d..745aa1aa24 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -5,49 +5,33 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
-#include "src/allocation.h"
-#include "src/assembler.h"
-#include "src/globals.h"
-#include "src/heap/factory.h"
#include "src/interface-descriptors.h"
-#include "src/macro-assembler.h"
-#include "src/ostreams.h"
#include "src/type-hints.h"
namespace v8 {
namespace internal {
// Forward declarations.
-class CodeStubAssembler;
+class Isolate;
namespace compiler {
-class CodeAssemblerLabel;
class CodeAssemblerState;
-class Node;
}
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
/* --- PlatformCodeStubs --- */ \
- V(ArrayConstructor) \
V(CallApiCallback) \
V(CallApiGetter) \
- V(InternalArrayConstructor) \
V(JSEntry) \
V(ProfileEntryHook) \
/* --- TurboFanCodeStubs --- */ \
V(StoreSlowElement) \
V(StoreInArrayLiteralSlow) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(InternalArrayNoArgumentConstructor) \
- V(InternalArraySingleArgumentConstructor) \
V(ElementsTransitionAndStore) \
V(KeyedLoadSloppyArguments) \
V(KeyedStoreSloppyArguments) \
V(StoreFastElement) \
V(StoreInterceptor) \
- V(TransitionElementsKind) \
V(LoadIndexedInterceptor)
// List of code stubs only used on ARM 32 bits platforms.
@@ -123,7 +107,7 @@ class CodeStub : public ZoneObject {
}
// Gets the major key from a code object that is a code stub or binary op IC.
- static Major GetMajorKey(Code* code_stub);
+ static Major GetMajorKey(const Code* code_stub);
static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); }
@@ -151,8 +135,6 @@ class CodeStub : public ZoneObject {
return GetCallInterfaceDescriptor().GetStackParameterCount();
}
- virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) {}
-
static void InitializeDescriptor(Isolate* isolate, uint32_t key,
CodeStubDescriptor* desc);
@@ -259,7 +241,7 @@ class CodeStub : public ZoneObject {
public: \
typedef NAME##Descriptor Descriptor; \
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- return Descriptor(isolate()); \
+ return Descriptor(); \
}
// There are some code stubs we just can't describe right now with a
@@ -439,37 +421,6 @@ class StoreInterceptorStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
-class TransitionElementsKindStub : public TurboFanCodeStub {
- public:
- TransitionElementsKindStub(Isolate* isolate, ElementsKind from_kind,
- ElementsKind to_kind, bool is_jsarray)
- : TurboFanCodeStub(isolate) {
- set_sub_minor_key(FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind) |
- IsJSArrayBits::encode(is_jsarray));
- }
-
- void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
-
- uint32_t sub_minor_key() const { return minor_key_; }
-
- ElementsKind from_kind() const {
- return FromKindBits::decode(sub_minor_key());
- }
-
- ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
-
- bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
-
- private:
- class ToKindBits : public BitField<ElementsKind, 0, 8> {};
- class FromKindBits : public BitField<ElementsKind, ToKindBits::kNext, 8> {};
- class IsJSArrayBits : public BitField<bool, FromKindBits::kNext, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
- DEFINE_TURBOFAN_CODE_STUB(TransitionElementsKind, TurboFanCodeStub);
-};
-
// TODO(jgruber): Convert this stub into a builtin.
class LoadIndexedInterceptorStub : public TurboFanCodeStub {
public:
@@ -480,37 +431,6 @@ class LoadIndexedInterceptorStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
};
-enum AllocationSiteOverrideMode {
- DONT_OVERRIDE,
- DISABLE_ALLOCATION_SITES,
- LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class ArrayConstructorStub: public PlatformCodeStub {
- public:
- explicit ArrayConstructorStub(Isolate* isolate);
-
- private:
- void GenerateDispatchToArrayStub(MacroAssembler* masm,
- AllocationSiteOverrideMode mode);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
- DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class InternalArrayConstructorStub: public PlatformCodeStub {
- public:
- explicit InternalArrayConstructorStub(Isolate* isolate);
-
- private:
- void GenerateCase(MacroAssembler* masm, ElementsKind kind);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
- DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
-};
-
// TODO(jgruber): Convert this stub into a builtin.
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
public:
@@ -545,7 +465,7 @@ class CallApiCallbackStub : public PlatformCodeStub {
CallApiCallbackStub(Isolate* isolate, int argc)
: PlatformCodeStub(isolate) {
- CHECK_LE(0, argc);
+ CHECK_LE(0, argc); // The argc in {0, 1} cases are covered by builtins.
CHECK_LE(argc, kArgMax);
minor_key_ = ArgumentBits::encode(argc);
}
@@ -555,14 +475,20 @@ class CallApiCallbackStub : public PlatformCodeStub {
class ArgumentBits : public BitField<int, 0, kArgBits> {};
+ friend class Builtins; // For generating the related builtin.
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
};
-// TODO(jgruber): Convert this stub into a builtin.
+// TODO(jgruber): This stub only exists to avoid code duplication between
+// code-stubs-<arch>.cc and builtins-<arch>.cc. If CallApiCallbackStub is ever
+// completely removed, CallApiGetterStub can also be deleted.
class CallApiGetterStub : public PlatformCodeStub {
- public:
+ private:
+ // For generating the related builtin.
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ friend class Builtins;
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiGetter);
DEFINE_PLATFORM_CODE_STUB(CallApiGetter, PlatformCodeStub);
@@ -654,120 +580,6 @@ class StoreFastElementStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreFastElement, TurboFanCodeStub);
};
-
-class CommonArrayConstructorStub : public TurboFanCodeStub {
- protected:
- CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
- AllocationSiteOverrideMode override_mode);
-
- void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
-
- uint32_t sub_minor_key() const { return minor_key_; }
-
- CommonArrayConstructorStub(uint32_t key, Isolate* isolate)
- : TurboFanCodeStub(key, isolate) {}
-
- public:
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(sub_minor_key());
- }
-
- AllocationSiteOverrideMode override_mode() const {
- return AllocationSiteOverrideModeBits::decode(sub_minor_key());
- }
-
- static void GenerateStubsAheadOfTime(Isolate* isolate);
-
- private:
- // Ensure data fits within available bits.
- STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
-
- class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
- class AllocationSiteOverrideModeBits
- : public BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
-};
-
-class ArrayNoArgumentConstructorStub : public CommonArrayConstructorStub {
- public:
- ArrayNoArgumentConstructorStub(
- Isolate* isolate, ElementsKind kind,
- AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : CommonArrayConstructorStub(isolate, kind, override_mode) {}
-
- private:
- void PrintName(std::ostream& os) const override { // NOLINT
- os << "ArrayNoArgumentConstructorStub";
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
- DEFINE_TURBOFAN_CODE_STUB(ArrayNoArgumentConstructor,
- CommonArrayConstructorStub);
-};
-
-class InternalArrayNoArgumentConstructorStub
- : public CommonArrayConstructorStub {
- public:
- InternalArrayNoArgumentConstructorStub(Isolate* isolate, ElementsKind kind)
- : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
-
- private:
- void PrintName(std::ostream& os) const override { // NOLINT
- os << "InternalArrayNoArgumentConstructorStub";
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
- DEFINE_TURBOFAN_CODE_STUB(InternalArrayNoArgumentConstructor,
- CommonArrayConstructorStub);
-};
-
-class ArraySingleArgumentConstructorStub : public CommonArrayConstructorStub {
- public:
- ArraySingleArgumentConstructorStub(
- Isolate* isolate, ElementsKind kind,
- AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : CommonArrayConstructorStub(isolate, kind, override_mode) {}
-
- private:
- void PrintName(std::ostream& os) const override { // NOLINT
- os << "ArraySingleArgumentConstructorStub";
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
- DEFINE_TURBOFAN_CODE_STUB(ArraySingleArgumentConstructor,
- CommonArrayConstructorStub);
-};
-
-class InternalArraySingleArgumentConstructorStub
- : public CommonArrayConstructorStub {
- public:
- InternalArraySingleArgumentConstructorStub(Isolate* isolate,
- ElementsKind kind)
- : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
-
- private:
- void PrintName(std::ostream& os) const override { // NOLINT
- os << "InternalArraySingleArgumentConstructorStub";
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
- DEFINE_TURBOFAN_CODE_STUB(InternalArraySingleArgumentConstructor,
- CommonArrayConstructorStub);
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class ArrayNArgumentsConstructorStub : public PlatformCodeStub {
- public:
- explicit ArrayNArgumentsConstructorStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return ArrayNArgumentsConstructorDescriptor(isolate());
- }
-
- private:
- DEFINE_PLATFORM_CODE_STUB(ArrayNArgumentsConstructor, PlatformCodeStub);
-};
-
class StoreSlowElementStub : public TurboFanCodeStub {
public:
StoreSlowElementStub(Isolate* isolate, KeyedAccessStoreMode mode)
diff --git a/deps/v8/src/code-tracer.h b/deps/v8/src/code-tracer.h
new file mode 100644
index 0000000000..3ed07be77e
--- /dev/null
+++ b/deps/v8/src/code-tracer.h
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_TRACER_H_
+#define V8_CODE_TRACER_H_
+
+#include "src/allocation.h"
+#include "src/flags.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeTracer final : public Malloced {
+ public:
+ explicit CodeTracer(int isolate_id) : file_(nullptr), scope_depth_(0) {
+ if (!ShouldRedirect()) {
+ file_ = stdout;
+ return;
+ }
+
+ if (FLAG_redirect_code_traces_to != nullptr) {
+ StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
+ } else if (isolate_id >= 0) {
+ SNPrintF(filename_, "code-%d-%d.asm", base::OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ SNPrintF(filename_, "code-%d.asm", base::OS::GetCurrentProcessId());
+ }
+
+ WriteChars(filename_.start(), "", 0, false);
+ }
+
+ class Scope {
+ public:
+ explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
+ ~Scope() { tracer_->CloseFile(); }
+
+ FILE* file() const { return tracer_->file(); }
+
+ private:
+ CodeTracer* tracer_;
+ };
+
+ void OpenFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (file_ == nullptr) {
+ file_ = base::OS::FOpen(filename_.start(), "ab");
+ }
+
+ scope_depth_++;
+ }
+
+ void CloseFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (--scope_depth_ == 0) {
+ fclose(file_);
+ file_ = nullptr;
+ }
+ }
+
+ FILE* file() const { return file_; }
+
+ private:
+ static bool ShouldRedirect() { return FLAG_redirect_code_traces; }
+
+ EmbeddedVector<char, 128> filename_;
+ FILE* file_;
+ int scope_depth_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODE_TRACER_H_
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index ce8270fdb3..61b83b1a18 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -65,7 +65,7 @@ void CompilationSubCache::Age() {
}
// Set the first generation as unborn.
- tables_[0] = isolate()->heap()->undefined_value();
+ tables_[0] = ReadOnlyRoots(isolate()).undefined_value();
}
void CompilationSubCache::Iterate(RootVisitor* v) {
@@ -74,7 +74,8 @@ void CompilationSubCache::Iterate(RootVisitor* v) {
}
void CompilationSubCache::Clear() {
- MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
+ MemsetPointer(tables_, ReadOnlyRoots(isolate()).undefined_value(),
+ generations_);
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
@@ -115,8 +116,9 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
if (resource_options.Flags() != script->origin_options().Flags())
return false;
// Compare the two name strings for equality.
- return String::Equals(Handle<String>::cast(name),
- Handle<String>(String::cast(script->name())));
+ return String::Equals(
+ isolate(), Handle<String>::cast(name),
+ Handle<String>(String::cast(script->name()), isolate()));
}
// TODO(245): Need to allow identical code from different contexts to
@@ -160,6 +162,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
resource_options));
#endif
isolate()->counters()->compilation_cache_hits()->Increment();
+ LOG(isolate(), CompilationCacheEvent("hit", "script", *function_info));
} else {
isolate()->counters()->compilation_cache_misses()->Increment();
}
@@ -245,7 +248,8 @@ void CompilationCacheRegExp::Put(Handle<String> source,
Handle<FixedArray> data) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutRegExp(table, source, flags, data));
+ SetFirstTable(
+ CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
@@ -274,14 +278,23 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source,
InfoCellPair result;
if (!IsEnabled()) return result;
+ const char* cache_type;
+
if (context->IsNativeContext()) {
result = eval_global_.Lookup(source, outer_info, context, language_mode,
position);
+ cache_type = "eval-global";
+
} else {
DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
result = eval_contextual_.Lookup(source, outer_info, native_context,
language_mode, position);
+ cache_type = "eval-contextual";
+ }
+
+ if (result.has_shared()) {
+ LOG(isolate(), CompilationCacheEvent("hit", cache_type, result.shared()));
}
return result;
@@ -299,6 +312,7 @@ void CompilationCache::PutScript(Handle<String> source,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
+ LOG(isolate(), CompilationCacheEvent("put", "script", *function_info));
script_.Put(source, native_context, language_mode, function_info);
}
@@ -311,24 +325,26 @@ void CompilationCache::PutEval(Handle<String> source,
int position) {
if (!IsEnabled()) return;
+ const char* cache_type;
HandleScope scope(isolate());
if (context->IsNativeContext()) {
eval_global_.Put(source, outer_info, function_info, context, feedback_cell,
position);
+ cache_type = "eval-global";
} else {
DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
eval_contextual_.Put(source, outer_info, function_info, native_context,
feedback_cell, position);
+ cache_type = "eval-contextual";
}
+ LOG(isolate(), CompilationCacheEvent("put", cache_type, *function_info));
}
void CompilationCache::PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
- if (!IsEnabled()) {
- return;
- }
+ if (!IsEnabled()) return;
reg_exp_.Put(source, flags, data);
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 7609e7ac38..3a4fe2e7b5 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -220,9 +220,9 @@ class CompilationCache {
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
- bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
+ bool IsEnabled() const { return FLAG_compilation_cache && enabled_; }
- Isolate* isolate() { return isolate_; }
+ Isolate* isolate() const { return isolate_; }
Isolate* isolate_;
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
deleted file mode 100644
index 8be814025a..0000000000
--- a/deps/v8/src/compilation-dependencies.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compilation-dependencies.h"
-
-#include "src/handles-inl.h"
-#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-DependentCode* CompilationDependencies::Get(Handle<Object> object) const {
- if (object->IsMap()) {
- return Handle<Map>::cast(object)->dependent_code();
- } else if (object->IsPropertyCell()) {
- return Handle<PropertyCell>::cast(object)->dependent_code();
- } else if (object->IsAllocationSite()) {
- return Handle<AllocationSite>::cast(object)->dependent_code();
- }
- UNREACHABLE();
-}
-
-
-void CompilationDependencies::Set(Handle<Object> object,
- Handle<DependentCode> dep) {
- if (object->IsMap()) {
- Handle<Map>::cast(object)->set_dependent_code(*dep);
- } else if (object->IsPropertyCell()) {
- Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
- } else if (object->IsAllocationSite()) {
- Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void CompilationDependencies::Insert(DependentCode::DependencyGroup group,
- Handle<HeapObject> object) {
- if (groups_[group] == nullptr) {
- groups_[group] = new (zone_->New(sizeof(ZoneVector<Handle<HeapObject>>)))
- ZoneVector<Handle<HeapObject>>(zone_);
- }
- groups_[group]->push_back(object);
-
- if (object_wrapper_.is_null()) {
- // Allocate the wrapper if necessary.
- object_wrapper_ =
- isolate_->factory()->NewForeign(reinterpret_cast<Address>(this));
- }
-
- // Get the old dependent code list.
- Handle<DependentCode> old_dependent_code =
- Handle<DependentCode>(Get(object), isolate_);
- Handle<DependentCode> new_dependent_code =
- DependentCode::InsertCompilationDependencies(old_dependent_code, group,
- object_wrapper_);
-
- // Set the new dependent code list if the head of the list changed.
- if (!new_dependent_code.is_identical_to(old_dependent_code)) {
- Set(object, new_dependent_code);
- }
-}
-
-
-void CompilationDependencies::Commit(Handle<Code> code) {
- if (IsEmpty()) return;
-
- DCHECK(!object_wrapper_.is_null());
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- AllowDeferredHandleDereference get_wrapper;
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneVector<Handle<HeapObject>>* group_objects = groups_[i];
- if (group_objects == nullptr) continue;
- DependentCode::DependencyGroup group =
- static_cast<DependentCode::DependencyGroup>(i);
- for (size_t j = 0; j < group_objects->size(); j++) {
- DependentCode* dependent_code = Get(group_objects->at(j));
- dependent_code->UpdateToFinishedCode(group, *object_wrapper_, *cell);
- }
- groups_[i] = nullptr; // Zone-allocated, no need to delete.
- }
-}
-
-
-void CompilationDependencies::Rollback() {
- if (IsEmpty()) return;
-
- AllowDeferredHandleDereference get_wrapper;
- // Unregister from all dependent maps if not yet committed.
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneVector<Handle<HeapObject>>* group_objects = groups_[i];
- if (group_objects == nullptr) continue;
- DependentCode::DependencyGroup group =
- static_cast<DependentCode::DependencyGroup>(i);
- for (size_t j = 0; j < group_objects->size(); j++) {
- DependentCode* dependent_code = Get(group_objects->at(j));
- dependent_code->RemoveCompilationDependencies(group, *object_wrapper_);
- }
- groups_[i] = nullptr; // Zone-allocated, no need to delete.
- }
-}
-
-
-void CompilationDependencies::AssumeMapNotDeprecated(Handle<Map> map) {
- DCHECK(!map->is_deprecated());
- // Do nothing if the map cannot be deprecated.
- if (map->CanBeDeprecated()) {
- Insert(DependentCode::kTransitionGroup, map);
- }
-}
-
-
-void CompilationDependencies::AssumeMapStable(Handle<Map> map) {
- DCHECK(map->is_stable());
- // Do nothing if the map cannot transition.
- if (map->CanTransition()) {
- Insert(DependentCode::kPrototypeCheckGroup, map);
- }
-}
-
-
-void CompilationDependencies::AssumePrototypeMapsStable(
- Handle<Map> map, MaybeHandle<JSReceiver> prototype) {
- for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
- Handle<JSReceiver> const current =
- PrototypeIterator::GetCurrent<JSReceiver>(i);
- AssumeMapStable(handle(current->map()));
- Handle<JSReceiver> last;
- if (prototype.ToHandle(&last) && last.is_identical_to(current)) {
- break;
- }
- }
-}
-
-
-void CompilationDependencies::AssumeTransitionStable(
- Handle<AllocationSite> site) {
- // Do nothing if the object doesn't have any useful element transitions left.
- ElementsKind kind = site->PointsToLiteral()
- ? site->boilerplate()->GetElementsKind()
- : site->GetElementsKind();
- if (AllocationSite::ShouldTrack(kind)) {
- Insert(DependentCode::kAllocationSiteTransitionChangedGroup, site);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
deleted file mode 100644
index fa26e67b1a..0000000000
--- a/deps/v8/src/compilation-dependencies.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILATION_DEPENDENCIES_H_
-#define V8_COMPILATION_DEPENDENCIES_H_
-
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/objects/map.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-// Collects dependencies for this compilation, e.g. assumptions about
-// stable maps, constant globals, etc.
-class CompilationDependencies {
- public:
- CompilationDependencies(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
- object_wrapper_(Handle<Foreign>::null()),
- aborted_(false) {
- std::fill_n(groups_, DependentCode::kGroupCount, nullptr);
- }
-
- void Insert(DependentCode::DependencyGroup group, Handle<HeapObject> handle);
-
- void AssumeInitialMapCantChange(Handle<Map> map) {
- Insert(DependentCode::kInitialMapChangedGroup, map);
- }
- void AssumeFieldOwner(Handle<Map> map) {
- Insert(DependentCode::kFieldOwnerGroup, map);
- }
- void AssumeMapStable(Handle<Map> map);
- void AssumePrototypeMapsStable(
- Handle<Map> map,
- MaybeHandle<JSReceiver> prototype = MaybeHandle<JSReceiver>());
- void AssumeMapNotDeprecated(Handle<Map> map);
- void AssumePropertyCell(Handle<PropertyCell> cell) {
- Insert(DependentCode::kPropertyCellChangedGroup, cell);
- }
- void AssumeTenuringDecision(Handle<AllocationSite> site) {
- Insert(DependentCode::kAllocationSiteTenuringChangedGroup, site);
- }
- void AssumeTransitionStable(Handle<AllocationSite> site);
-
- void Commit(Handle<Code> code);
- void Rollback();
- void Abort() { aborted_ = true; }
- bool HasAborted() const { return aborted_; }
-
- bool IsEmpty() const {
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- if (groups_[i]) return false;
- }
- return true;
- }
-
- private:
- Isolate* isolate_;
- Zone* zone_;
- Handle<Foreign> object_wrapper_;
- bool aborted_;
- ZoneVector<Handle<HeapObject> >* groups_[DependentCode::kGroupCount];
-
- DependentCode* Get(Handle<Object> object) const;
- void Set(Handle<Object> object, Handle<DependentCode> dep);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILATION_DEPENDENCIES_H_
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index dd7527dfbf..47b2181a88 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -201,7 +201,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
output_queue_.pop();
}
OptimizedCompilationInfo* info = job->compilation_info();
- Handle<JSFunction> function(*info->closure());
+ Handle<JSFunction> function(*info->closure(), isolate_);
if (function->HasOptimizedCode()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 3e90ccfa40..2e8065ed11 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -116,20 +116,28 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
static_cast<void*>(this));
}
- HandleScope scope(isolate);
+ ParseInfo* parse_info = new ParseInfo(isolate, shared_);
+ parse_info_.reset(parse_info);
+
unicode_cache_.reset(new UnicodeCache());
- Handle<Script> script(Script::cast(shared_->script()), isolate);
- DCHECK(script->type() != Script::TYPE_NATIVE);
+ parse_info_->set_unicode_cache(unicode_cache_.get());
+ parse_info_->set_function_literal_id(shared_->FunctionLiteralId(isolate));
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ parse_info_->set_runtime_call_stats(new (parse_info_->zone())
+ RuntimeCallStats());
+ }
+
+ Handle<Script> script = parse_info->script();
+ HandleScope scope(isolate);
+ DCHECK(script->type() != Script::TYPE_NATIVE);
Handle<String> source(String::cast(script->source()), isolate);
- parse_info_.reset(new ParseInfo(isolate->allocator()));
- parse_info_->InitFromIsolate(isolate);
if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- source, shared_->StartPosition(), shared_->EndPosition()));
+ isolate, source, shared_->StartPosition(), shared_->EndPosition()));
parse_info_->set_character_stream(std::move(stream));
} else {
- source = String::Flatten(source);
+ source = String::Flatten(isolate, source);
const void* data;
int offset = 0;
int length = source->length();
@@ -172,47 +180,32 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
if (source->IsOneByteRepresentation()) {
ExternalOneByteString::Resource* resource =
new OneByteWrapper(data, length);
- source_wrapper_.reset(resource);
wrapper = isolate->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
} else {
ExternalTwoByteString::Resource* resource =
new TwoByteWrapper(data, length);
- source_wrapper_.reset(resource);
wrapper = isolate->factory()
->NewExternalStringFromTwoByte(resource)
.ToHandleChecked();
}
wrapper_ = isolate->global_handles()->Create(*wrapper);
std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(wrapper_, shared_->StartPosition() - offset,
+ ScannerStream::For(isolate, wrapper_, shared_->StartPosition() - offset,
shared_->EndPosition() - offset));
parse_info_->set_character_stream(std::move(stream));
}
- parse_info_->set_hash_seed(isolate->heap()->HashSeed());
- parse_info_->set_is_named_expression(shared_->is_named_expression());
- parse_info_->set_function_flags(shared_->flags());
- parse_info_->set_start_position(shared_->StartPosition());
- parse_info_->set_end_position(shared_->EndPosition());
- parse_info_->set_unicode_cache(unicode_cache_.get());
- parse_info_->set_language_mode(shared_->language_mode());
- parse_info_->set_function_literal_id(shared_->function_literal_id());
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- parse_info_->set_runtime_call_stats(new (parse_info_->zone())
- RuntimeCallStats());
- }
parser_.reset(new Parser(parse_info_.get()));
- MaybeHandle<ScopeInfo> outer_scope_info;
- if (shared_->HasOuterScopeInfo()) {
- outer_scope_info = handle(shared_->GetOuterScopeInfo());
- }
- parser_->DeserializeScopeChain(parse_info_.get(), outer_scope_info);
+ parser_->DeserializeScopeChain(isolate, parse_info_.get(),
+ parse_info_->maybe_outer_scope_info());
- Handle<String> name(shared_->Name());
+ // Initailize the name after setting up the ast_value_factory.
+ Handle<String> name(shared_->Name(), isolate);
parse_info_->set_function_name(
parse_info_->ast_value_factory()->GetString(name));
+
set_status(Status::kPrepared);
}
@@ -278,7 +271,8 @@ void UnoptimizedCompileJob::FinalizeOnMainThread(Isolate* isolate) {
}
Handle<Script> script(Script::cast(shared_->script()), isolate);
- parse_info_->set_script(script);
+ DCHECK_EQ(*parse_info_->script(), shared_->script());
+
parser_->UpdateStatistics(isolate, script);
parse_info_->UpdateBackgroundParseStatisticsOnMainThread(isolate);
parser_->HandleSourceURLComments(isolate, script);
@@ -288,8 +282,7 @@ void UnoptimizedCompileJob::FinalizeOnMainThread(Isolate* isolate) {
// Internalize ast values onto the heap.
parse_info_->ast_value_factory()->Internalize(isolate);
// Allocate scope infos for the literal.
- DeclarationScope::AllocateScopeInfos(parse_info_.get(), isolate,
- AnalyzeMode::kRegular);
+ DeclarationScope::AllocateScopeInfos(parse_info_.get(), isolate);
if (compilation_job_->state() == CompilationJob::State::kFailed ||
!Compiler::FinalizeCompilationJob(compilation_job_.release(), shared_,
isolate)) {
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
index 8352e4e795..3e08388ca0 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
@@ -67,7 +67,6 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
Handle<String> wrapper_; // Global handle.
- std::unique_ptr<v8::String::ExternalStringResourceBase> source_wrapper_;
size_t max_stack_size_;
// Members required for parsing.
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index a01750b23a..6c5211b74d 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -94,7 +94,7 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
int column_num = Script::GetColumnNumber(script, shared->StartPosition()) + 1;
String* script_name = script->name()->IsString()
? String::cast(script->name())
- : isolate->heap()->empty_string();
+ : ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag =
Logger::ToNativeByScript(tag, *script);
PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared,
@@ -119,7 +119,7 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
UNREACHABLE();
}
- LOG(isolate, FunctionEvent(name.c_str(), nullptr, script->id(), time_taken_ms,
+ LOG(isolate, FunctionEvent(name.c_str(), script->id(), time_taken_ms,
shared->StartPosition(), shared->EndPosition(),
shared->DebugName()));
}
@@ -198,7 +198,7 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DisallowJavascriptExecution no_js(isolate);
if (FLAG_trace_opt && compilation_info()->IsOptimizing()) {
- OFStream os(stdout);
+ StdoutStream os;
os << "[compiling method " << Brief(*compilation_info()->closure())
<< " using " << compiler_name_;
if (compilation_info()->is_osr()) os << " OSR";
@@ -227,8 +227,6 @@ CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate);
- CHECK(!compilation_info()->dependencies() ||
- !compilation_info()->dependencies()->HasAborted());
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToFinalize);
@@ -285,7 +283,7 @@ void OptimizedCompilationJob::RecordFunctionCompilation(
time_taken_to_finalize_.InMillisecondsF();
Handle<Script> script(
- Script::cast(compilation_info()->shared_info()->script()));
+ Script::cast(compilation_info()->shared_info()->script()), isolate);
LogFunctionCompilation(tag, compilation_info()->shared_info(), script,
abstract_code, true, time_taken_ms, isolate);
}
@@ -342,7 +340,7 @@ void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
Script::GetColumnNumber(script, shared_info->StartPosition()) + 1;
String* script_name = script->name()->IsString()
? String::cast(script->name())
- : isolate->heap()->empty_string();
+ : ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag = Logger::ToNativeByScript(
CodeEventListener::INTERPRETED_FUNCTION_TAG, *script);
PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared_info,
@@ -374,7 +372,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
DCHECK(compilation_info->has_asm_wasm_data());
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
- isolate->heap()->empty_feedback_metadata());
+ ReadOnlyRoots(isolate).empty_feedback_metadata());
}
// Install coverage info on the shared function info.
@@ -509,8 +507,7 @@ bool FinalizeUnoptimizedCode(
DCHECK(AllowCompilation::IsAllowed(isolate));
// Allocate scope infos for the literal.
- DeclarationScope::AllocateScopeInfos(parse_info, isolate,
- AnalyzeMode::kRegular);
+ DeclarationScope::AllocateScopeInfos(parse_info, isolate);
// Finalize the outer-most function's compilation job.
if (FinalizeUnoptimizedCompilationJob(outer_function_job, shared_info,
@@ -548,7 +545,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
if (function->feedback_cell()->value()->IsFeedbackVector()) {
@@ -561,7 +558,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
- return Handle<Code>(code);
+ return Handle<Code>(code, feedback_vector->GetIsolate());
}
}
}
@@ -593,8 +590,9 @@ void InsertCodeIntoOptimizedCodeCache(
// Cache optimized context-specific code.
Handle<JSFunction> function = compilation_info->closure();
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Context> native_context(function->context()->native_context());
+ Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
+ Handle<Context> native_context(function->context()->native_context(),
+ function->GetIsolate());
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
@@ -711,7 +709,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
std::unique_ptr<OptimizedCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(function, has_script));
+ compiler::Pipeline::NewCompilationJob(isolate, function, has_script));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
compilation_info->SetOptimizingForOsr(osr_offset, osr_frame);
@@ -755,7 +753,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
CanonicalHandleScope canonical(isolate);
// Reopen handles in the new CompilationHandleScope.
- compilation_info->ReopenHandlesInNewHandleScope();
+ compilation_info->ReopenHandlesInNewHandleScope(isolate);
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(job.get(), isolate)) {
@@ -802,8 +800,6 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
job->RetryOptimization(BailoutReason::kOptimizationDisabled);
- } else if (compilation_info->dependencies()->HasAborted()) {
- job->RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
job->RecordCompilationStats();
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
@@ -973,8 +969,9 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
- ParseInfo* info = new ParseInfo(isolate->allocator());
- info->InitFromIsolate(isolate);
+ ParseInfo* info = new ParseInfo(isolate);
+ LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
+ info->script_id()));
if (V8_UNLIKELY(FLAG_runtime_stats)) {
info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
} else {
@@ -1000,7 +997,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
source_->parser.reset(new Parser(source_->info.get()));
- source_->parser->DeserializeScopeChain(source_->info.get(),
+ source_->parser->DeserializeScopeChain(isolate, source_->info.get(),
MaybeHandle<ScopeInfo>());
}
@@ -1077,7 +1074,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
// Set up parse info.
- ParseInfo parse_info(shared_info);
+ ParseInfo parse_info(isolate, shared_info);
parse_info.set_lazy_compile();
// Check if the compiler dispatcher has shared_info enqueued for compile.
@@ -1090,13 +1087,11 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
if (FLAG_preparser_scope_analysis) {
- if (shared_info->HasPreParsedScopeData()) {
- Handle<PreParsedScopeData> data(
- PreParsedScopeData::cast(shared_info->preparsed_scope_data()));
- parse_info.consumed_preparsed_scope_data()->SetData(data);
- // After we've compiled the function, we don't need data about its
- // skippable functions any more.
- shared_info->ClearPreParsedScopeData();
+ if (shared_info->HasUncompiledDataWithPreParsedScope()) {
+ parse_info.consumed_preparsed_scope_data()->SetData(
+ isolate, handle(shared_info->uncompiled_data_with_pre_parsed_scope()
+ ->pre_parsed_scope_data(),
+ isolate));
}
}
@@ -1136,7 +1131,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
DCHECK(!function->HasOptimizedCode());
Isolate* isolate = function->GetIsolate();
- Handle<SharedFunctionInfo> shared_info = handle(function->shared());
+ Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
// Ensure shared function info is compiled.
if (!shared_info->is_compiled() && !Compile(shared_info, flag)) return false;
@@ -1202,37 +1197,9 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
return true;
}
-MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
- DCHECK(AllowCompilation::IsAllowed(isolate));
-
- // In order to ensure that live edit function info collection finds the newly
- // generated shared function infos, clear the script's list temporarily
- // and restore it at the end of this method.
- Handle<WeakFixedArray> old_function_infos(script->shared_function_infos(),
- isolate);
- script->set_shared_function_infos(isolate->heap()->empty_weak_fixed_array());
-
- // Start a compilation.
- ParseInfo parse_info(script);
- parse_info.set_eager();
-
- // TODO(635): support extensions.
- Handle<JSArray> infos;
- Handle<SharedFunctionInfo> shared_info;
- if (CompileToplevel(&parse_info, isolate).ToHandle(&shared_info)) {
- // Check postconditions on success.
- DCHECK(!isolate->has_pending_exception());
- infos = LiveEditFunctionTracker::Collect(parse_info.literal(), script,
- parse_info.zone(), isolate);
- }
-
- // Restore the original function info list in order to remain side-effect
- // free as much as possible, since some code expects the old shared function
- // infos to stick around.
- script->set_shared_function_infos(*old_function_infos);
-
- return infos;
+MaybeHandle<SharedFunctionInfo> Compiler::CompileForLiveEdit(
+ ParseInfo* parse_info, Isolate* isolate) {
+ return CompileToplevel(parse_info, isolate);
}
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
@@ -1242,7 +1209,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
int eval_scope_position, int eval_position, int line_offset,
int column_offset, Handle<Object> script_name,
ScriptOriginOptions options) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = context->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
@@ -1279,24 +1246,22 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script = Handle<Script>(Script::cast(shared_info->script()), isolate);
allow_eval_cache = true;
} else {
- script = isolate->factory()->NewScript(source);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
+ ParseInfo parse_info(isolate);
+ script = parse_info.CreateScript(isolate, source, options);
if (!script_name.is_null()) {
+ // TODO(cbruni): check whether we can store this data in options
script->set_name(*script_name);
script->set_line_offset(line_offset);
script->set_column_offset(column_offset);
+ LOG(isolate, ScriptDetails(*script));
}
- script->set_origin_options(options);
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
-
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
// If the position is missing, attempt to get the code offset by
// walking the stack. Do not translate the code offset into source
// position, but store it as negative value for lazy translation.
- StackTraceFrameIterator it(script->GetIsolate());
+ StackTraceFrameIterator it(isolate);
if (!it.done() && it.is_javascript()) {
FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
script->set_eval_from_shared(
@@ -1308,13 +1273,12 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
}
script->set_eval_from_position(eval_position);
- ParseInfo parse_info(script);
parse_info.set_eval();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
parse_info.set_parameters_end_pos(parameters_end_pos);
if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info()));
+ parse_info.set_outer_scope_info(handle(context->scope_info(), isolate));
}
DCHECK(!parse_info.is_module());
@@ -1399,7 +1363,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
int eval_scope_position = 0;
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(
- native_context->empty_function()->shared());
+ native_context->empty_function()->shared(), isolate);
return Compiler::GetFunctionFromEval(
source, outer_info, native_context, LanguageMode::kSloppy, restriction,
parameters_end_pos, eval_scope_position, eval_position);
@@ -1602,29 +1566,20 @@ struct ScriptCompileTimerScope {
}
};
-Handle<Script> NewScript(Isolate* isolate, Handle<String> source,
+Handle<Script> NewScript(Isolate* isolate, ParseInfo* parse_info,
+ Handle<String> source,
Compiler::ScriptDetails script_details,
ScriptOriginOptions origin_options,
NativesFlag natives) {
// Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
- if (natives == NATIVES_CODE) {
- script->set_type(Script::TYPE_NATIVE);
- } else if (natives == EXTENSION_CODE) {
- script->set_type(Script::TYPE_EXTENSION);
- } else if (natives == INSPECTOR_CODE) {
- script->set_type(Script::TYPE_INSPECTOR);
- }
+ Handle<Script> script =
+ parse_info->CreateScript(isolate, source, origin_options, natives);
Handle<Object> script_name;
if (script_details.name_obj.ToHandle(&script_name)) {
script->set_name(*script_name);
script->set_line_offset(script_details.line_offset);
script->set_column_offset(script_details.column_offset);
}
- script->set_origin_options(origin_options);
Handle<Object> source_map_url;
if (script_details.source_map_url.ToHandle(&source_map_url)) {
script->set_source_mapping_url(*source_map_url);
@@ -1633,6 +1588,7 @@ Handle<Script> NewScript(Isolate* isolate, Handle<String> source,
if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
script->set_host_defined_options(*host_defined_options);
}
+ LOG(isolate, ScriptDetails(*script));
return script;
}
@@ -1666,7 +1622,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (extension == nullptr) {
bool can_consume_code_cache =
compile_options == ScriptCompiler::kConsumeCodeCache &&
- !isolate->debug()->is_loaded();
+ !isolate->debug()->is_active();
if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
}
@@ -1687,7 +1643,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
- if (CodeSerializer::Deserialize(isolate, cached_data, source)
+ if (CodeSerializer::Deserialize(isolate, cached_data, source,
+ origin_options)
.ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
DCHECK(inner_result->is_compiled());
@@ -1703,12 +1660,12 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
if (maybe_result.is_null()) {
+ ParseInfo parse_info(isolate);
// No cache entry found compile the script.
- Handle<Script> script =
- NewScript(isolate, source, script_details, origin_options, natives);
+ NewScript(isolate, &parse_info, source, script_details, origin_options,
+ natives);
// Compile the function and add it to the isolate cache.
- ParseInfo parse_info(script);
Zone compile_zone(isolate->allocator(), ZONE_NAME);
if (origin_options.IsModule()) parse_info.set_module();
parse_info.set_extension(extension);
@@ -1737,7 +1694,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
ScriptOriginOptions origin_options, ScriptData* cached_data,
v8::ScriptCompiler::CompileOptions compile_options,
v8::ScriptCompiler::NoCacheReason no_cache_reason) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = context->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
if (compile_options == ScriptCompiler::kNoCompileOptions ||
@@ -1756,7 +1713,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
MaybeHandle<SharedFunctionInfo> maybe_result;
bool can_consume_code_cache =
compile_options == ScriptCompiler::kConsumeCodeCache &&
- !isolate->debug()->is_loaded();
+ !isolate->debug()->is_active();
if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
@@ -1765,7 +1722,8 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
- maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source);
+ maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source,
+ origin_options);
if (maybe_result.is_null()) {
// Deserializer failed. Fall through to compile.
compile_timer.set_consuming_code_cache_failed();
@@ -1775,16 +1733,16 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<SharedFunctionInfo> wrapped;
Handle<Script> script;
if (!maybe_result.ToHandle(&wrapped)) {
- script = NewScript(isolate, source, script_details, origin_options,
- NOT_NATIVES_CODE);
+ ParseInfo parse_info(isolate);
+ script = NewScript(isolate, &parse_info, source, script_details,
+ origin_options, NOT_NATIVES_CODE);
script->set_wrapped_arguments(*arguments);
- ParseInfo parse_info(script);
parse_info.set_eval(); // Use an eval scope as declaration scope.
parse_info.set_wrapped_as_function();
// parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info()));
+ parse_info.set_outer_scope_info(handle(context->scope_info(), isolate));
}
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
@@ -1794,7 +1752,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
if (maybe_result.is_null()) isolate->ReportPendingMessages();
ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level, maybe_result, JSFunction);
- SharedFunctionInfo::ScriptIterator infos(script);
+ SharedFunctionInfo::ScriptIterator infos(isolate, *script);
while (SharedFunctionInfo* info = infos.Next()) {
if (info->is_wrapped()) {
wrapped = Handle<SharedFunctionInfo>(info, isolate);
@@ -1846,9 +1804,9 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
if (maybe_result.is_null()) {
// No cache entry found, finalize compilation of the script and add it to
// the isolate cache.
- Handle<Script> script = NewScript(isolate, source, script_details,
- origin_options, NOT_NATIVES_CODE);
- parse_info->set_script(script);
+ Handle<Script> script =
+ NewScript(isolate, parse_info, source, script_details, origin_options,
+ NOT_NATIVES_CODE);
streaming_data->parser->UpdateStatistics(isolate, script);
streaming_data->parser->HandleSourceURLComments(isolate, script);
@@ -1933,7 +1891,7 @@ bool Compiler::FinalizeCompilationJob(UnoptimizedCompilationJob* raw_job,
void Compiler::PostInstantiation(Handle<JSFunction> function,
PretenureFlag pretenure) {
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() && !shared->HasAsmWasmData() &&
@@ -1962,8 +1920,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (shared->is_toplevel() || shared->is_wrapped()) {
// If it's a top-level script, report compilation to the debugger.
- Handle<Script> script(handle(Script::cast(shared->script())));
- script->GetIsolate()->debug()->OnAfterCompile(script);
+ Handle<Script> script(
+ handle(Script::cast(shared->script()), function->GetIsolate()));
+ function->GetIsolate()->debug()->OnAfterCompile(script);
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 1477b4f3f1..4789759dfc 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -57,7 +57,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ClearExceptionFlag flag);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
- static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
+ CompileForLiveEdit(ParseInfo* parse_info, Isolate* isolate);
// Creates a new task that when run will parse and compile the streamed
// script associated with |streaming_data| and can be finalized with
@@ -300,7 +302,6 @@ class OptimizedCompilationJob : public CompilationJob {
OptimizedCompilationInfo* compilation_info() const {
return compilation_info_;
}
- virtual size_t AllocatedMemory() const { return 0; }
protected:
// Overridden by the actual implementation.
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index e7c4b7542f..40783a3511 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -22,4 +22,6 @@ per-file wasm-*=kschimpf@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
+per-file simd-scalar-lowering.*=aseemgarg@chromium.org
+
# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 02c5d8a2a0..0b78795e00 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -10,6 +10,9 @@
#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments.h"
+#include "src/objects/js-collection.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -82,9 +85,9 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
// static
-FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
+FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
int index) {
- int const offset = map->GetInObjectPropertyOffset(index);
+ int const offset = map.GetInObjectPropertyOffset(index);
FieldAccess access = {kTaggedBase, offset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::NonInternal(), MachineType::AnyTagged(),
@@ -259,9 +262,9 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
// static
-FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
+FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
FieldAccess access = {
- kTaggedBase, JSGeneratorObject::kRegisterFileOffset,
+ kTaggedBase, JSGeneratorObject::kParametersAndRegistersOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::AnyTagged(),
kPointerWriteBarrier};
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 4e8107d859..945edf3014 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForJSObjectElements();
// Provides access to JSObject inobject property fields.
- static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+ static FieldAccess ForJSObjectInObjectProperty(const MapRef& map, int index);
static FieldAccess ForJSObjectOffset(
int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
@@ -94,8 +94,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
- // Provides access to JSGeneratorObject::register_file() field.
- static FieldAccess ForJSGeneratorObjectRegisterFile();
+ // Provides access to JSGeneratorObject::parameters_and_registers() field.
+ static FieldAccess ForJSGeneratorObjectParametersAndRegisters();
// Provides access to JSGeneratorObject::function() field.
static FieldAccess ForJSGeneratorObjectFunction();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index f33555cbe3..62ed7e7d85 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -4,14 +4,16 @@
#include <ostream>
-#include "src/accessors.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/access-info.h"
+
+#include "src/accessors.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/type-cache.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/templates.h"
namespace v8 {
@@ -88,7 +90,8 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, MachineRepresentation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
- Kind kind = constness == kConst ? kDataConstantField : kDataField;
+ Kind kind =
+ constness == PropertyConstness::kConst ? kDataConstantField : kDataField;
return PropertyAccessInfo(kind, holder, transition_map, field_index,
field_representation, field_type, field_map,
receiver_maps);
@@ -234,9 +237,11 @@ Handle<Cell> PropertyAccessInfo::export_cell() const {
return Handle<Cell>::cast(constant_);
}
-AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
+AccessInfoFactory::AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+ CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
- : dependencies_(dependencies),
+ : js_heap_broker_(js_heap_broker),
+ dependencies_(dependencies),
native_context_(native_context),
isolate_(native_context->GetIsolate()),
type_cache_(TypeCache::Get()),
@@ -273,7 +278,7 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (Handle<Map> map : maps) {
- if (Map::TryUpdate(map).ToHandle(&map)) {
+ if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
if (CanInlineElementAccess(map) &&
IsFastElementsKind(map->elements_kind()) &&
GetInitialFastElementsKind() != map->elements_kind()) {
@@ -287,16 +292,17 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
receiver_maps.reserve(maps.size());
MapTransitionList transitions(maps.size());
for (Handle<Map> map : maps) {
- if (Map::TryUpdate(map).ToHandle(&map)) {
+ if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
// Don't generate elements kind transitions from stable maps.
- Map* transition_target = map->is_stable()
- ? nullptr
- : map->FindElementsKindTransitionedMap(
- possible_transition_targets);
+ Map* transition_target =
+ map->is_stable() ? nullptr
+ : map->FindElementsKindTransitionedMap(
+ isolate(), possible_transition_targets);
if (transition_target == nullptr) {
receiver_maps.push_back(map);
} else {
- transitions.push_back(std::make_pair(map, handle(transition_target)));
+ transitions.push_back(
+ std::make_pair(map, handle(transition_target, isolate())));
}
}
}
@@ -344,7 +350,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
do {
// Lookup the named property on the {map}.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int const number = descriptors->SearchWithCache(isolate(), *name, *map);
+ int const number = descriptors->Search(*name, *map);
if (number != DescriptorArray::kNotFound) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore ||
@@ -390,13 +396,11 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
} else if (descriptors_field_type->IsClass()) {
- // Add proper code dependencies in case of stable field map(s).
- Handle<Map> field_owner_map(map->FindFieldOwner(number),
- isolate());
- dependencies()->AssumeFieldOwner(field_owner_map);
-
+ dependencies()->DependOnFieldType(MapRef(js_heap_broker(), map),
+ number);
// Remember the field map, and try to infer a useful type.
- field_type = Type::For(descriptors_field_type->AsClass());
+ field_type = Type::For(js_heap_broker(),
+ descriptors_field_type->AsClass());
field_map = descriptors_field_type->AsClass();
}
}
@@ -416,7 +420,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK(!FLAG_track_constant_fields);
*access_info = PropertyAccessInfo::DataConstant(
MapHandles{receiver_map},
- handle(descriptors->GetValue(number), isolate()), holder);
+ handle(descriptors->GetStrongValue(number), isolate()), holder);
return true;
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -431,7 +435,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
isolate());
Handle<Cell> cell(
Cell::cast(module_namespace->module()->exports()->Lookup(
- isolate(), name, Smi::ToInt(name->GetHash()))),
+ ReadOnlyRoots(isolate()), name,
+ Smi::ToInt(name->GetHash()))),
isolate());
if (cell->value()->IsTheHole(isolate())) {
// This module has not been fully initialized yet.
@@ -441,7 +446,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
MapHandles{receiver_map}, cell);
return true;
}
- Handle<Object> accessors(descriptors->GetValue(number), isolate());
+ Handle<Object> accessors(descriptors->GetStrongValue(number),
+ isolate());
if (!accessors->IsAccessorPair()) return false;
Handle<Object> accessor(
access_mode == AccessMode::kLoad
@@ -449,7 +455,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
: Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
if (!accessor->IsJSFunction()) {
- CallOptimization optimization(accessor);
+ CallOptimization optimization(isolate(), accessor);
if (!optimization.is_simple_api_call()) return false;
if (optimization.IsCrossContextLazyAccessorPair(*native_context_,
*map)) {
@@ -554,7 +560,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
for (Handle<Map> map : maps) {
- if (Map::TryUpdate(map).ToHandle(&map)) {
+ if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
PropertyAccessInfo access_info;
if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
return false;
@@ -618,17 +624,17 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
// Check for special JSObject field accessors.
FieldIndex field_index;
- if (Accessors::IsJSObjectFieldAccessor(map, name, &field_index)) {
+ if (Accessors::IsJSObjectFieldAccessor(isolate(), map, name, &field_index)) {
Type field_type = Type::NonInternal();
MachineRepresentation field_representation = MachineRepresentation::kTagged;
if (map->IsStringMap()) {
- DCHECK(Name::Equals(factory()->length_string(), name));
+ DCHECK(Name::Equals(isolate(), factory()->length_string(), name));
// The String::length property is always a smi in the range
// [0, String::kMaxLength].
field_type = type_cache_.kStringLengthType;
field_representation = MachineRepresentation::kTaggedSigned;
} else if (map->IsJSArrayMap()) {
- DCHECK(Name::Equals(factory()->length_string(), name));
+ DCHECK(Name::Equals(isolate(), factory()->length_string(), name));
// The JSArray::length property is a smi in the range
// [0, FixedDoubleArray::kMaxLength] in case of fast double
// elements, a smi in the range [0, FixedArray::kMaxLength]
@@ -645,9 +651,9 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
}
}
// Special fields are always mutable.
- *access_info =
- PropertyAccessInfo::DataField(kMutable, MapHandles{map}, field_index,
- field_representation, field_type);
+ *access_info = PropertyAccessInfo::DataField(
+ PropertyConstness::kMutable, MapHandles{map}, field_index,
+ field_representation, field_type);
return true;
}
return false;
@@ -659,10 +665,10 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
PropertyAccessInfo* access_info) {
// Check if the {map} has a data transition with the given {name}.
Map* transition =
- TransitionsAccessor(map).SearchTransition(*name, kData, NONE);
+ TransitionsAccessor(isolate(), map).SearchTransition(*name, kData, NONE);
if (transition == nullptr) return false;
- Handle<Map> transition_map(transition);
+ Handle<Map> transition_map(transition, isolate());
int const number = transition_map->LastAdded();
PropertyDetails const details =
transition_map->instance_descriptors()->GetDetails(number);
@@ -694,21 +700,19 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Store is not safe if the field type was cleared.
return false;
} else if (descriptors_field_type->IsClass()) {
- // Add proper code dependencies in case of stable field map(s).
- Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
- isolate());
- dependencies()->AssumeFieldOwner(field_owner_map);
-
+ dependencies()->DependOnFieldType(
+ MapRef(js_heap_broker(), transition_map), number);
// Remember the field map, and try to infer a useful type.
- field_type = Type::For(descriptors_field_type->AsClass());
+ field_type =
+ Type::For(js_heap_broker(), descriptors_field_type->AsClass());
field_map = descriptors_field_type->AsClass();
}
}
- dependencies()->AssumeMapNotDeprecated(transition_map);
+ dependencies()->DependOnTransition(MapRef(js_heap_broker(), transition_map));
// Transitioning stores are never stores to constant fields.
*access_info = PropertyAccessInfo::DataField(
- kMutable, MapHandles{map}, field_index, field_representation, field_type,
- field_map, holder, transition_map);
+ PropertyConstness::kMutable, MapHandles{map}, field_index,
+ field_representation, field_type, field_map, holder, transition_map);
return true;
}
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index d591cc219b..fa737ce0c4 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -18,12 +18,12 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationDependencies;
class Factory;
namespace compiler {
// Forward declarations.
+class CompilationDependencies;
class Type;
class TypeCache;
@@ -140,7 +140,9 @@ class PropertyAccessInfo final {
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
- AccessInfoFactory(CompilationDependencies* dependencies,
+ AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+ CompilationDependencies* dependencies,
+
Handle<Context> native_context, Zone* zone);
bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
@@ -167,11 +169,13 @@ class AccessInfoFactory final {
PropertyAccessInfo* access_info);
CompilationDependencies* dependencies() const { return dependencies_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Factory* factory() const;
Isolate* isolate() const { return isolate_; }
Handle<Context> native_context() const { return native_context_; }
Zone* zone() const { return zone_; }
+ const JSHeapBroker* const js_heap_broker_;
CompilationDependencies* const dependencies_;
Handle<Context> const native_context_;
Isolate* const isolate_;
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 167a8bdb54..4aabac1c11 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -75,6 +75,10 @@ class AllocationBuilder final {
void Store(const FieldAccess& access, Handle<Object> value) {
Store(access, jsgraph()->Constant(value));
}
+ // Compound store of a constant into a field.
+ void Store(const FieldAccess& access, const ObjectRef& value) {
+ Store(access, jsgraph()->Constant(value));
+ }
void FinishAndChange(Node* node) {
NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
@@ -90,6 +94,7 @@ class AllocationBuilder final {
protected:
JSGraph* jsgraph() { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
Graph* graph() { return jsgraph_->graph(); }
CommonOperatorBuilder* common() { return jsgraph_->common(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 3d2c3d2871..d129274863 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -669,19 +669,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- if (info()->IsWasm()) scope.Open(tasm());
-
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ add(scratch, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(scratch);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(reg);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -689,19 +685,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallWasmFunction: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- scope.Open(tasm());
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Call(wasm_code, rmode);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Call(wasm_code, constant.rmode());
} else {
__ Call(i.InputRegister(0));
}
@@ -712,11 +699,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- if (info()->IsWasm()) scope.Open(tasm());
-
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -725,11 +707,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ add(scratch, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(scratch);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(reg);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -738,19 +721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallWasm: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- scope.Open(tasm());
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Jump(wasm_code, rmode);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Jump(wasm_code, constant.rmode());
} else {
__ Jump(i.InputRegister(0));
}
@@ -762,7 +736,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -854,6 +832,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -919,13 +900,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ mov(i.OutputRegister(), kRootRegister);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -2354,18 +2331,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
- // Check for in-place shuffles.
- // If dst == src0 == src1, then the shuffle is unary and we only use src0.
- if (dst == src0) {
- Simd128Register scratch = temps.AcquireQ();
- __ vmov(scratch, src0);
- src0 = scratch;
- } else if (dst == src1) {
- Simd128Register scratch = temps.AcquireQ();
- __ vmov(scratch, src1);
- src1 = scratch;
- }
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
// Perform shuffle as a vmov per lane.
int dst_code = dst.code() * 4;
int src0_code = src0.code() * 4;
@@ -2789,31 +2756,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
ArmOperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2829,8 +2784,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2841,12 +2797,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Condition cc = FlagsConditionToCondition(condition);
__ b(cc, tlabel);
@@ -2866,6 +2820,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ mov(reg, Operand(1), LeaveCC, cc);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
ArmOperandConverter i(this, instr);
@@ -2959,47 +2923,42 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
- if (info()->IsWasm()) {
- if (shrink_slots > 128) {
- // For WebAssembly functions with big frames we have to do the stack
- // overflow check before we construct the frame. Otherwise we may not
- // have enough space on the stack to call the runtime for the stack
- // overflow.
- Label done;
-
- // If the frame is bigger than the stack, we throw the stack overflow
- // exception unconditionally. Thereby we can avoid the integer overflow
- // check in the condition code.
- if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ Move(scratch,
- Operand(ExternalReference::address_of_real_stack_limit(
- __ isolate())));
- __ ldr(scratch, MemOperand(scratch));
- __ add(scratch, scratch, Operand(shrink_slots * kPointerSize));
- __ cmp(sp, scratch);
- __ b(cs, &done);
- }
-
- if (!frame_access_state()->has_frame()) {
- __ set_has_frame(true);
- // There is no need to leave the frame, we will not return from the
- // runtime call.
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
- __ Move(cp, Smi::kZero);
- __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
- // We come from WebAssembly, there are no references for the GC.
- ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
- }
+ DCHECK(frame_access_state()->has_frame());
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ldr(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ ldr(scratch, MemOperand(scratch));
+ __ add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ cmp(sp, scratch);
+ __ b(cs, &done);
+ }
- __ bind(&done);
+ __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
+ __ Move(cp, Smi::kZero);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r2);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
+
+ __ bind(&done);
}
// Skip callee-saved and return slots, which are pushed below.
@@ -3105,6 +3064,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Move(dst, src_object);
}
+ } else if (src.type() == Constant::kExternalReference) {
+ __ Move(dst, src.ToExternalReference());
} else {
__ mov(dst, g.ToImmediate(source));
}
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 4f6702b67a..8fc5779112 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -1897,11 +1897,10 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
@@ -2445,7 +2444,9 @@ static const ShuffleEntry arch_shuffles[] = {
{{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
for (size_t i = 0; i < num_entries; ++i) {
const ShuffleEntry& entry = table[i];
int j = 0;
@@ -2477,48 +2478,51 @@ void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
- uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
uint8_t shuffle32x4[4];
ArmOperandGenerator g(this);
int index = 0;
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
if (TryMatchDup<4>(shuffle, &index)) {
- InstructionOperand src = index < 4 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArmS128Dup, g.DefineAsRegister(node), src, g.UseImmediate(Neon32),
- g.UseImmediate(index % 4));
+ DCHECK_GT(4, index);
+ Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(Neon32), g.UseImmediate(index % 4));
+ } else if (TryMatchIdentity(shuffle)) {
+ EmitIdentity(node);
} else {
- Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ // 32x4 shuffles are implemented as s-register moves. To simplify these,
+ // make sure the destination is distinct from both sources.
+ InstructionOperand src0 = g.UseUniqueRegister(input0);
+ InstructionOperand src1 = is_swizzle ? src0 : g.UseUniqueRegister(input1);
+ Emit(kArmS32x4Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle32x4)));
}
return;
}
if (TryMatchDup<8>(shuffle, &index)) {
- InstructionOperand src = index < 8 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArmS128Dup, g.DefineAsRegister(node), src, g.UseImmediate(Neon16),
- g.UseImmediate(index % 8));
+ DCHECK_GT(8, index);
+ Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(Neon16), g.UseImmediate(index % 8));
return;
}
if (TryMatchDup<16>(shuffle, &index)) {
- InstructionOperand src = index < 16 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArmS128Dup, g.DefineAsRegister(node), src, g.UseImmediate(Neon8),
- g.UseImmediate(index % 16));
+ DCHECK_GT(16, index);
+ Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(Neon8), g.UseImmediate(index % 16));
return;
}
ArchOpcode opcode;
if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- mask, &opcode)) {
+ is_swizzle, &opcode)) {
VisitRRRShuffle(this, opcode, node);
return;
}
- Node* input0 = node->InputAt(0);
- Node* input1 = node->InputAt(1);
uint8_t offset;
- if (TryMatchConcat(shuffle, mask, &offset)) {
+ if (TryMatchConcat(shuffle, &offset)) {
Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1), g.UseImmediate(offset));
return;
@@ -2527,10 +2531,10 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
InstructionOperand src0, src1;
ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
Emit(kArmS8x16Shuffle, g.DefineAsRegister(node), src0, src1,
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+ g.UseImmediate(Pack4Lanes(shuffle)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 751ad4599b..a7c5beee4c 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -130,6 +130,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister32(index), SXTW);
case kMode_MRI:
case kMode_MRR:
+ case kMode_Root:
break;
}
UNREACHABLE();
@@ -159,13 +160,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister64(index), SXTW);
case kMode_MRI:
case kMode_MRR:
+ case kMode_Root:
break;
}
UNREACHABLE();
}
- MemOperand MemoryOperand(size_t* first_index) {
- const size_t index = *first_index;
+ MemOperand MemoryOperand(size_t index = 0) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Operand2_R_LSR_I:
@@ -177,24 +178,19 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
case kMode_Operand2_R_SXTH:
case kMode_Operand2_R_SXTW:
break;
+ case kMode_Root:
+ return MemOperand(kRootRegister, InputInt64(index));
case kMode_Operand2_R_LSL_I:
- *first_index += 3;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
LSL, InputInt32(index + 2));
case kMode_MRI:
- *first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
case kMode_MRR:
- *first_index += 2;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
}
- MemOperand MemoryOperand(size_t first_index = 0) {
- return MemoryOperand(&first_index);
- }
-
Operand ToOperand(InstructionOperand* op) {
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -601,17 +597,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- if (info()->IsWasm()) scope.Open(tasm());
-
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- Register target = i.InputRegister(0);
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Call(target);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -619,13 +613,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- if (info()->IsWasm()) {
- __ Call(wasm_code, RelocInfo::WASM_CALL);
- } else {
- __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
- }
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
} else {
Register target = i.InputRegister(0);
__ Call(target);
@@ -636,11 +626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- // We must not share code targets for calls to builtins for wasm code, as
- // they might need to be patched individually.
- internal::Assembler::BlockCodeTargetSharingScope scope;
- if (info()->IsWasm()) scope.Open(tasm());
-
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -649,9 +634,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- Register target = i.InputRegister(0);
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(target);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -660,14 +648,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallWasm: {
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- if (info()->IsWasm()) {
- __ Jump(wasm_code, RelocInfo::WASM_CALL);
- } else {
- __ Jump(wasm_code, RelocInfo::JS_TO_WASM_CALL);
- }
-
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
} else {
Register target = i.InputRegister(0);
__ Jump(target);
@@ -679,7 +662,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -774,6 +761,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -829,12 +819,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ mov(i.OutputRegister(), kRootRegister);
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -2251,29 +2238,18 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
Arm64OperandConverter i(gen_, instr_);
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ CallCFunction(
@@ -2287,8 +2263,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2299,12 +2276,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Condition cc = FlagsConditionToCondition(condition);
__ B(cc, tlabel);
@@ -2323,6 +2298,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Cset(reg, cc);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ Register input = i.InputRegister32(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
@@ -2433,23 +2418,17 @@ void CodeGenerator::AssembleConstructFrame() {
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
- __ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit(
- __ isolate())));
+ __ Ldr(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, shrink_slots * kPointerSize);
__ Cmp(sp, scratch);
__ B(hs, &done);
}
- if (!frame_access_state()->has_frame()) {
- __ set_has_frame(true);
- // There is no need to leave the frame, we will not return from the
- // runtime call.
- __ EnterFrame(StackFrame::WASM_COMPILED);
- } else {
+ {
// Finish the frame that hasn't been fully built yet.
- // TODO(mstarzinger): This is a work-around, deferred frame building is
- // actually no longer supported, remove the associated code.
UseScratchRegisterScope temps(tasm());
__ Claim(2); // Claim extra slots for marker + instance.
Register scratch = temps.AcquireX();
@@ -2459,8 +2438,10 @@ void CodeGenerator::AssembleConstructFrame() {
__ Str(kWasmInstanceRegister,
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
}
+ __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
__ Mov(cp, Smi::kZero);
- __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, x2);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2599,8 +2580,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
- __ Mov(dst, src_object);
+ __ Move(dst, src_object);
}
+ } else if (src.type() == Constant::kExternalReference) {
+ __ Mov(dst, src.ToExternalReference());
} else {
__ Mov(dst, g.ToImmediate(source));
}
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index c5f4b19c16..ce73515321 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -362,7 +362,8 @@ namespace compiler {
V(Operand2_R_UXTH) /* %r0 UXTH (unsigned extend halfword) */ \
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */ \
- V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */
+ V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */ \
+ V(Root) /* [%rr + K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 7f388cde3f..e07debf9ec 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -49,6 +49,16 @@ class Arm64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
+ // register.
+ InstructionOperand UseRegisterOrStackPointer(Node* node, bool sp_allowed) {
+ if (sp_allowed && node->opcode() == IrOpcode::kLoadStackPointer)
+ return LocationOperand(LocationOperand::EXPLICIT,
+ LocationOperand::REGISTER,
+ MachineRepresentation::kWord64, sp.code());
+ return UseRegister(node);
+ }
+
// Use the provided node if it has the required value, or create a
// TempImmediate otherwise.
InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -550,6 +560,27 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
// If output is not nullptr, use that as the output register. This
// is used when we merge a conversion into the load.
outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
+
+ if (selector->CanAddressRelativeToRootsRegister()) {
+ ExternalReferenceMatcher m(base);
+ if (m.HasValue() && g.IsIntegerConstant(index)) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.Value());
+ input_count = 1;
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode, arraysize(outputs), outputs, input_count,
+ inputs);
+ return;
+ }
+ }
+ }
+
inputs[0] = g.UseRegister(base);
if (g.CanBeImmediate(index, immediate_mode)) {
@@ -1747,17 +1778,21 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
+ if (right->opcode() == IrOpcode::kLoadStackPointer ||
+ g.CanBeImmediate(left, immediate_mode)) {
+ if (!commutative) cont->Commute();
+ std::swap(left, right);
+ }
+
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, immediate_mode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else if (g.CanBeImmediate(left, immediate_mode)) {
- if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ VisitCompare(selector, opcode,
+ g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
+ g.UseImmediate(right), cont);
} else {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
- cont);
+ VisitCompare(selector, opcode,
+ g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
+ g.UseRegister(right), cont);
}
}
@@ -2374,11 +2409,10 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* const user = node;
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -3088,7 +3122,9 @@ static const ShuffleEntry arch_shuffles[] = {
kArm64S8x2Reverse}};
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
for (size_t i = 0; i < num_entries; i++) {
const ShuffleEntry& entry = table[i];
int j = 0;
@@ -3120,60 +3156,59 @@ void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
- uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
uint8_t shuffle32x4[4];
Arm64OperandGenerator g(this);
ArchOpcode opcode;
if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- mask, &opcode)) {
+ is_swizzle, &opcode)) {
VisitRRR(this, opcode, node);
return;
}
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t bias;
- if (TryMatchConcat(shuffle, mask, &bias)) {
+ uint8_t offset;
+ if (TryMatchConcat(shuffle, &offset)) {
Emit(kArm64S8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(bias));
+ g.UseRegister(input1), g.UseImmediate(offset));
return;
}
int index = 0;
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
if (TryMatchDup<4>(shuffle, &index)) {
- InstructionOperand src = index < 4 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArm64S128Dup, g.DefineAsRegister(node), src, g.UseImmediate(4),
- g.UseImmediate(index % 4));
+ DCHECK_GT(4, index);
+ Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(4), g.UseImmediate(index % 4));
+ } else if (TryMatchIdentity(shuffle)) {
+ EmitIdentity(node);
} else {
- Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
}
return;
}
if (TryMatchDup<8>(shuffle, &index)) {
- InstructionOperand src = index < 8 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArm64S128Dup, g.DefineAsRegister(node), src, g.UseImmediate(8),
- g.UseImmediate(index % 8));
+ DCHECK_GT(8, index);
+ Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(8), g.UseImmediate(index % 8));
return;
}
if (TryMatchDup<16>(shuffle, &index)) {
- InstructionOperand src = index < 16 ? g.UseRegister(node->InputAt(0))
- : g.UseRegister(node->InputAt(1));
- Emit(kArm64S128Dup, g.DefineAsRegister(node), src, g.UseImmediate(16),
- g.UseImmediate(index % 16));
+ DCHECK_GT(16, index);
+ Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseImmediate(16), g.UseImmediate(index % 16));
return;
}
// Code generator uses vtbl, arrange sources to form a valid lookup table.
InstructionOperand src0, src1;
ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
Emit(kArm64S8x16Shuffle, g.DefineAsRegister(node), src0, src1,
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+ g.UseImmediate(Pack4Lanes(shuffle)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index d3ed9241f2..741de6f264 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -247,13 +247,15 @@ bool BranchElimination::ControlPathConditions::LookupCondition(
}
}
return false;
- }
+}
- Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
+Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
- CommonOperatorBuilder* BranchElimination::common() const {
- return jsgraph()->common();
- }
+Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* BranchElimination::common() const {
+ return jsgraph()->common();
+}
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index de3b9e5b2e..23881ebd0a 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -72,6 +72,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
Node* dead() const { return dead_; }
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
JSGraph* const jsgraph_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index ade917de47..2d16ba525c 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,7 +14,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
-#include "src/objects/literal-objects.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -521,7 +521,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
- bytecode_array_(handle(shared_info->GetBytecodeArray())),
+ bytecode_array_(
+ handle(shared_info->GetBytecodeArray(), jsgraph->isolate())),
feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
@@ -878,11 +879,10 @@ void BytecodeGraphBuilder::VisitBytecodes() {
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
SourcePositionTableIterator source_position_iterator(
- handle(bytecode_array()->SourcePositionTable()));
+ handle(bytecode_array()->SourcePositionTable(), isolate()));
if (analyze_environment_liveness() && FLAG_trace_environment_liveness) {
- OFStream of(stdout);
-
+ StdoutStream of;
bytecode_analysis.PrintLivenessTo(of);
}
@@ -918,8 +918,8 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
}
void BytecodeGraphBuilder::VisitLdaConstant() {
- Node* node =
- jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ Node* node = jsgraph()->Constant(
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
environment()->BindAccumulator(node);
}
@@ -976,8 +976,8 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -986,8 +986,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -996,8 +996,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1122,8 +1122,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
- Node* name =
- jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ Node* name = jsgraph()->Constant(
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1208,7 +1208,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1243,8 +1243,9 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1260,7 +1261,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1289,8 +1290,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
- Node* name =
- jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ Node* name = jsgraph()->Constant(
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -1314,8 +1315,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
@@ -1362,8 +1363,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
@@ -1461,8 +1462,10 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<SharedFunctionInfo> shared_info(
+ SharedFunctionInfo::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
FeedbackNexus nexus(feedback_vector(), slot);
PretenureFlag tenured =
@@ -1472,15 +1475,17 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
: NOT_TENURED;
const Operator* op = javascript()->CreateClosure(
shared_info, nexus.GetFeedbackCell(),
- handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy)),
+ handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy),
+ isolate()),
tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ScopeInfo> scope_info(
+ ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* context = NewNode(op);
@@ -1488,8 +1493,9 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ScopeInfo> scope_info(
+ ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
@@ -1498,8 +1504,9 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ScopeInfo> scope_info(
+ ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
@@ -1510,8 +1517,9 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(1));
+ Handle<ScopeInfo> scope_info(
+ ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
+ isolate());
const Operator* op = javascript()->CreateCatchContext(scope_info);
Node* context = NewNode(op, exception);
@@ -1521,8 +1529,9 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
- bytecode_iterator().GetConstantForIndexOperand(1));
+ Handle<ScopeInfo> scope_info(
+ ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
+ isolate());
const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object);
@@ -1548,8 +1557,9 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- Handle<String> constant_pattern =
- Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<String> constant_pattern(
+ String::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -1559,9 +1569,10 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<ConstantElementsPair> constant_elements =
- Handle<ConstantElementsPair>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description(
+ ArrayBoilerplateDescription::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -1574,9 +1585,10 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
literal_flags |= ArrayLiteral::kDisableMementos;
// TODO(mstarzinger): Thread through number of elements. The below number is
// only an estimate and does not match {ArrayLiteral::values::length}.
- int number_of_elements = constant_elements->constant_values()->length();
+ int number_of_elements =
+ array_boilerplate_description->constant_elements()->length();
Node* literal = NewNode(javascript()->CreateLiteralArray(
- constant_elements, pair, literal_flags, number_of_elements));
+ array_boilerplate_description, pair, literal_flags, number_of_elements));
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
@@ -1588,9 +1600,10 @@ void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- Handle<BoilerplateDescription> constant_properties =
- Handle<BoilerplateDescription>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ObjectBoilerplateDescription> constant_properties(
+ ObjectBoilerplateDescription::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -1612,21 +1625,24 @@ void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
}
void BytecodeGraphBuilder::VisitGetTemplateObject() {
- Handle<TemplateObjectDescription> description =
- Handle<TemplateObjectDescription>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<TemplateObjectDescription> description(
+ TemplateObjectDescription::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0)),
+ isolate());
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
FeedbackNexus nexus(feedback_vector(), slot);
Handle<JSArray> cached_value;
- if (nexus.GetFeedback() == Smi::kZero) {
+ if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::kZero)) {
// It's not observable when the template object is created, so we
// can just create it eagerly during graph building and bake in
// the JSArray constant here.
- cached_value = TemplateObjectDescription::CreateTemplateObject(description);
+ cached_value =
+ TemplateObjectDescription::CreateTemplateObject(isolate(), description);
nexus.vector()->Set(slot, *cached_value);
} else {
- cached_value = handle(JSArray::cast(nexus.GetFeedback()));
+ cached_value = handle(
+ JSArray::cast(nexus.GetFeedback()->ToStrongHeapObject()), isolate());
}
Node* template_object = jsgraph()->HeapConstant(cached_value);
@@ -2082,8 +2098,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* accumulator = environment()->LookupAccumulator();
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
- Node* name =
- jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ Node* name = jsgraph()->Constant(
+ handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowReferenceError, name);
}
@@ -2791,6 +2807,9 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
CHECK_EQ(0, first_reg.index());
int register_count =
static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+ int parameter_count_without_receiver =
+ bytecode_array()->parameter_count() - 1;
+
Node* suspend_id = jsgraph()->SmiConstant(
bytecode_iterator().GetUnsignedImmediateOperand(3));
@@ -2807,7 +2826,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
// are live.
// TODO(leszeks): We could get this count from liveness rather than the
// register list.
- int value_input_count = 3 + register_count;
+ int value_input_count = 3 + parameter_count_without_receiver + register_count;
Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
value_inputs[0] = generator;
@@ -2815,14 +2834,24 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
value_inputs[2] = offset;
int count_written = 0;
+ // Store the parameters.
+ for (int i = 0; i < parameter_count_without_receiver; i++) {
+ value_inputs[3 + count_written++] =
+ environment()->LookupRegister(interpreter::Register::FromParameterIndex(
+ i, parameter_count_without_receiver));
+ }
+
+ // Store the registers.
for (int i = 0; i < register_count; ++i) {
if (liveness == nullptr || liveness->RegisterIsLive(i)) {
- while (count_written < i) {
+ int index_in_parameters_and_registers =
+ parameter_count_without_receiver + i;
+ while (count_written < index_in_parameters_and_registers) {
value_inputs[3 + count_written++] = jsgraph()->OptimizedOutConstant();
}
value_inputs[3 + count_written++] =
environment()->LookupRegister(interpreter::Register(i));
- DCHECK_EQ(count_written, i + 1);
+ DCHECK_EQ(count_written, index_in_parameters_and_registers + 1);
}
}
@@ -2920,12 +2949,16 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
bytecode_analysis()->GetOutLivenessFor(
bytecode_iterator().current_offset());
- // Bijection between registers and array indices must match that used in
- // InterpreterAssembler::ExportRegisterFile.
+ int parameter_count_without_receiver =
+ bytecode_array()->parameter_count() - 1;
+
+ // Mapping between registers and array indices must match that used in
+ // InterpreterAssembler::ExportParametersAndRegisterFile.
for (int i = 0; i < environment()->register_count(); ++i) {
if (liveness == nullptr || liveness->RegisterIsLive(i)) {
- Node* value =
- NewNode(javascript()->GeneratorRestoreRegister(i), generator);
+ Node* value = NewNode(javascript()->GeneratorRestoreRegister(
+ parameter_count_without_receiver + i),
+ generator);
environment()->BindRegister(interpreter::Register(i), value);
}
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 9025d477d5..a94a3d79af 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -302,6 +302,7 @@ class BytecodeGraphBuilder {
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
Zone* graph_zone() const { return graph()->zone(); }
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
SimplifiedOperatorBuilder* simplified() const {
return jsgraph_->simplified();
diff --git a/deps/v8/src/compiler/checkpoint-elimination.cc b/deps/v8/src/compiler/checkpoint-elimination.cc
index d44dfdff48..172d42845c 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.cc
+++ b/deps/v8/src/compiler/checkpoint-elimination.cc
@@ -39,6 +39,7 @@ Reduction CheckpointElimination::ReduceCheckpoint(Node* node) {
}
Reduction CheckpointElimination::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kCheckpoint:
return ReduceCheckpoint(node);
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index c9f323615a..0b77d10072 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -6,7 +6,6 @@
#include <ostream>
-#include "src/builtins/constants-table-builder.h"
#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
@@ -22,24 +21,9 @@
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
-#include "src/snapshot/serializer-common.h"
#include "src/utils.h"
#include "src/zone/zone.h"
-#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
-#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
-#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
-#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
-#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
-#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
-#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
-#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
-#define REPEAT_1_TO_10(V, T) REPEAT_1_TO_9(V, T) V(T, T, T, T, T, T, T, T, T, T)
-#define REPEAT_1_TO_11(V, T) \
- REPEAT_1_TO_10(V, T) V(T, T, T, T, T, T, T, T, T, T, T)
-#define REPEAT_1_TO_12(V, T) \
- REPEAT_1_TO_11(V, T) V(T, T, T, T, T, T, T, T, T, T, T, T)
-
namespace v8 {
namespace internal {
@@ -60,15 +44,14 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
Code::Kind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- size_t result_size, uint32_t stub_key, int32_t builtin_index)
+ uint32_t stub_key, int32_t builtin_index)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
- isolate, zone, descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size),
+ zone, descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties),
kind, name, poisoning_level, stub_key, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
@@ -184,7 +167,8 @@ PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
}
// static
-Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
+Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
+ const AssemblerOptions& options) {
DCHECK(!state->code_generated_);
RawMachineAssembler* rasm = state->raw_assembler_.get();
@@ -194,19 +178,24 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
bool should_optimize_jumps =
rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
- Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_level());
+ Handle<Code> code =
+ Pipeline::GenerateCodeForCodeStub(
+ rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+ state->kind_, state->name_, state->stub_key_, state->builtin_index_,
+ should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_level(),
+ options)
+ .ToHandleChecked();
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
// Regenerate machine code
- code = Pipeline::GenerateCodeForCodeStub(
- rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- &jump_opt, rasm->poisoning_level());
+ code =
+ Pipeline::GenerateCodeForCodeStub(
+ rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+ state->kind_, state->name_, state->stub_key_, state->builtin_index_,
+ &jump_opt, rasm->poisoning_level(), options)
+ .ToHandleChecked();
}
state->code_generated_ = true;
@@ -244,58 +233,20 @@ bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
: IsInt32AbsWithOverflowSupported();
}
-#ifdef V8_EMBEDDED_BUILTINS
-TNode<HeapObject> CodeAssembler::LookupConstant(Handle<HeapObject> object) {
- DCHECK(isolate()->ShouldLoadConstantsFromRootList());
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // The builtins constants table is loaded through the root register on all
- // supported platforms. This is checked by the
- // VerifyBuiltinsIsolateIndependence cctest, which disallows embedded objects
- // in isolate-independent builtins.
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
- TNode<FixedArray> builtins_constants_table = UncheckedCast<FixedArray>(
- LoadRoot(Heap::kBuiltinsConstantsTableRootIndex));
-
- // Generate the lookup.
- const int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> offset = IntPtrConstant(header_size + kPointerSize * index);
- return UncheckedCast<HeapObject>(
- Load(MachineType::AnyTagged(), builtins_constants_table, offset));
+#ifdef DEBUG
+void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
+ const char* location) {
+ Label ok(this);
+ GotoIf(WordNotEqual(WordAnd(BitcastMaybeObjectToWord(node),
+ IntPtrConstant(kHeapObjectTagMask)),
+ IntPtrConstant(kWeakHeapObjectTag)),
+ &ok);
+ Node* message_node = StringConstant(location);
+ DebugAbort(message_node);
+ Unreachable();
+ Bind(&ok);
}
-
-// External references are stored in the external reference table.
-TNode<ExternalReference> CodeAssembler::LookupExternalReference(
- ExternalReference reference) {
- DCHECK(isolate()->ShouldLoadConstantsFromRootList());
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- const intptr_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset()
-#ifdef V8_TARGET_ARCH_X64
- - kRootRegisterBias
#endif
- + ExternalReferenceTable::OffsetOfEntry(index);
-
- return UncheckedCast<ExternalReference>(
- Load(MachineType::Pointer(), LoadRootsPointer(),
- IntPtrConstant(roots_to_external_reference_offset)));
-}
-#endif // V8_EMBEDDED_BUILTINS
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
return UncheckedCast<Int32T>(raw_assembler()->Int32Constant(value));
@@ -318,8 +269,8 @@ TNode<Number> CodeAssembler::NumberConstant(double value) {
// deferring allocation to code generation
// (see AllocateAndInstallRequestedHeapObjects) since that makes it easier
// to generate constant lookups for embedded builtins.
- return UncheckedCast<Number>(HeapConstant(
- isolate()->factory()->NewHeapNumber(value, IMMUTABLE, TENURED)));
+ return UncheckedCast<Number>(
+ HeapConstant(isolate()->factory()->NewHeapNumber(value, TENURED)));
}
}
@@ -334,16 +285,6 @@ TNode<Smi> CodeAssembler::SmiConstant(int value) {
TNode<HeapObject> CodeAssembler::UntypedHeapConstant(
Handle<HeapObject> object) {
-#ifdef V8_EMBEDDED_BUILTINS
- // Root constants are simply loaded from the root list, while non-root
- // constants must be looked up from the builtins constants table.
- if (isolate()->ShouldLoadConstantsFromRootList()) {
- Heap::RootListIndex root_index;
- if (!isolate()->heap()->IsRootHandle(object, &root_index)) {
- return LookupConstant(object);
- }
- }
-#endif // V8_EMBEDDED_BUILTINS
return UncheckedCast<HeapObject>(raw_assembler()->HeapConstant(object));
}
@@ -359,11 +300,6 @@ TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
TNode<ExternalReference> CodeAssembler::ExternalConstant(
ExternalReference address) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (isolate()->ShouldLoadConstantsFromRootList()) {
- return LookupExternalReference(address);
- }
-#endif // V8_EMBEDDED_BUILTINS
return UncheckedCast<ExternalReference>(
raw_assembler()->ExternalConstant(address));
}
@@ -438,8 +374,14 @@ bool CodeAssembler::IsNullConstant(TNode<Object> node) {
return m.Is(isolate()->factory()->null_value());
}
-Node* CodeAssembler::Parameter(int value) {
- return raw_assembler()->Parameter(value);
+Node* CodeAssembler::Parameter(int index) {
+ if (index == kTargetParameterIndex) return raw_assembler()->TargetParameter();
+ return raw_assembler()->Parameter(index);
+}
+
+bool CodeAssembler::IsJSFunctionCall() const {
+ auto call_descriptor = raw_assembler()->call_descriptor();
+ return call_descriptor->IsJSFunctionCall();
}
TNode<Context> CodeAssembler::GetJSContextParameter() {
@@ -476,6 +418,10 @@ void CodeAssembler::ReturnIf(Node* condition, Node* value) {
Bind(&if_continue);
}
+void CodeAssembler::ReturnRaw(Node* value) {
+ return raw_assembler()->Return(value);
+}
+
void CodeAssembler::DebugAbort(Node* message) {
raw_assembler()->DebugAbort(message);
}
@@ -524,10 +470,6 @@ Node* CodeAssembler::LoadParentFramePointer() {
return raw_assembler()->LoadParentFramePointer();
}
-TNode<IntPtrT> CodeAssembler::LoadRootsPointer() {
- return UncheckedCast<IntPtrT>(raw_assembler()->LoadRootsPointer());
-}
-
Node* CodeAssembler::LoadStackPointer() {
return raw_assembler()->LoadStackPointer();
}
@@ -938,6 +880,25 @@ TNode<Word64T> CodeAssembler::Word64Sar(SloppyTNode<Word64T> left,
return UncheckedCast<Word64T>(raw_assembler()->Word64Sar(left, right));
}
+#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op) \
+ TNode<BoolT> CodeAssembler::Name(SloppyTNode<ArgT> left, \
+ SloppyTNode<ArgT> right) { \
+ VarT lhs, rhs; \
+ if (ToConstant(left, lhs) && ToConstant(right, rhs)) { \
+ return BoolConstant(lhs op rhs); \
+ } \
+ return UncheckedCast<BoolT>(raw_assembler()->Name(left, right)); \
+ }
+
+CODE_ASSEMBLER_COMPARE(IntPtrEqual, WordT, intptr_t, ToIntPtrConstant, ==)
+CODE_ASSEMBLER_COMPARE(WordEqual, WordT, intptr_t, ToIntPtrConstant, ==)
+CODE_ASSEMBLER_COMPARE(WordNotEqual, WordT, intptr_t, ToIntPtrConstant, !=)
+CODE_ASSEMBLER_COMPARE(Word32Equal, Word32T, int32_t, ToInt32Constant, ==)
+CODE_ASSEMBLER_COMPARE(Word32NotEqual, Word32T, int32_t, ToInt32Constant, !=)
+CODE_ASSEMBLER_COMPARE(Word64Equal, Word64T, int64_t, ToInt64Constant, ==)
+CODE_ASSEMBLER_COMPARE(Word64NotEqual, Word64T, int64_t, ToInt64Constant, !=)
+#undef CODE_ASSEMBLER_COMPARE
+
TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(SloppyTNode<Word32T> value) {
if (raw_assembler()->machine()->Is64()) {
return UncheckedCast<UintPtrT>(
@@ -1001,6 +962,9 @@ TNode<Object> CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
}
}
+ // TODO(jgruber): In theory we could generate better code for this by
+ // letting the macro assembler decide how to load from the roots list. In most
+ // cases, it would boil down to loading from a fixed kRootRegister offset.
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), roots_array_start,
@@ -1100,99 +1064,107 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Bind(&success);
}
-template <class... TArgs>
-TNode<Object> CodeAssembler::CallRuntimeImpl(Runtime::FunctionId function,
- SloppyTNode<Object> context,
- TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+namespace {
+template <size_t kMaxSize>
+class NodeArray {
+ public:
+ void Add(Node* node) {
+ DCHECK_GT(kMaxSize, size());
+ *ptr_++ = node;
+ }
+
+ Node* const* data() const { return arr_; }
+ int size() const { return static_cast<int>(ptr_ - arr_); }
+
+ private:
+ Node* arr_[kMaxSize];
+ Node** ptr_ = arr_;
+};
+} // namespace
+
+TNode<Object> CodeAssembler::CallRuntimeImpl(
+ Runtime::FunctionId function, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args) {
+ int result_size = Runtime::FunctionForId(function)->result_size;
+ TNode<Code> centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
+ return CallRuntimeWithCEntryImpl(function, centry, context, args);
+}
+
+TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl(
+ Runtime::FunctionId function, TNode<Code> centry, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args) {
+ constexpr size_t kMaxNumArgs = 6;
+ DCHECK_GE(kMaxNumArgs, args.size());
+ int argc = static_cast<int>(args.size());
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(call_descriptor->ReturnCount());
- Node* centry =
- HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
Node* ref = ExternalConstant(ExternalReference::Create(function));
Node* arity = Int32Constant(argc);
- Node* nodes[] = {centry, args..., ref, arity, context};
+ NodeArray<kMaxNumArgs + 4> inputs;
+ inputs.Add(centry);
+ for (auto arg : args) inputs.Add(arg);
+ inputs.Add(ref);
+ inputs.Add(arity);
+ inputs.Add(context);
CallPrologue();
Node* return_value =
- raw_assembler()->CallN(call_descriptor, arraysize(nodes), nodes);
+ raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
CallEpilogue();
return UncheckedCast<Object>(return_value);
}
-// Instantiate CallRuntime() for argument counts used by CSA-generated code
-#define INSTANTIATE(...) \
- template V8_EXPORT_PRIVATE TNode<Object> CodeAssembler::CallRuntimeImpl( \
- Runtime::FunctionId, __VA_ARGS__);
-REPEAT_1_TO_7(INSTANTIATE, SloppyTNode<Object>)
-#undef INSTANTIATE
+void CodeAssembler::TailCallRuntimeImpl(
+ Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args) {
+ int result_size = Runtime::FunctionForId(function)->result_size;
+ TNode<Code> centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
+ return TailCallRuntimeWithCEntryImpl(function, arity, centry, context, args);
+}
-template <class... TArgs>
-TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
- SloppyTNode<Object> context,
- TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+void CodeAssembler::TailCallRuntimeWithCEntryImpl(
+ Runtime::FunctionId function, TNode<Int32T> arity, TNode<Code> centry,
+ TNode<Object> context, std::initializer_list<TNode<Object>> args) {
+ constexpr size_t kMaxNumArgs = 6;
+ DCHECK_GE(kMaxNumArgs, args.size());
+ int argc = static_cast<int>(args.size());
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(call_descriptor->ReturnCount());
- Node* centry =
- HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
Node* ref = ExternalConstant(ExternalReference::Create(function));
- Node* arity = Int32Constant(argc);
- Node* nodes[] = {centry, args..., ref, arity, context};
+ NodeArray<kMaxNumArgs + 4> inputs;
+ inputs.Add(centry);
+ for (auto arg : args) inputs.Add(arg);
+ inputs.Add(ref);
+ inputs.Add(arity);
+ inputs.Add(context);
- return UncheckedCast<Object>(
- raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
+ raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
-// Instantiate TailCallRuntime() for argument counts used by CSA-generated code
-#define INSTANTIATE(...) \
- template V8_EXPORT_PRIVATE TNode<Object> CodeAssembler::TailCallRuntimeImpl( \
- Runtime::FunctionId, __VA_ARGS__);
-REPEAT_1_TO_7(INSTANTIATE, SloppyTNode<Object>)
-#undef INSTANTIATE
-
-template <class... TArgs>
-Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
- size_t result_size, Node* target, Node* context,
- TArgs... args) {
- Node* nodes[] = {target, args..., context};
- int input_count = arraysize(nodes);
- if (context == nullptr) --input_count;
- return CallStubN(descriptor, result_size, input_count, nodes,
- context != nullptr);
-}
-
-// Instantiate CallStubR() for argument counts used by CSA-generated code.
-#define INSTANTIATE(...) \
- template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
- const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
-REPEAT_1_TO_11(INSTANTIATE, Node*)
-#undef INSTANTIATE
-
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
size_t result_size, int input_count,
- Node* const* inputs, bool pass_context) {
+ Node* const* inputs) {
// implicit nodes are target and optionally context.
- int implicit_nodes = pass_context ? 2 : 1;
+ int implicit_nodes = descriptor.HasContextParameter() ? 2 : 1;
DCHECK_LE(implicit_nodes, input_count);
int argc = input_count - implicit_nodes;
DCHECK_LE(descriptor.GetParameterCount(), argc);
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
+ DCHECK_EQ(result_size, descriptor.GetReturnCount());
+
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, stack_parameter_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size,
- pass_context ? Linkage::kPassContext : Linkage::kNoContext);
+ zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
CallPrologue();
Node* return_value =
@@ -1201,62 +1173,73 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
return return_value;
}
-template <class... TArgs>
-Node* CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context,
- TArgs... args) {
- DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
- size_t result_size = 1;
+void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
+ TNode<Code> target, TNode<Object> context,
+ std::initializer_list<Node*> args) {
+ constexpr size_t kMaxNumArgs = 11;
+ DCHECK_GE(kMaxNumArgs, args.size());
+ DCHECK_EQ(descriptor.GetParameterCount(), args.size());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
+ zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+
+ NodeArray<kMaxNumArgs + 2> inputs;
+ inputs.Add(target);
+ for (auto arg : args) inputs.Add(arg);
+ if (descriptor.HasContextParameter()) {
+ inputs.Add(context);
+ }
- Node* nodes[] = {target, args..., context};
- CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
- return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
+ raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
-// Instantiate TailCallStub() for argument counts used by CSA-generated code
-#define INSTANTIATE(...) \
- template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStubImpl( \
- const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
-REPEAT_1_TO_12(INSTANTIATE, Node*)
-#undef INSTANTIATE
+Node* CodeAssembler::CallStubRImpl(const CallInterfaceDescriptor& descriptor,
+ size_t result_size, SloppyTNode<Code> target,
+ SloppyTNode<Object> context,
+ std::initializer_list<Node*> args) {
+ constexpr size_t kMaxNumArgs = 10;
+ DCHECK_GE(kMaxNumArgs, args.size());
-template <class... TArgs>
-Node* CodeAssembler::TailCallStubThenBytecodeDispatch(
+ NodeArray<kMaxNumArgs + 2> inputs;
+ inputs.Add(target);
+ for (auto arg : args) inputs.Add(arg);
+ if (descriptor.HasContextParameter()) {
+ inputs.Add(context);
+ }
+
+ return CallStubN(descriptor, result_size, inputs.size(), inputs.data());
+}
+
+Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
- TArgs... args) {
- DCHECK_LE(descriptor.GetParameterCount(), sizeof...(args));
+ std::initializer_list<Node*> args) {
+ constexpr size_t kMaxNumArgs = 6;
+ DCHECK_GE(kMaxNumArgs, args.size());
+
+ DCHECK_LE(descriptor.GetParameterCount(), args.size());
+ int argc = static_cast<int>(args.size());
// Extra arguments not mentioned in the descriptor are passed on the stack.
- int stack_parameter_count =
- sizeof...(args) - descriptor.GetRegisterParameterCount();
+ int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, stack_parameter_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), 0);
+ zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
- Node* nodes[] = {target, args..., context};
- return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
-}
+ NodeArray<kMaxNumArgs + 2> inputs;
+ inputs.Add(target);
+ for (auto arg : args) inputs.Add(arg);
+ inputs.Add(context);
-// Instantiate TailCallJSAndBytecodeDispatch() for argument counts used by
-// CSA-generated code
-#define INSTANTIATE(...) \
- template V8_EXPORT_PRIVATE Node* \
- CodeAssembler::TailCallStubThenBytecodeDispatch( \
- const CallInterfaceDescriptor&, Node*, Node*, Node*, __VA_ARGS__);
-REPEAT_1_TO_7(INSTANTIATE, Node*)
-#undef INSTANTIATE
+ return raw_assembler()->TailCallN(call_descriptor, inputs.size(),
+ inputs.data());
+}
template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
+ zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
@@ -1269,6 +1252,22 @@ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
Node*, Node*);
+TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code,
+ TNode<Context> context,
+ TNode<JSFunction> function,
+ TNode<Object> new_target,
+ TNode<Int32T> arg_count) {
+ JSTrampolineDescriptor descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kFixedTargetRegister, Operator::kNoProperties);
+
+ Node* nodes[] = {code, function, new_target, arg_count, context};
+ CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
+ return UncheckedCast<Object>(
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
+}
+
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
int input_count, Node* const* inputs) {
auto call_descriptor = Linkage::GetSimplifiedCDescriptor(zone(), signature);
@@ -1686,7 +1685,8 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
} // namespace compiler
-Smi* CheckObjectType(Object* value, Smi* type, String* location) {
+Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
+ String* location) {
#ifdef DEBUG
const char* expected;
switch (static_cast<ObjectType>(type->value())) {
@@ -1721,15 +1721,3 @@ Smi* CheckObjectType(Object* value, Smi* type, String* location) {
} // namespace internal
} // namespace v8
-
-#undef REPEAT_1_TO_2
-#undef REPEAT_1_TO_3
-#undef REPEAT_1_TO_4
-#undef REPEAT_1_TO_5
-#undef REPEAT_1_TO_6
-#undef REPEAT_1_TO_7
-#undef REPEAT_1_TO_8
-#undef REPEAT_1_TO_9
-#undef REPEAT_1_TO_10
-#undef REPEAT_1_TO_11
-#undef REPEAT_1_TO_12
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0e4f8ea5c8..6419140a74 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -35,6 +35,7 @@ class JSRegExpStringIterator;
class JSWeakCollection;
class JSWeakMap;
class JSWeakSet;
+class MaybeObject;
class PromiseCapability;
class PromiseFulfillReactionJobTask;
class PromiseReaction;
@@ -57,7 +58,9 @@ struct WordT : IntegralT {
: MachineRepresentation::kWord64;
};
-struct RawPtrT : WordT {};
+struct RawPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::Pointer();
+};
template <class To>
struct RawPtr : RawPtrT {};
@@ -275,7 +278,8 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
#undef OBJECT_TYPE_STRUCT_CASE
#undef OBJECT_TYPE_TEMPLATE_CASE
-Smi* CheckObjectType(Object* value, Smi* type, String* location);
+Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
+ String* location);
namespace compiler {
@@ -361,6 +365,16 @@ struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
types_have_common_values<T2, U2>::value;
};
+template <class T>
+struct types_have_common_values<T, MaybeObject> {
+ static const bool value = types_have_common_values<T, Object>::value;
+};
+
+template <class T>
+struct types_have_common_values<MaybeObject, T> {
+ static const bool value = types_have_common_values<Object, T>::value;
+};
+
// TNode<T> is an SSA value with the static type tag T, which is one of the
// following:
// - a subclass of internal::Object represents a tagged type
@@ -434,7 +448,6 @@ class SloppyTNode : public TNode<T> {
V(IntPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(IntPtrGreaterThan, BoolT, WordT, WordT) \
V(IntPtrGreaterThanOrEqual, BoolT, WordT, WordT) \
- V(IntPtrEqual, BoolT, WordT, WordT) \
V(Uint32LessThan, BoolT, Word32T, Word32T) \
V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \
V(Uint32GreaterThan, BoolT, Word32T, Word32T) \
@@ -442,13 +455,7 @@ class SloppyTNode : public TNode<T> {
V(UintPtrLessThan, BoolT, WordT, WordT) \
V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(UintPtrGreaterThan, BoolT, WordT, WordT) \
- V(UintPtrGreaterThanOrEqual, BoolT, WordT, WordT) \
- V(WordEqual, BoolT, WordT, WordT) \
- V(WordNotEqual, BoolT, WordT, WordT) \
- V(Word32Equal, BoolT, Word32T, Word32T) \
- V(Word32NotEqual, BoolT, Word32T, Word32T) \
- V(Word64Equal, BoolT, Word64T, Word64T) \
- V(Word64NotEqual, BoolT, Word64T, Word64T)
+ V(UintPtrGreaterThanOrEqual, BoolT, WordT, WordT)
#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
@@ -468,6 +475,7 @@ class SloppyTNode : public TNode<T> {
V(Int32Add, Word32T, Word32T, Word32T) \
V(Int32AddWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Sub, Word32T, Word32T, Word32T) \
+ V(Int32SubWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Mul, Word32T, Word32T, Word32T) \
V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Div, Int32T, Int32T, Int32T) \
@@ -517,6 +525,8 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeInt32ToInt64, Int64T, Int32T) \
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
+ V(BitcastInt32ToFloat32, Float32T, Word32T) \
+ V(BitcastFloat32ToInt32, Word32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
V(RoundInt32ToFloat32, Int32T, Float32T) \
V(Float64SilenceNaN, Float64T, Float64T) \
@@ -557,7 +567,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
~CodeAssembler();
- static Handle<Code> GenerateCode(CodeAssemblerState* state);
+ static Handle<Code> GenerateCode(CodeAssemblerState* state,
+ const AssemblerOptions& options);
bool Is64() const;
bool IsFloat64RoundUpSupported() const;
@@ -592,6 +603,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class A>
operator TNode<A>() {
+ static_assert(
+ !std::is_same<A, MaybeObject>::value,
+ "Can't cast to MaybeObject, use explicit conversion functions. ");
+
static_assert(types_have_common_values<A, PreviousType>::value,
"Incompatible types: this cast can never succeed.");
static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value,
@@ -603,12 +618,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
"Unnecessary CAST: types are convertible.");
#ifdef DEBUG
if (FLAG_debug_code) {
+ if (std::is_same<PreviousType, MaybeObject>::value) {
+ code_assembler_->GenerateCheckMaybeObjectIsObject(node_, location_);
+ }
Node* function = code_assembler_->ExternalConstant(
ExternalReference::check_object_type());
- code_assembler_->CallCFunction3(
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged(), function,
- node_,
+ Node* const isolate_ptr = code_assembler_->ExternalConstant(
+ ExternalReference::isolate_address(code_assembler_->isolate()));
+ code_assembler_->CallCFunction4(
+ MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), MachineType::TaggedSigned(),
+ MachineType::AnyTagged(), function, isolate_ptr, node_,
code_assembler_->SmiConstant(
static_cast<int>(ObjectTypeOf<A>::value)),
code_assembler_->StringConstant(location_));
@@ -668,9 +688,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define CAST(x) Cast(x, "")
#endif
-#ifdef V8_EMBEDDED_BUILTINS
- TNode<HeapObject> LookupConstant(Handle<HeapObject> object);
- TNode<ExternalReference> LookupExternalReference(ExternalReference reference);
+#ifdef DEBUG
+ void GenerateCheckMaybeObjectIsObject(Node* node, const char* location);
#endif
// Constants.
@@ -702,6 +721,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Int32FalseConstant() {
return ReinterpretCast<BoolT>(Int32Constant(0));
}
+ TNode<BoolT> BoolConstant(bool value) {
+ return value ? Int32TrueConstant() : Int32FalseConstant();
+ }
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
@@ -720,6 +742,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return UncheckedCast<UintPtrT>(x);
}
+ static constexpr int kTargetParameterIndex = -1;
+
Node* Parameter(int value);
TNode<Context> GetJSContextParameter();
@@ -731,6 +755,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnIf(Node* condition, Node* value);
+ void ReturnRaw(Node* value);
+
void DebugAbort(Node* message);
void DebugBreak();
void Unreachable();
@@ -753,9 +779,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* LoadFramePointer();
Node* LoadParentFramePointer();
- // Access to the roots pointer.
- TNode<IntPtrT> LoadRootsPointer();
-
// Access to the stack pointer
Node* LoadStackPointer();
@@ -858,6 +881,18 @@ class V8_EXPORT_PRIVATE CodeAssembler {
ReinterpretCast<WordT>(right));
}
+ TNode<BoolT> IntPtrEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<BoolT> WordEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<BoolT> WordNotEqual(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<BoolT> Word32Equal(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<BoolT> Word32NotEqual(SloppyTNode<Word32T> left,
+ SloppyTNode<Word32T> right);
+ TNode<BoolT> Word64Equal(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+ TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left,
+ SloppyTNode<Word64T> right);
+
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
return Signed(
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
@@ -954,66 +989,88 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Calls
template <class... TArgs>
- TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
- SloppyTNode<Object> context, TArgs... args);
- template <class... TArgs>
TNode<Object> CallRuntime(Runtime::FunctionId function,
SloppyTNode<Object> context, TArgs... args) {
return CallRuntimeImpl(function, context,
- implicit_cast<SloppyTNode<Object>>(args)...);
+ {implicit_cast<SloppyTNode<Object>>(args)...});
}
template <class... TArgs>
- TNode<Object> TailCallRuntimeImpl(Runtime::FunctionId function,
- SloppyTNode<Object> context, TArgs... args);
+ TNode<Object> CallRuntimeWithCEntry(Runtime::FunctionId function,
+ TNode<Code> centry,
+ SloppyTNode<Object> context,
+ TArgs... args) {
+ return CallRuntimeWithCEntryImpl(function, centry, context, {args...});
+ }
+
template <class... TArgs>
- TNode<Object> TailCallRuntime(Runtime::FunctionId function,
- SloppyTNode<Object> context, TArgs... args) {
- return TailCallRuntimeImpl(function, context,
- implicit_cast<SloppyTNode<Object>>(args)...);
+ void TailCallRuntime(Runtime::FunctionId function,
+ SloppyTNode<Object> context, TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ TNode<Int32T> arity = Int32Constant(argc);
+ return TailCallRuntimeImpl(function, arity, context,
+ {implicit_cast<SloppyTNode<Object>>(args)...});
+ }
+
+ template <class... TArgs>
+ void TailCallRuntime(Runtime::FunctionId function, TNode<Int32T> arity,
+ SloppyTNode<Object> context, TArgs... args) {
+ return TailCallRuntimeImpl(function, arity, context,
+ {implicit_cast<SloppyTNode<Object>>(args)...});
+ }
+
+ template <class... TArgs>
+ void TailCallRuntimeWithCEntry(Runtime::FunctionId function,
+ TNode<Code> centry, TNode<Object> context,
+ TArgs... args) {
+ int argc = sizeof...(args);
+ TNode<Int32T> arity = Int32Constant(argc);
+ return TailCallRuntimeWithCEntryImpl(
+ function, arity, centry, context,
+ {implicit_cast<SloppyTNode<Object>>(args)...});
}
//
// If context passed to CallStub is nullptr, it won't be passed to the stub.
//
- template <class... TArgs>
- Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context,
- implicit_cast<Node*>(args)...);
+ template <class T = Object, class... TArgs>
+ TNode<T> CallStub(Callable const& callable, SloppyTNode<Object> context,
+ TArgs... args) {
+ TNode<Code> target = HeapConstant(callable.code());
+ return CallStub<T>(callable.descriptor(), target, context, args...);
}
- template <class... TArgs>
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, TArgs... args) {
- return CallStubR(descriptor, 1, target, context,
- implicit_cast<Node*>(args)...);
+ template <class T = Object, class... TArgs>
+ TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
+ SloppyTNode<Code> target, SloppyTNode<Object> context,
+ TArgs... args) {
+ return UncheckedCast<T>(CallStubR(descriptor, 1, target, context, args...));
}
template <class... TArgs>
Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
- Node* target, Node* context, TArgs... args);
+ SloppyTNode<Code> target, SloppyTNode<Object> context,
+ TArgs... args) {
+ return CallStubRImpl(descriptor, result_size, target, context, {args...});
+ }
Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
- int input_count, Node* const* inputs,
- bool pass_context = true);
+ int input_count, Node* const* inputs);
template <class... TArgs>
- Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
- Node* target = HeapConstant(callable.code());
+ void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
+ TArgs... args) {
+ TNode<Code> target = HeapConstant(callable.code());
return TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, TArgs... args) {
- return TailCallStubImpl(descriptor, target, context,
- implicit_cast<Node*>(args)...);
+ void TailCallStub(const CallInterfaceDescriptor& descriptor,
+ SloppyTNode<Code> target, SloppyTNode<Object> context,
+ TArgs... args) {
+ return TailCallStubImpl(descriptor, target, context, {args...});
}
- template <class... TArgs>
- Node* TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, TArgs... args);
template <class... TArgs>
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
@@ -1021,8 +1078,23 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* TailCallStubThenBytecodeDispatch(
- const CallInterfaceDescriptor& descriptor, Node* context, Node* target,
- TArgs... args);
+ const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
+ TArgs... args) {
+ return TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
+ {args...});
+ }
+
+ // Tailcalls to the given code object with JSCall linkage. The JS arguments
+ // (including receiver) are supposed to be already on the stack.
+ // This is a building block for implementing trampoline stubs that are
+ // installed instead of code objects with JSCall linkage.
+ // Note that no arguments adaption is going on here - all the JavaScript
+ // arguments are left on the stack unmodified. Therefore, this tail call can
+ // only be used after arguments adaptation has been performed already.
+ TNode<Object> TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ TNode<JSFunction> function,
+ TNode<Object> new_target,
+ TNode<Int32T> arg_count);
template <class... TArgs>
Node* CallJS(Callable const& callable, Node* context, Node* function,
@@ -1131,7 +1203,39 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool Word32ShiftIsSafe() const;
PoisoningMitigationLevel poisoning_level() const;
+ bool IsJSFunctionCall() const;
+
private:
+ TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
+ TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
+
+ TNode<Object> CallRuntimeWithCEntryImpl(
+ Runtime::FunctionId function, TNode<Code> centry, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
+
+ void TailCallRuntimeImpl(Runtime::FunctionId function, TNode<Int32T> arity,
+ TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
+
+ void TailCallRuntimeWithCEntryImpl(Runtime::FunctionId function,
+ TNode<Int32T> arity, TNode<Code> centry,
+ TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
+
+ void TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
+ TNode<Code> target, TNode<Object> context,
+ std::initializer_list<Node*> args);
+
+ Node* TailCallStubThenBytecodeDispatchImpl(
+ const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
+ std::initializer_list<Node*> args);
+
+ Node* CallStubRImpl(const CallInterfaceDescriptor& descriptor,
+ size_t result_size, SloppyTNode<Code> target,
+ SloppyTNode<Object> context,
+ std::initializer_list<Node*> args);
+
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
@@ -1276,7 +1380,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, Code::Kind kind,
const char* name, PoisoningMitigationLevel poisoning_level,
- size_t result_size = 1, uint32_t stub_key = 0,
+ uint32_t stub_key = 0,
int32_t builtin_index = Builtins::kNoBuiltinId);
// Create with JSCall linkage.
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 865b42b0b8..6a7d0985f4 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -38,14 +38,13 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code,
- OptimizedCompilationInfo* info, Isolate* isolate,
- base::Optional<OsrHelper> osr_helper,
- int start_source_position,
- JumpOptimizationInfo* jump_opt,
- WasmCompilationData* wasm_compilation_data,
- PoisoningMitigationLevel poisoning_level)
+CodeGenerator::CodeGenerator(
+ Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper, int start_source_position,
+ JumpOptimizationInfo* jump_opt, WasmCompilationData* wasm_compilation_data,
+ PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ int32_t builtin_index)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -57,7 +56,7 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- tasm_(isolate, nullptr, 0, CodeObjectRequired::kNo),
+ tasm_(isolate, options, nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
safepoints_(zone()),
handlers_(zone()),
@@ -78,7 +77,9 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
wasm_compilation_data_(wasm_compilation_data),
result_(kSuccess),
- poisoning_level_(poisoning_level) {
+ poisoning_level_(poisoning_level),
+ block_starts_(zone()),
+ instr_starts_(zone()) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -86,10 +87,12 @@ CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt);
Code::Kind code_kind = info_->code_kind();
- if (code_kind == Code::JS_TO_WASM_FUNCTION ||
- code_kind == Code::WASM_FUNCTION) {
- tasm_.enable_serializer();
+ if (code_kind == Code::WASM_FUNCTION ||
+ code_kind == Code::WASM_TO_JS_FUNCTION ||
+ code_kind == Code::WASM_INTERPRETER_ENTRY) {
+ tasm_.set_trap_on_abort(true);
}
+ tasm_.set_builtin_index(builtin_index);
}
bool CodeGenerator::wasm_runtime_exception_support() const {
@@ -111,31 +114,16 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, SourcePosition pos) {
DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
- Deoptimizer::BailoutType bailout_type;
- switch (deopt_kind) {
- case DeoptimizeKind::kSoft: {
- bailout_type = Deoptimizer::SOFT;
- break;
- }
- case DeoptimizeKind::kEager: {
- bailout_type = Deoptimizer::EAGER;
- break;
- }
- case DeoptimizeKind::kLazy: {
- bailout_type = Deoptimizer::LAZY;
- break;
- }
- default: { UNREACHABLE(); }
- }
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- tasm()->isolate(), deoptimization_id, bailout_type);
+ tasm()->isolate(), deoptimization_id, deopt_kind);
if (deopt_entry == kNullAddress) return kTooManyDeoptimizationBailouts;
if (info()->is_source_positions_enabled()) {
tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
- tasm()->CallForDeoptimization(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ tasm()->CallForDeoptimization(deopt_entry, deoptimization_id,
+ RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -190,6 +178,10 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.SetNumberOfInstructionBlocks(
code()->InstructionBlockCount());
+ if (info->trace_turbo_json_enabled()) {
+ block_starts_.assign(code()->instruction_blocks().size(), -1);
+ instr_starts_.assign(code()->instructions().size(), -1);
+ }
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (const InstructionBlock* block : code()->instruction_blocks()) {
@@ -201,6 +193,9 @@ void CodeGenerator::AssembleCode() {
if (block->IsLoopHeader() && !tasm()->jump_optimization_info()) {
tasm()->Align(16);
}
+ if (info->trace_turbo_json_enabled()) {
+ block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
+ }
// Bind a label for a block.
current_block_ = block->rpo_number();
unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
@@ -354,14 +349,33 @@ void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
}
}
-Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
- return source_position_table_builder_.ToSourcePositionTable(isolate());
+void CodeGenerator::AssembleArchBinarySearchSwitchRange(
+ Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
+ std::pair<int32_t, Label*>* end) {
+ if (end - begin < kBinarySearchSwitchMinimalCases) {
+ while (begin != end) {
+ tasm()->JumpIfEqual(input, begin->first, begin->second);
+ ++begin;
+ }
+ AssembleArchJump(def_block);
+ return;
+ }
+ auto middle = begin + (end - begin) / 2;
+ Label less_label;
+ tasm()->JumpIfLessThan(input, middle->first, &less_label);
+ AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
+ tasm()->bind(&less_label);
+ AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
+}
+
+OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
+ return source_position_table_builder_.ToSourcePositionTableVector();
}
-Handle<Code> CodeGenerator::FinalizeCode() {
+MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration();
- return Handle<Code>();
+ return MaybeHandle<Code>();
}
// Allocate the source position table.
@@ -383,10 +397,11 @@ Handle<Code> CodeGenerator::FinalizeCode() {
source_positions, deopt_data, kMovable, info()->stub_key(), true,
frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
handler_table_offset_);
+
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
tasm()->AbortedCodeGeneration();
- return Handle<Code>();
+ return MaybeHandle<Code>();
}
isolate()->counters()->total_compiled_code_size()->Increment(
code->raw_instruction_size());
@@ -447,6 +462,9 @@ bool CodeGenerator::IsMaterializableFromRoot(
CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
const InstructionBlock* block) {
for (int i = block->code_start(); i < block->code_end(); ++i) {
+ if (info()->trace_turbo_json_enabled()) {
+ instr_starts_[i] = tasm()->pc_offset();
+ }
Instruction* instr = code()->InstructionAt(i);
CodeGenResult result = AssembleInstruction(instr, block);
if (result != kSuccess) return result;
@@ -705,7 +723,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information.
- if (info->trace_turbo_json_enabled() ||
+ if (info->trace_turbo_json_enabled() || !tasm()->isolate() ||
tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
@@ -732,6 +750,14 @@ bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
}
}
+StubCallMode CodeGenerator::DetermineStubCallMode() const {
+ Code::Kind code_kind = info()->code_kind();
+ return (code_kind == Code::WASM_FUNCTION ||
+ code_kind == Code::WASM_TO_JS_FUNCTION)
+ ? StubCallMode::kCallWasmRuntimeStub
+ : StubCallMode::kCallOnHeapBuiltin;
+}
+
void CodeGenerator::AssembleGaps(Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 1dfe0c3526..dcdb6bb806 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -84,15 +84,17 @@ class CodeGenerator final : public GapResolver::Assembler {
int start_source_position,
JumpOptimizationInfo* jump_opt,
WasmCompilationData* wasm_compilation_data,
- PoisoningMitigationLevel poisoning_level);
+ PoisoningMitigationLevel poisoning_level,
+ const AssemblerOptions& options,
+ int32_t builtin_index);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
- // FinalizeCode returns a null handle.
+ // FinalizeCode returns an empty MaybeHandle.
void AssembleCode(); // Does not need to run on main thread.
- Handle<Code> FinalizeCode();
+ MaybeHandle<Code> FinalizeCode();
- Handle<ByteArray> GetSourcePositionTable();
+ OwnedVector<byte> GetSourcePositionTable();
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -123,6 +125,11 @@ class CodeGenerator final : public GapResolver::Assembler {
size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
size_t GetHandlerTableOffset() const { return handler_table_offset_; }
+ const ZoneVector<int>& block_starts() const { return block_starts_; }
+ const ZoneVector<int>& instr_starts() const { return instr_starts_; }
+
+ static constexpr int kBinarySearchSwitchMinimalCases = 4;
+
private:
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
@@ -176,6 +183,9 @@ class CodeGenerator final : public GapResolver::Assembler {
// adjusted stack pointer is returned in |slot|.
bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);
+ // Determines how to call helper stubs depending on the code kind.
+ StubCallMode DetermineStubCallMode() const;
+
CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
SourcePosition pos);
@@ -192,6 +202,10 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
+ void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
+ std::pair<int32_t, Label*>* begin,
+ std::pair<int32_t, Label*>* end);
+ void AssembleArchBinarySearchSwitch(Instruction* instr);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
@@ -416,6 +430,8 @@ class CodeGenerator final : public GapResolver::Assembler {
WasmCompilationData* wasm_compilation_data_;
CodeGenResult result_;
PoisoningMitigationLevel poisoning_level_;
+ ZoneVector<int> block_starts_;
+ ZoneVector<int> instr_starts_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 388cc66c16..6e50d700b7 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -19,7 +19,7 @@ namespace compiler {
namespace {
-Decision DecideCondition(Node* const cond) {
+Decision DecideCondition(const JSHeapBroker* broker, Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
@@ -27,7 +27,8 @@ Decision DecideCondition(Node* const cond) {
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher mcond(cond);
- return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
+ return mcond.Ref(broker).BooleanValue() ? Decision::kTrue
+ : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -37,11 +38,13 @@ Decision DecideCondition(Node* const cond) {
} // namespace
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
+ const JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
Zone* temp_zone)
: AdvancedReducer(editor),
graph_(graph),
+ js_heap_broker_(js_heap_broker),
common_(common),
machine_(machine),
dead_(graph->NewNode(common->Dead())),
@@ -50,6 +53,7 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
}
Reduction CommonOperatorReducer::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
@@ -85,8 +89,10 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// not (i.e. true being returned in the false case and vice versa).
if (cond->opcode() == IrOpcode::kBooleanNot ||
(cond->opcode() == IrOpcode::kSelect &&
- DecideCondition(cond->InputAt(1)) == Decision::kFalse &&
- DecideCondition(cond->InputAt(2)) == Decision::kTrue)) {
+ DecideCondition(js_heap_broker(), cond->InputAt(1)) ==
+ Decision::kFalse &&
+ DecideCondition(js_heap_broker(), cond->InputAt(2)) ==
+ Decision::kTrue)) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -108,7 +114,7 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
return Changed(node);
}
- Decision const decision = DecideCondition(cond);
+ Decision const decision = DecideCondition(js_heap_broker(), cond);
if (decision == Decision::kUnknown) return NoChange();
Node* const control = node->InputAt(1);
for (Node* const use : node->uses()) {
@@ -148,7 +154,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
: common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
return Changed(node);
}
- Decision const decision = DecideCondition(condition);
+ Decision const decision = DecideCondition(js_heap_broker(), condition);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
@@ -381,7 +387,7 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
Node* const vtrue = node->InputAt(1);
Node* const vfalse = node->InputAt(2);
if (vtrue == vfalse) return Replace(vtrue);
- switch (DecideCondition(cond)) {
+ switch (DecideCondition(js_heap_broker(), cond)) {
case Decision::kTrue:
return Replace(vtrue);
case Decision::kFalse:
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 022c4fbe8c..77a1d71084 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -25,6 +25,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
+ const JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final {}
@@ -47,11 +48,13 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
Graph* graph() const { return graph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
Node* dead() const { return dead_; }
Graph* const graph_;
+ const JSHeapBroker* const js_heap_broker_;
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 5ef7548ce8..253a92eb84 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -41,6 +41,25 @@ std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
UNREACHABLE();
}
+std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
+ switch (trap_id) {
+#define TRAP_CASE(Name) \
+ case TrapId::k##Name: \
+ return os << #Name;
+ FOREACH_WASM_TRAPREASON(TRAP_CASE)
+#undef TRAP_CASE
+ case TrapId::kInvalid:
+ return os << "Invalid";
+ }
+ UNREACHABLE();
+}
+
+TrapId TrapIdOf(const Operator* const op) {
+ DCHECK(op->opcode() == IrOpcode::kTrapIf ||
+ op->opcode() == IrOpcode::kTrapUnless);
+ return OpParameter<TrapId>(op);
+}
+
std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
return os << info.hint << "|" << info.is_safety_check;
}
@@ -742,35 +761,33 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
- template <int32_t trap_id>
- struct TrapIfOperator final : public Operator1<int32_t> {
+ template <TrapId trap_id>
+ struct TrapIfOperator final : public Operator1<TrapId> {
TrapIfOperator()
- : Operator1<int32_t>( // --
+ : Operator1<TrapId>( // --
IrOpcode::kTrapIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"TrapIf", // name
1, 1, 1, 0, 0, 1, // counts
trap_id) {} // parameter
};
-#define CACHED_TRAP_IF(Trap) \
- TrapIfOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
- kTrapIf##Trap##Operator;
+#define CACHED_TRAP_IF(Trap) \
+ TrapIfOperator<TrapId::k##Trap> kTrapIf##Trap##Operator;
CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
#undef CACHED_TRAP_IF
- template <int32_t trap_id>
- struct TrapUnlessOperator final : public Operator1<int32_t> {
+ template <TrapId trap_id>
+ struct TrapUnlessOperator final : public Operator1<TrapId> {
TrapUnlessOperator()
- : Operator1<int32_t>( // --
+ : Operator1<TrapId>( // --
IrOpcode::kTrapUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"TrapUnless", // name
1, 1, 1, 0, 0, 1, // counts
trap_id) {} // parameter
};
-#define CACHED_TRAP_UNLESS(Trap) \
- TrapUnlessOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
- kTrapUnless##Trap##Operator;
+#define CACHED_TRAP_UNLESS(Trap) \
+ TrapUnlessOperator<TrapId::k##Trap> kTrapUnless##Trap##Operator;
CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
#undef CACHED_TRAP_UNLESS
@@ -973,10 +990,10 @@ const Operator* CommonOperatorBuilder::DeoptimizeUnless(
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
+const Operator* CommonOperatorBuilder::TrapIf(TrapId trap_id) {
switch (trap_id) {
-#define CACHED_TRAP_IF(Trap) \
- case Builtins::kThrowWasm##Trap: \
+#define CACHED_TRAP_IF(Trap) \
+ case TrapId::k##Trap: \
return &cache_.kTrapIf##Trap##Operator;
CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
#undef CACHED_TRAP_IF
@@ -984,7 +1001,7 @@ const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
break;
}
// Uncached
- return new (zone()) Operator1<int>( // --
+ return new (zone()) Operator1<TrapId>( // --
IrOpcode::kTrapIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"TrapIf", // name
@@ -992,10 +1009,10 @@ const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
trap_id); // parameter
}
-const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
+const Operator* CommonOperatorBuilder::TrapUnless(TrapId trap_id) {
switch (trap_id) {
-#define CACHED_TRAP_UNLESS(Trap) \
- case Builtins::kThrowWasm##Trap: \
+#define CACHED_TRAP_UNLESS(Trap) \
+ case TrapId::k##Trap: \
return &cache_.kTrapUnless##Trap##Operator;
CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
#undef CACHED_TRAP_UNLESS
@@ -1003,7 +1020,7 @@ const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
break;
}
// Uncached
- return new (zone()) Operator1<int>( // --
+ return new (zone()) Operator1<TrapId>( // --
IrOpcode::kTrapUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"TrapUnless", // name
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 4a0da7c20a..23f1cdfc1d 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -59,6 +59,19 @@ inline size_t hash_value(IsSafetyCheck is_safety_check) {
return static_cast<size_t>(is_safety_check);
}
+enum class TrapId : uint32_t {
+#define DEF_ENUM(Name, ...) k##Name,
+ FOREACH_WASM_TRAPREASON(DEF_ENUM)
+#undef DEF_ENUM
+ kInvalid
+};
+
+inline size_t hash_value(TrapId id) { return static_cast<uint32_t>(id); }
+
+std::ostream& operator<<(std::ostream&, TrapId trap_id);
+
+TrapId TrapIdOf(const Operator* const op);
+
struct BranchOperatorInfo {
BranchHint hint;
IsSafetyCheck is_safety_check;
@@ -451,8 +464,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
DeoptimizeKind kind, DeoptimizeReason reason,
VectorSlotPair const& feedback,
IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- const Operator* TrapIf(int32_t trap_id);
- const Operator* TrapUnless(int32_t trap_id);
+ const Operator* TrapIf(TrapId trap_id);
+ const Operator* TrapUnless(TrapId trap_id);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
new file mode 100644
index 0000000000..a672d0a1f0
--- /dev/null
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -0,0 +1,396 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/compilation-dependencies.h"
+
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
+ : zone_(zone), dependencies_(zone) {}
+
+class CompilationDependencies::Dependency : public ZoneObject {
+ public:
+ virtual bool IsSane() const = 0;
+ virtual bool IsValid() const = 0;
+ virtual void Install(Isolate* isolate, Handle<WeakCell> code) = 0;
+};
+
+class InitialMapDependency final : public CompilationDependencies::Dependency {
+ public:
+ InitialMapDependency(const JSFunctionRef& function, const MapRef& initial_map)
+ : function_(function), initial_map_(initial_map) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ CHECK(function_.has_initial_map());
+ return function_.initial_map().equals(initial_map_);
+ }
+
+ bool IsValid() const override {
+ Handle<JSFunction> function = function_.object<JSFunction>();
+ return function->has_initial_map() &&
+ function->initial_map() == *initial_map_.object<Map>();
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code, initial_map_.object<Map>(),
+ DependentCode::kInitialMapChangedGroup);
+ }
+
+ private:
+ JSFunctionRef function_;
+ MapRef initial_map_;
+};
+
+class StableMapDependency final : public CompilationDependencies::Dependency {
+ public:
+ explicit StableMapDependency(const MapRef& map) : map_(map) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ return map_.is_stable();
+ }
+
+ bool IsValid() const override { return map_.object<Map>()->is_stable(); }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ DependentCode::kPrototypeCheckGroup);
+ }
+
+ private:
+ MapRef map_;
+};
+
+class TransitionDependency final : public CompilationDependencies::Dependency {
+ public:
+ explicit TransitionDependency(const MapRef& map) : map_(map) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ return !map_.is_deprecated();
+ }
+
+ bool IsValid() const override { return !map_.object<Map>()->is_deprecated(); }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ DependentCode::kTransitionGroup);
+ }
+
+ private:
+ MapRef map_;
+};
+
+class PretenureModeDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ PretenureModeDependency(const AllocationSiteRef& site, PretenureFlag mode)
+ : site_(site), mode_(mode) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ return mode_ == site_.GetPretenureMode();
+ }
+
+ bool IsValid() const override {
+ return mode_ == site_.object<AllocationSite>()->GetPretenureMode();
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(
+ isolate, code, site_.object<AllocationSite>(),
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ }
+
+ private:
+ AllocationSiteRef site_;
+ PretenureFlag mode_;
+};
+
+class FieldTypeDependency final : public CompilationDependencies::Dependency {
+ public:
+ FieldTypeDependency(const MapRef& owner, int descriptor,
+ const FieldTypeRef& type)
+ : owner_(owner), descriptor_(descriptor), type_(type) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ CHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
+ return type_.equals(owner_.GetFieldType(descriptor_));
+ }
+
+ bool IsValid() const override {
+ DisallowHeapAllocation no_heap_allocation;
+ Handle<Map> owner = owner_.object<Map>();
+ Handle<FieldType> type = type_.object<FieldType>();
+ return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code, owner_.object<Map>(),
+ DependentCode::kFieldOwnerGroup);
+ }
+
+ private:
+ MapRef owner_;
+ int descriptor_;
+ FieldTypeRef type_;
+};
+
+class GlobalPropertyDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
+ bool read_only)
+ : cell_(cell), type_(type), read_only_(read_only) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ return type_ == cell_.property_details().cell_type() &&
+ read_only_ == cell_.property_details().IsReadOnly();
+ }
+
+ bool IsValid() const override {
+ Handle<PropertyCell> cell = cell_.object<PropertyCell>();
+ return type_ == cell->property_details().cell_type() &&
+ read_only_ == cell->property_details().IsReadOnly();
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code,
+ cell_.object<PropertyCell>(),
+ DependentCode::kPropertyCellChangedGroup);
+ }
+
+ private:
+ PropertyCellRef cell_;
+ PropertyCellType type_;
+ bool read_only_;
+};
+
+class ProtectorDependency final : public CompilationDependencies::Dependency {
+ public:
+ explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ return cell_.value().IsSmi() &&
+ cell_.value().AsSmi() == Isolate::kProtectorValid;
+ }
+
+ bool IsValid() const override {
+ Handle<PropertyCell> cell = cell_.object<PropertyCell>();
+ return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(isolate, code,
+ cell_.object<PropertyCell>(),
+ DependentCode::kPropertyCellChangedGroup);
+ }
+
+ private:
+ PropertyCellRef cell_;
+};
+
+class ElementsKindDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
+ : site_(site), kind_(kind) {
+ DCHECK(IsSane());
+ }
+
+ bool IsSane() const override {
+ DisallowHeapAccess no_heap_access;
+ DCHECK(AllocationSite::ShouldTrack(kind_));
+ ElementsKind kind = site_.PointsToLiteral()
+ ? site_.boilerplate().GetElementsKind()
+ : site_.GetElementsKind();
+ return kind_ == kind;
+ }
+
+ bool IsValid() const override {
+ Handle<AllocationSite> site = site_.object<AllocationSite>();
+ ElementsKind kind = site->PointsToLiteral()
+ ? site->boilerplate()->GetElementsKind()
+ : site->GetElementsKind();
+ return kind_ == kind;
+ }
+
+ void Install(Isolate* isolate, Handle<WeakCell> code) override {
+ DCHECK(IsValid());
+ DependentCode::InstallDependency(
+ isolate, code, site_.object<AllocationSite>(),
+ DependentCode::kAllocationSiteTransitionChangedGroup);
+ }
+
+ private:
+ AllocationSiteRef site_;
+ ElementsKind kind_;
+};
+
+MapRef CompilationDependencies::DependOnInitialMap(
+ const JSFunctionRef& function) {
+ MapRef map = function.initial_map();
+ dependencies_.push_front(new (zone_) InitialMapDependency(function, map));
+ return map;
+}
+
+void CompilationDependencies::DependOnStableMap(const MapRef& map) {
+ if (map.CanTransition()) {
+ dependencies_.push_front(new (zone_) StableMapDependency(map));
+ } else {
+ DCHECK(map.is_stable());
+ }
+}
+
+void CompilationDependencies::DependOnTransition(const MapRef& target_map) {
+ if (target_map.CanBeDeprecated()) {
+ dependencies_.push_front(new (zone_) TransitionDependency(target_map));
+ } else {
+ DCHECK(!target_map.is_deprecated());
+ }
+}
+
+PretenureFlag CompilationDependencies::DependOnPretenureMode(
+ const AllocationSiteRef& site) {
+ PretenureFlag mode = site.GetPretenureMode();
+ dependencies_.push_front(new (zone_) PretenureModeDependency(site, mode));
+ return mode;
+}
+
+void CompilationDependencies::DependOnFieldType(const MapRef& map,
+ int descriptor) {
+ MapRef owner = map.FindFieldOwner(descriptor);
+ FieldTypeRef type = owner.GetFieldType(descriptor);
+ DCHECK(type.equals(map.GetFieldType(descriptor)));
+ dependencies_.push_front(new (zone_)
+ FieldTypeDependency(owner, descriptor, type));
+}
+
+void CompilationDependencies::DependOnGlobalProperty(
+ const PropertyCellRef& cell) {
+ PropertyCellType type = cell.property_details().cell_type();
+ bool read_only = cell.property_details().IsReadOnly();
+ dependencies_.push_front(new (zone_)
+ GlobalPropertyDependency(cell, type, read_only));
+}
+
+void CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
+ dependencies_.push_front(new (zone_) ProtectorDependency(cell));
+}
+
+void CompilationDependencies::DependOnElementsKind(
+ const AllocationSiteRef& site) {
+ // Do nothing if the object doesn't have any useful element transitions left.
+ ElementsKind kind = site.PointsToLiteral()
+ ? site.boilerplate().GetElementsKind()
+ : site.GetElementsKind();
+ if (AllocationSite::ShouldTrack(kind)) {
+ dependencies_.push_front(new (zone_) ElementsKindDependency(site, kind));
+ }
+}
+
+bool CompilationDependencies::AreValid() const {
+ for (auto dep : dependencies_) {
+ if (!dep->IsValid()) return false;
+ }
+ return true;
+}
+
+bool CompilationDependencies::Commit(Handle<Code> code) {
+ Isolate* isolate = code->GetIsolate();
+
+ // Check validity of all dependencies first, such that we can abort before
+ // installing anything.
+ if (!AreValid()) {
+ dependencies_.clear();
+ return false;
+ }
+
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ for (auto dep : dependencies_) {
+ dep->Install(isolate, cell);
+ }
+ dependencies_.clear();
+ return true;
+}
+
+namespace {
+void DependOnStablePrototypeChain(const JSHeapBroker* broker,
+ CompilationDependencies* deps,
+ Handle<Map> map,
+ MaybeHandle<JSReceiver> last_prototype) {
+ for (PrototypeIterator i(broker->isolate(), map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> const current =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ deps->DependOnStableMap(
+ MapRef(broker, handle(current->map(), broker->isolate())));
+ Handle<JSReceiver> last;
+ if (last_prototype.ToHandle(&last) && last.is_identical_to(current)) {
+ break;
+ }
+ }
+}
+} // namespace
+
+void CompilationDependencies::DependOnStablePrototypeChains(
+ const JSHeapBroker* broker, Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
+ Isolate* isolate = holder->GetIsolate();
+ // Determine actual holder and perform prototype chain checks.
+ for (auto map : receiver_maps) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context)
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate);
+ }
+ DependOnStablePrototypeChain(broker, this, map, holder);
+ }
+}
+
+void CompilationDependencies::DependOnElementsKinds(
+ const AllocationSiteRef& site) {
+ AllocationSiteRef current = site;
+ while (true) {
+ DependOnElementsKind(current);
+ if (!current.nested_site().IsAllocationSite()) break;
+ current = current.nested_site().AsAllocationSite();
+ }
+ CHECK_EQ(current.nested_site().AsSmi(), 0);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
new file mode 100644
index 0000000000..9770775c2e
--- /dev/null
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -0,0 +1,77 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILATION_DEPENDENCIES_H_
+#define V8_COMPILER_COMPILATION_DEPENDENCIES_H_
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/objects.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects and installs dependencies of the code that is being generated.
+class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
+ public:
+ CompilationDependencies(Isolate* isolate, Zone* zone);
+
+ V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
+
+ // Return the initial map of {function} and record the assumption that it
+ // stays the intial map.
+ MapRef DependOnInitialMap(const JSFunctionRef& function);
+
+ // Record the assumption that {map} stays stable.
+ void DependOnStableMap(const MapRef& map);
+
+ // Record the assumption that {target_map} can be transitioned to, i.e., that
+ // it does not become deprecated.
+ void DependOnTransition(const MapRef& target_map);
+
+ // Return the pretenure mode of {site} and record the assumption that it does
+ // not change.
+ PretenureFlag DependOnPretenureMode(const AllocationSiteRef& site);
+
+ // Record the assumption that the field type of a field does not change. The
+ // field is identified by the arguments.
+ void DependOnFieldType(const MapRef& map, int descriptor);
+
+ // Record the assumption that neither {cell}'s {CellType} changes, nor the
+ // {IsReadOnly()} flag of {cell}'s {PropertyDetails}.
+ void DependOnGlobalProperty(const PropertyCellRef& cell);
+
+ // Record the assumption that the protector remains valid.
+ void DependOnProtector(const PropertyCellRef& cell);
+
+ // Record the assumption that {site}'s {ElementsKind} doesn't change.
+ void DependOnElementsKind(const AllocationSiteRef& site);
+
+ // Depend on the stability of (the maps of) all prototypes of every class in
+ // {receiver_type} up to (and including) the {holder}.
+ // TODO(neis): Fully brokerize!
+ void DependOnStablePrototypeChains(
+ const JSHeapBroker* broker, Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder);
+
+ // Like DependOnElementsKind but also applies to all nested allocation sites.
+ void DependOnElementsKinds(const AllocationSiteRef& site);
+
+ // Exposed only for testing purposes.
+ bool AreValid() const;
+
+ // Exposed only because C++.
+ class Dependency;
+
+ private:
+ Zone* zone_;
+ ZoneForwardList<Dependency*> dependencies_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_COMPILATION_DEPENDENCIES_H_
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 5e45746d97..1811c06f98 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -11,12 +11,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph)
- : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+ConstantFoldingReducer::ConstantFoldingReducer(
+ Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker) {}
ConstantFoldingReducer::~ConstantFoldingReducer() {}
Reduction ConstantFoldingReducer::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
@@ -33,10 +37,11 @@ Reduction ConstantFoldingReducer::Reduce(Node* node) {
if (!upper.IsNone()) {
Node* replacement = nullptr;
if (upper.IsHeapConstant()) {
- replacement = jsgraph()->Constant(upper.AsHeapConstant()->Value());
+ replacement = jsgraph()->Constant(upper.AsHeapConstant()->Ref());
} else if (upper.Is(Type::MinusZero())) {
Factory* factory = jsgraph()->isolate()->factory();
- replacement = jsgraph()->Constant(factory->minus_zero_value());
+ ObjectRef minus_zero(js_heap_broker(), factory->minus_zero_value());
+ replacement = jsgraph()->Constant(minus_zero);
} else if (upper.Is(Type::NaN())) {
replacement = jsgraph()->NaNConstant();
} else if (upper.Is(Type::Null())) {
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index 2085d59e5e..b111e5b878 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -17,7 +17,8 @@ class JSGraph;
class V8_EXPORT_PRIVATE ConstantFoldingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph);
+ ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker);
~ConstantFoldingReducer() final;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -26,8 +27,10 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
private:
JSGraph* jsgraph() const { return jsgraph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
+ const JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 9c8fd70011..424db00fc4 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -46,6 +46,7 @@ Node* FindDeadInput(Node* node) {
} // namespace
Reduction DeadCodeElimination::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kEnd:
return ReduceEnd(node);
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index e1333052d7..cc6ca954f3 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -52,6 +52,10 @@ struct Diamond {
Node* Phi(MachineRepresentation rep, Node* tv, Node* fv) {
return graph->NewNode(common->Phi(rep, 2), tv, fv, merge);
}
+
+ Node* EffectPhi(Node* tv, Node* fv) {
+ return graph->NewNode(common->EffectPhi(2), tv, fv, merge);
+ }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 921bfd7852..9a3a293055 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -935,9 +935,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kLoadTypedElement:
result = LowerLoadTypedElement(node);
break;
+ case IrOpcode::kLoadDataViewElement:
+ result = LowerLoadDataViewElement(node);
+ break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
+ case IrOpcode::kStoreDataViewElement:
+ LowerStoreDataViewElement(node);
+ break;
case IrOpcode::kStoreSignedSmallElement:
LowerStoreSignedSmallElement(node);
break;
@@ -1038,14 +1044,16 @@ Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
__ Bind(&if_smi);
}
- if (machine()->Is64()) {
+ if (SmiValuesAre32Bits()) {
Node* value_smi = ChangeInt32ToSmi(value32);
__ Goto(&done, value_smi);
} else {
+ DCHECK(SmiValuesAre31Bits());
Node* add = __ Int32AddWithOverflow(value32, value32);
Node* ovf = __ Projection(1, add);
__ GotoIf(ovf, &if_heapnumber);
Node* value_smi = __ Projection(0, add);
+ value_smi = ChangeInt32ToIntPtr(value_smi);
__ Goto(&done, value_smi);
}
}
@@ -1089,9 +1097,10 @@ Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
- if (machine()->Is64()) {
+ if (SmiValuesAre32Bits()) {
return ChangeInt32ToSmi(value);
}
+ DCHECK(SmiValuesAre31Bits());
auto if_overflow = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -1099,7 +1108,9 @@ Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* add = __ Int32AddWithOverflow(value, value);
Node* ovf = __ Projection(1, add);
__ GotoIf(ovf, &if_overflow);
- __ Goto(&done, __ Projection(0, add));
+ Node* value_smi = __ Projection(0, add);
+ value_smi = ChangeInt32ToIntPtr(value_smi);
+ __ Goto(&done, value_smi);
__ Bind(&if_overflow);
Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
@@ -1529,8 +1540,8 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- __ DeoptimizeIfNot(DeoptimizeReasonOf(node->op()), VectorSlotPair(), value,
- frame_state);
+ const CheckIfParameters& p = CheckIfParametersOf(node->op());
+ __ DeoptimizeIfNot(p.reason(), p.feedback(), value, frame_state);
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
@@ -1789,7 +1800,9 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* check = __ Projection(1, add);
__ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
frame_state);
- return __ Projection(0, add);
+ Node* result = __ Projection(0, add);
+ result = ChangeInt32ToIntPtr(result);
+ return result;
}
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
@@ -2035,7 +2048,7 @@ Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
__ NoContextConstant());
}
@@ -2491,7 +2504,7 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2503,7 +2516,7 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2586,6 +2599,11 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
PretenureFlag const pretenure = PretenureFlagOf(node->op());
Node* length = node->InputAt(0);
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ Node* zero_length = __ Word32Equal(length, __ Int32Constant(0));
+ __ GotoIf(zero_length, &done,
+ jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+
// Compute the effective size of the backing store.
Node* size =
__ Int32Add(__ Word32Shl(length, __ Int32Constant(kDoubleSizeLog2)),
@@ -2604,14 +2622,13 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
Node* the_hole =
__ LoadField(AccessBuilder::ForHeapNumberValue(), __ TheHoleConstant());
auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
- auto done_loop = __ MakeLabel();
__ Goto(&loop, __ IntPtrConstant(0));
__ Bind(&loop);
{
// Check if we've initialized everything.
Node* index = loop.PhiAt(0);
Node* check = __ UintLessThan(index, limit);
- __ GotoIfNot(check, &done_loop);
+ __ GotoIfNot(check, &done, result);
// Storing "the_hole" doesn't need a write barrier.
StoreRepresentation rep(MachineRepresentation::kFloat64, kNoWriteBarrier);
@@ -2625,14 +2642,19 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
__ Goto(&loop, index);
}
- __ Bind(&done_loop);
- return result;
+ __ Bind(&done);
+ return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
PretenureFlag const pretenure = PretenureFlagOf(node->op());
Node* length = node->InputAt(0);
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ Node* zero_length = __ Word32Equal(length, __ Int32Constant(0));
+ __ GotoIf(zero_length, &done,
+ jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+
// Compute the effective size of the backing store.
Node* size =
__ Int32Add(__ Word32Shl(length, __ Int32Constant(kPointerSizeLog2)),
@@ -2648,14 +2670,13 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
Node* limit = ChangeUint32ToUintPtr(length);
Node* the_hole = __ TheHoleConstant();
auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
- auto done_loop = __ MakeLabel();
__ Goto(&loop, __ IntPtrConstant(0));
__ Bind(&loop);
{
// Check if we've initialized everything.
Node* index = loop.PhiAt(0);
Node* check = __ UintLessThan(index, limit);
- __ GotoIfNot(check, &done_loop);
+ __ GotoIfNot(check, &done, result);
// Storing "the_hole" doesn't need a write barrier.
StoreRepresentation rep(MachineRepresentation::kTagged, kNoWriteBarrier);
@@ -2669,8 +2690,8 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
__ Goto(&loop, index);
}
- __ Bind(&done_loop);
- return result;
+ __ Bind(&done);
+ return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
@@ -2683,7 +2704,7 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
length, __ SmiConstant(mapped_count), __ NoContextConstant());
}
@@ -2756,7 +2777,7 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -2778,7 +2799,7 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
__ NoContextConstant());
}
@@ -2936,8 +2957,7 @@ Node* EffectControlLinearizer::LowerStringCodePointAt(
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
- MachineType::TaggedSigned());
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
position, __ NoContextConstant());
}
@@ -3046,7 +3066,7 @@ Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
__ NoContextConstant());
}
@@ -3215,7 +3235,7 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
search_string, position, __ NoContextConstant());
}
@@ -3234,7 +3254,7 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -3249,7 +3269,7 @@ Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
start, end, __ NoContextConstant());
}
@@ -3352,15 +3372,19 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
frame_state);
// Try to find the {val} in the string table.
- MachineSignature::Builder builder(graph()->zone(), 1, 1);
+ MachineSignature::Builder builder(graph()->zone(), 1, 2);
builder.AddReturn(MachineType::AnyTagged());
+ builder.AddParam(MachineType::Pointer());
builder.AddParam(MachineType::AnyTagged());
Node* try_internalize_string_function = __ ExternalConstant(
ExternalReference::try_internalize_string_function());
+ Node* const isolate_ptr =
+ __ ExternalConstant(ExternalReference::isolate_address(isolate()));
auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
- Node* val_internalized = __ Call(common()->Call(call_descriptor),
- try_internalize_string_function, val);
+ Node* val_internalized =
+ __ Call(common()->Call(call_descriptor),
+ try_internalize_string_function, isolate_ptr, val);
// Now see if the results match.
__ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
@@ -3536,7 +3560,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
object, __ NoContextConstant());
__ Goto(&done, result);
@@ -3572,8 +3596,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
- properties);
+ graph()->zone(), callable.descriptor(), 0, call_flags, properties);
Node* new_elements =
__ Call(call_descriptor, __ HeapConstant(callable.code()), object,
ChangeInt32ToSmi(index), __ NoContextConstant());
@@ -3729,6 +3752,350 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
+ ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* storage = node->InputAt(1);
+ Node* index = node->InputAt(2);
+ Node* is_little_endian = node->InputAt(3);
+
+ // We need to keep the {buffer} alive so that the GC will not release the
+ // ArrayBuffer (if there's any) as long as we are still operating on it.
+ __ Retain(buffer);
+
+ ElementAccess access_int8 = AccessBuilder::ForTypedArrayElement(
+ kExternalInt8Array, true, LoadSensitivity::kCritical);
+ ElementAccess access_uint8 = AccessBuilder::ForTypedArrayElement(
+ kExternalUint8Array, true, LoadSensitivity::kCritical);
+
+ switch (element_type) {
+ case kExternalUint8Array:
+ return __ LoadElement(access_uint8, storage, index);
+
+ case kExternalInt8Array:
+ return __ LoadElement(access_int8, storage, index);
+
+ case kExternalUint16Array: // Fall through.
+ case kExternalInt16Array: {
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // If we're doing an Int16 load, sign-extend the most significant byte
+ // by loading it as an Int8 instead of Uint8.
+ ElementAccess access_msb =
+ element_type == kExternalInt16Array ? access_int8 : access_uint8;
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian load.
+ Node* b0 = __ LoadElement(access_uint8, storage, index);
+ Node* b1 = __ LoadElement(access_msb, storage,
+ __ Int32Add(index, __ Int32Constant(1)));
+
+ // result = (b1 << 8) + b0
+ Node* result = __ Int32Add(__ Word32Shl(b1, __ Int32Constant(8)), b0);
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian load.
+ Node* b0 = __ LoadElement(access_msb, storage, index);
+ Node* b1 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(1)));
+
+ // result = (b0 << 8) + b1;
+ Node* result = __ Int32Add(__ Word32Shl(b0, __ Int32Constant(8)), b1);
+ __ Goto(&done, result);
+ }
+
+ // We're done, return {result}.
+ __ Bind(&done);
+ return done.PhiAt(0);
+ }
+
+ case kExternalUint32Array: // Fall through.
+ case kExternalInt32Array: // Fall through.
+ case kExternalFloat32Array: {
+ Node* b0 = __ LoadElement(access_uint8, storage, index);
+ Node* b1 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(1)));
+ Node* b2 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(2)));
+ Node* b3 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(3)));
+
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian load.
+ // result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ Node* result =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
+ __ Word32Shl(b2, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian load.
+ // result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ Node* result =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
+ __ Word32Shl(b1, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
+ __ Goto(&done, result);
+ }
+
+ // We're done, return {result}.
+ __ Bind(&done);
+ if (element_type == kExternalFloat32Array) {
+ return __ BitcastInt32ToFloat32(done.PhiAt(0));
+ } else {
+ return done.PhiAt(0);
+ }
+ }
+
+ case kExternalFloat64Array: {
+ Node* b0 = __ LoadElement(access_uint8, storage, index);
+ Node* b1 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(1)));
+ Node* b2 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(2)));
+ Node* b3 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(3)));
+ Node* b4 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(4)));
+ Node* b5 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(5)));
+ Node* b6 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(6)));
+ Node* b7 = __ LoadElement(access_uint8, storage,
+ __ Int32Add(index, __ Int32Constant(7)));
+
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32,
+ MachineRepresentation::kWord32);
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian load.
+ // low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ // high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ Node* low_word =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
+ __ Word32Shl(b2, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
+ Node* high_word =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b7, __ Int32Constant(24)),
+ __ Word32Shl(b6, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b5, __ Int32Constant(8)), b4));
+ __ Goto(&done, low_word, high_word);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian load.
+ // high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ // low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ Node* high_word =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
+ __ Word32Shl(b1, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
+ Node* low_word =
+ __ Word32Or(__ Word32Or(__ Word32Shl(b4, __ Int32Constant(24)),
+ __ Word32Shl(b5, __ Int32Constant(16))),
+ __ Word32Or(__ Word32Shl(b6, __ Int32Constant(8)), b7));
+ __ Goto(&done, low_word, high_word);
+ }
+
+ // We're done, store the low and high words into a float64.
+ __ Bind(&done);
+ Node* result = __ Float64Constant(0.0);
+ result = __ Float64InsertLowWord32(result, done.PhiAt(0));
+ result = __ Float64InsertHighWord32(result, done.PhiAt(1));
+ return result;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
+ ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* storage = node->InputAt(1);
+ Node* index = node->InputAt(2);
+ Node* value = node->InputAt(3);
+ Node* is_little_endian = node->InputAt(4);
+
+ // We need to keep the {buffer} alive so that the GC will not release the
+ // ArrayBuffer (if there's any) as long as we are still operating on it.
+ __ Retain(buffer);
+
+ ElementAccess access =
+ AccessBuilder::ForTypedArrayElement(kExternalUint8Array, true);
+
+ switch (element_type) {
+ case kExternalUint8Array: // Fall through.
+ case kExternalInt8Array: {
+ Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
+ __ StoreElement(access, storage, index, b0);
+ break;
+ }
+ case kExternalUint16Array: // Fall through.
+ case kExternalInt16Array: {
+ Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
+ Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
+ __ Int32Constant(0xFF));
+
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel();
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian store.
+ __ StoreElement(access, storage, index, b0);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b1);
+ __ Goto(&done);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian store.
+ __ StoreElement(access, storage, index, b1);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b0);
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
+ break;
+ }
+
+ case kExternalUint32Array: // Fall through.
+ case kExternalInt32Array: // Fall through.
+ case kExternalFloat32Array: {
+ if (element_type == kExternalFloat32Array) {
+ value = __ BitcastFloat32ToInt32(value);
+ }
+
+ Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
+ Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
+ __ Int32Constant(0xFF));
+ Node* b2 = __ Word32And(__ Word32Shr(value, __ Int32Constant(16)),
+ __ Int32Constant(0xFF));
+ Node* b3 = __ Word32Shr(value, __ Int32Constant(24));
+
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel();
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian store.
+ __ StoreElement(access, storage, index, b0);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b1);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(2)), b2);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(3)), b3);
+ __ Goto(&done);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian store.
+ __ StoreElement(access, storage, index, b3);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b2);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(2)), b1);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(3)), b0);
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
+ break;
+ }
+
+ case kExternalFloat64Array: {
+ Node* low_word = __ Float64ExtractLowWord32(value);
+ Node* high_word = __ Float64ExtractHighWord32(value);
+
+ Node* b0 = __ Word32And(low_word, __ Int32Constant(0xFF));
+ Node* b1 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(8)),
+ __ Int32Constant(0xFF));
+ Node* b2 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(16)),
+ __ Int32Constant(0xFF));
+ Node* b3 = __ Word32Shr(low_word, __ Int32Constant(24));
+
+ Node* b4 = __ Word32And(high_word, __ Int32Constant(0xFF));
+ Node* b5 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(8)),
+ __ Int32Constant(0xFF));
+ Node* b6 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(16)),
+ __ Int32Constant(0xFF));
+ Node* b7 = __ Word32Shr(high_word, __ Int32Constant(24));
+
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel();
+
+ __ GotoIfNot(is_little_endian, &big_endian);
+ {
+ // Little-endian store.
+ __ StoreElement(access, storage, index, b0);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b1);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(2)), b2);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(3)), b3);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(4)), b4);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(5)), b5);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(6)), b6);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(7)), b7);
+ __ Goto(&done);
+ }
+
+ __ Bind(&big_endian);
+ {
+ // Big-endian store.
+ __ StoreElement(access, storage, index, b7);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(1)), b6);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(2)), b5);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(3)), b4);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(4)), b3);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(5)), b2);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(6)), b1);
+ __ StoreElement(access, storage,
+ __ Int32Add(index, __ Int32Constant(7)), b0);
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
@@ -4178,8 +4545,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
@@ -4214,8 +4580,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
@@ -4576,8 +4941,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
+ graph()->zone(), callable.descriptor(), 0, flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
key, __ NoContextConstant());
}
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 7f297f6a12..47e0a249cf 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -151,7 +151,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
Node* LowerLoadTypedElement(Node* node);
+ Node* LowerLoadDataViewElement(Node* node);
void LowerStoreTypedElement(Node* node);
+ void LowerStoreDataViewElement(Node* node);
void LowerStoreSignedSmallElement(Node* node);
Node* LowerFindOrderedHashMapEntry(Node* node);
Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 77a8de2c66..976be6d906 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -100,6 +100,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
Reduction ReplaceNode(Node* original, Node* replacement);
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
EscapeAnalysisResult analysis_result() const { return analysis_result_; }
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 78349799ef..0e6822a9ca 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -9,7 +9,6 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Fix zone-handle-set.h instead!
#ifdef DEBUG
#define TRACE(...) \
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 504729bc81..71aae6b2a4 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -180,6 +180,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysis final
private:
void Reduce(Node* node, Reduction* reduction);
JSGraph* jsgraph() { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
EscapeAnalysisTracker* tracker_;
JSGraph* jsgraph_;
};
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 64f4663636..9780d227fd 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -247,9 +247,9 @@ Operator const* GraphAssembler::ToNumberOperator() {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- Operator::kEliminatable);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
+ 0, flags, Operator::kEliminatable);
to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index e7d74c8721..f9b45a2007 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -28,7 +28,10 @@ namespace compiler {
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
V(TruncateFloat64ToWord32) \
+ V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastFloat32ToInt32) \
V(Float64Abs)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
@@ -60,6 +63,8 @@ namespace compiler {
V(Float64Equal) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
V(Word32Equal) \
V(WordEqual)
@@ -258,6 +263,7 @@ class GraphAssembler {
Operator const* ToNumberOperator();
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
Graph* graph() const { return jsgraph_->graph(); }
Zone* temp_zone() const { return temp_zone_; }
CommonOperatorBuilder* common() const { return jsgraph()->common(); }
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 8631810ebd..dc7b23521f 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -90,9 +90,8 @@ Reduction GraphReducer::Reduce(Node* const node) {
// all the other reducers for this node, as now there may be more
// opportunities for reduction.
if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- In-place update of " << *node << " by reducer "
- << (*i)->reducer_name() << std::endl;
+ StdoutStream{} << "- In-place update of " << *node << " by reducer "
+ << (*i)->reducer_name() << std::endl;
}
skip = i;
i = reducers_.begin();
@@ -100,10 +99,9 @@ Reduction GraphReducer::Reduce(Node* const node) {
} else {
// {node} was replaced by another node.
if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- Replacement of " << *node << " with "
- << *(reduction.replacement()) << " by reducer "
- << (*i)->reducer_name() << std::endl;
+ StdoutStream{} << "- Replacement of " << *node << " with "
+ << *(reduction.replacement()) << " by reducer "
+ << (*i)->reducer_name() << std::endl;
}
return reduction;
}
diff --git a/deps/v8/src/compiler/graph-trimmer.cc b/deps/v8/src/compiler/graph-trimmer.cc
index 74626fe67f..c3de2cd809 100644
--- a/deps/v8/src/compiler/graph-trimmer.cc
+++ b/deps/v8/src/compiler/graph-trimmer.cc
@@ -34,9 +34,8 @@ void GraphTrimmer::TrimGraph() {
Node* const user = edge.from();
if (!IsLive(user)) {
if (FLAG_trace_turbo_trimming) {
- OFStream os(stdout);
- os << "DeadLink: " << *user << "(" << edge.index() << ") -> " << *live
- << std::endl;
+ StdoutStream{} << "DeadLink: " << *user << "(" << edge.index()
+ << ") -> " << *live << std::endl;
}
edge.UpdateTo(nullptr);
}
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 5f05919ed7..feb0a8e9d3 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -32,12 +32,19 @@ namespace v8 {
namespace internal {
namespace compiler {
+const char* get_cached_trace_turbo_filename(OptimizedCompilationInfo* info) {
+ if (!info->trace_turbo_filename()) {
+ info->set_trace_turbo_filename(
+ GetVisualizerLogFileName(info, FLAG_trace_turbo_path, nullptr, "json"));
+ }
+ return info->trace_turbo_filename();
+}
+
TurboJsonFile::TurboJsonFile(OptimizedCompilationInfo* info,
std::ios_base::openmode mode)
- : std::ofstream(
- GetVisualizerLogFileName(info, FLAG_trace_turbo_path, nullptr, "json")
- .get(),
- mode) {}
+ : std::ofstream(get_cached_trace_turbo_filename(info), mode) {}
+
+TurboJsonFile::~TurboJsonFile() { flush(); }
std::ostream& operator<<(std::ostream& out,
const SourcePositionAsJSON& asJSON) {
@@ -128,7 +135,7 @@ void JsonPrintAllSourceWithPositions(std::ostream& os,
Handle<Script> script =
(info->shared_info().is_null() || !info->shared_info()->script())
? Handle<Script>()
- : handle(Script::cast(info->shared_info()->script()));
+ : handle(Script::cast(info->shared_info()->script()), isolate);
JsonPrintFunctionSource(os, -1,
info->shared_info().is_null()
? std::unique_ptr<char[]>(new char[1]{0})
@@ -141,8 +148,8 @@ void JsonPrintAllSourceWithPositions(std::ostream& os,
Handle<SharedFunctionInfo> shared = inlined[id].shared_info;
const int source_id = id_assigner.GetIdFor(shared);
JsonPrintFunctionSource(os, source_id, shared->DebugName()->ToCString(),
- handle(Script::cast(shared->script())), isolate,
- shared, true);
+ handle(Script::cast(shared->script()), isolate),
+ isolate, shared, true);
}
os << "}, ";
os << "\"inlinings\" : {";
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index b005927edc..4b95169215 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -30,6 +30,7 @@ class SourcePositionTable;
struct TurboJsonFile : public std::ofstream {
TurboJsonFile(OptimizedCompilationInfo* info, std::ios_base::openmode mode);
+ ~TurboJsonFile();
};
struct SourcePositionAsJSON {
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 373d6d7ee1..fea76bff81 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -73,10 +73,7 @@ NodeId Graph::NextNodeId() {
return id;
}
-void Graph::Print() const {
- OFStream os(stdout);
- os << AsRPO(*this);
-}
+void Graph::Print() const { StdoutStream{} << AsRPO(*this); }
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index a150c3f12d..9aef138811 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -206,17 +206,24 @@ class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- XMMRegister input)
+ XMMRegister input, StubCallMode stub_mode)
: OutOfLineCode(gen),
result_(result),
input_(input),
+ stub_mode_(stub_mode),
isolate_(gen->isolate()),
zone_(gen->zone()) {}
void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
- __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
+ }
__ mov(result_, MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -224,6 +231,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
private:
Register const result_;
XMMRegister const input_;
+ StubCallMode stub_mode_;
Isolate* isolate_;
Zone* zone_;
};
@@ -412,6 +420,30 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (0)
+#define ASSEMBLE_SIMD_PUNPCK_SHUFFLE(opcode) \
+ do { \
+ XMMRegister src0 = i.InputSimd128Register(0); \
+ Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(i.OutputSimd128Register(), src0, src1); \
+ } else { \
+ DCHECK_EQ(i.OutputSimd128Register(), src0); \
+ __ opcode(i.OutputSimd128Register(), src1); \
+ } \
+ } while (false)
+
+#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputOperand(1), imm); \
+ } else { \
+ CpuFeatureScope sse_scope(tasm(), SSELevel); \
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
+ }
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -587,6 +619,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
@@ -601,15 +636,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallWasmFunction: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- if (info()->IsWasm()) {
- __ wasm_call(wasm_code, RelocInfo::WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
+ __ wasm_call(wasm_code, constant.rmode());
} else {
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ RetpolineCall(wasm_code, constant.rmode());
} else {
- __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ call(wasm_code, constant.rmode());
}
}
} else {
@@ -636,6 +671,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
@@ -650,13 +688,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallWasm: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- if (info()->IsWasm()) {
- __ jmp(wasm_code, RelocInfo::WASM_CALL);
- } else {
- __ jmp(wasm_code, RelocInfo::JS_TO_WASM_CALL);
- }
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ jmp(wasm_code, constant.rmode());
} else {
Register reg = i.InputRegister(0);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
@@ -673,6 +707,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MoveOperandIfAliasedWithPoisonRegister(instr, this);
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -765,6 +802,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -819,13 +859,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), ebp);
}
break;
- case kArchRootsPointer:
- // TODO(jgruber,v8:6666): Implement ia32 support.
- UNREACHABLE();
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
- auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+ auto ool = new (zone()) OutOfLineTruncateDoubleToI(
+ this, result, input, DetermineStubCallMode());
__ cvttsd2si(result, Operand(input));
__ cmp(result, 1);
__ j(overflow, ool->entry());
@@ -1972,6 +2010,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(2), i.InputInt8(1));
break;
}
+ case kSSEI32x4SConvertF32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ // NAN->0
+ __ movaps(kScratchDoubleReg, dst);
+ __ cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
+ __ pand(dst, kScratchDoubleReg);
+ // Set top bit if >= 0 (but not -0.0!)
+ __ pxor(kScratchDoubleReg, dst);
+ // Convert
+ __ cvttps2dq(dst, dst);
+ // Set top bit if >=0 is now < 0
+ __ pand(kScratchDoubleReg, dst);
+ __ psrad(kScratchDoubleReg, 31);
+ // Set positive overflow lanes to 0x7FFFFFFF
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXI32x4SConvertF32x4: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ // NAN->0
+ __ vcmpeqps(kScratchDoubleReg, src, src);
+ __ vpand(dst, src, kScratchDoubleReg);
+ // Set top bit if >= 0 (but not -0.0!)
+ __ vpxor(kScratchDoubleReg, kScratchDoubleReg, dst);
+ // Convert
+ __ vcvttps2dq(dst, dst);
+ // Set top bit if >=0 is now < 0
+ __ vpand(kScratchDoubleReg, kScratchDoubleReg, dst);
+ __ vpsrad(kScratchDoubleReg, kScratchDoubleReg, 31);
+ // Set positive overflow lanes to 0x7FFFFFFF
+ __ vpxor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kIA32I32x4SConvertI16x8Low: {
+ __ Pmovsxwd(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32I32x4SConvertI16x8High: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Palignr(dst, i.InputOperand(0), 8);
+ __ Pmovsxwd(dst, dst);
+ break;
+ }
case kIA32I32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
@@ -2131,6 +2215,71 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
+ case kSSEI32x4UConvertF32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+ // NAN->0, negative->0
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ maxps(dst, kScratchDoubleReg);
+ // scratch: float representation of max_signed
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1); // 0x7fffffff
+ __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000
+ // tmp: convert (src-max_signed).
+ // Positive overflow lanes -> 0x7FFFFFFF
+ // Negative lanes -> 0
+ __ movaps(tmp, dst);
+ __ subps(tmp, kScratchDoubleReg);
+ __ cmpleps(kScratchDoubleReg, tmp);
+ __ cvttps2dq(tmp, tmp);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ pmaxsd(tmp, kScratchDoubleReg);
+ // convert. Overflow lanes above max_signed will be 0x80000000
+ __ cvttps2dq(dst, dst);
+ // Add (src-max_signed) for overflow lanes.
+ __ paddd(dst, tmp);
+ break;
+ }
+ case kAVXI32x4UConvertF32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+ // NAN->0, negative->0
+ __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmaxps(dst, dst, kScratchDoubleReg);
+ // scratch: float representation of max_signed
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1); // 0x7fffffff
+ __ vcvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000
+ // tmp: convert (src-max_signed).
+ // Positive overflow lanes -> 0x7FFFFFFF
+ // Negative lanes -> 0
+ __ vsubps(tmp, dst, kScratchDoubleReg);
+ __ vcmpleps(kScratchDoubleReg, kScratchDoubleReg, tmp);
+ __ vcvttps2dq(tmp, tmp);
+ __ vpxor(tmp, tmp, kScratchDoubleReg);
+ __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpmaxsd(tmp, tmp, kScratchDoubleReg);
+ // convert. Overflow lanes above max_signed will be 0x80000000
+ __ vcvttps2dq(dst, dst);
+ // Add (src-max_signed) for overflow lanes.
+ __ vpaddd(dst, dst, tmp);
+ break;
+ }
+ case kIA32I32x4UConvertI16x8Low: {
+ __ Pmovzxwd(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32I32x4UConvertI16x8High: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Palignr(dst, i.InputOperand(0), 8);
+ __ Pmovzxwd(dst, dst);
+ break;
+ }
case kSSEI32x4ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
@@ -2229,6 +2378,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(2), i.InputInt8(1));
break;
}
+ case kIA32I16x8SConvertI8x16Low: {
+ __ Pmovsxbw(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32I16x8SConvertI8x16High: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Palignr(dst, i.InputOperand(0), 8);
+ __ Pmovsxbw(dst, dst);
+ break;
+ }
case kIA32I16x8Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
@@ -2417,6 +2576,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
+ case kIA32I16x8UConvertI8x16Low: {
+ __ Pmovzxbw(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32I16x8UConvertI8x16High: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Palignr(dst, i.InputOperand(0), 8);
+ __ Pmovzxbw(dst, dst);
+ break;
+ }
case kSSEI16x8ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
@@ -2441,6 +2610,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8UConvertI32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
// Change negative lanes to 0x7FFFFFFF
@@ -2584,29 +2754,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
-
-#define I8x16_SPLAT(reg, scratch, v) \
- __ Move(reg, static_cast<uint32_t>(v)); \
- __ Pxor(scratch, scratch); \
- __ Pshufb(reg, scratch)
-
case kSSEI8x16Shl: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister src = i.InputSimd128Register(0);
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
-
- // src = AAaa ... AAaa
- // tmp = 0F0F ... 0F0F (shift=4)
- I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
-
- // src = src & tmp
- // => 0A0a ... 0A0a
- __ pand(src, tmp);
-
- // src = src << shift
- // => A0a0 ... A0a0 (shift=4)
- __ pslld(src, shift);
+ if (shift < 4) {
+ // For small shifts, doubling is faster.
+ for (int i = 0; i < shift; ++i) {
+ __ paddb(dst, dst);
+ }
+ } else {
+ // Mask off the unwanted bits before word-shifting.
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlw(kScratchDoubleReg, 8 + shift);
+ __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pand(dst, kScratchDoubleReg);
+ __ psllw(dst, shift);
+ }
break;
}
case kAVXI8x16Shl: {
@@ -2614,94 +2778,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp =
- dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
-
- // src = AAaa ... AAaa
- // tmp = 0F0F ... 0F0F (shift=4)
- I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
-
- // dst = src & tmp
- // => 0A0a ... 0A0a
- __ vpand(dst, src, tmp);
-
- // dst = dst << shift
- // => A0a0 ... A0a0 (shift=4)
- __ vpslld(dst, dst, shift);
- break;
- }
- case kSSEI8x16ShrS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister src = i.InputSimd128Register(0);
- int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
-
- // I16x8 view of I8x16
- // src = AAaa AAaa ... AAaa AAaa
-
- // tmp = aa00 aa00 ... aa00 aa00
- __ movaps(tmp, src);
- __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
- __ psllw(tmp, 8);
-
- // src = I16x8ShrS(src, shift)
- // => SAAa SAAa ... SAAa SAAa (shift=4)
- __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ psraw(src, shift);
-
- // tmp = I16x8ShrS(tmp, shift)
- // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
- __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ psraw(tmp, shift);
-
- // src = I16x8And(src, 0xff00)
- // => SA00 SA00 ... SA00 SA00
- __ pand(src, kScratchDoubleReg);
-
- // tmp = I16x8ShrU(tmp, 8)
- // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
- __ psrlw(tmp, 8);
-
- // src = I16x8Or(src, tmp)
- // => SASa SASa ... SASa SASa (shift=4)
- __ por(src, tmp);
+ if (shift < 4) {
+ // For small shifts, doubling is faster.
+ for (int i = 0; i < shift; ++i) {
+ __ vpaddb(dst, src, src);
+ src = dst;
+ }
+ } else {
+ // Mask off the unwanted bits before word-shifting.
+ __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 8 + shift);
+ __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpand(dst, src, kScratchDoubleReg);
+ __ vpsllw(dst, dst, shift);
+ }
break;
}
- case kAVXI8x16ShrS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
-
- // I16x8 view of I8x16
- // src = AAaa AAaa ... AAaa AAaa
-
- // tmp = aa00 aa00 ... aa00 aa00
- __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
- __ vpsllw(tmp, src, 8);
-
- // dst = I16x8ShrS(src, shift)
- // => SAAa SAAa ... SAAa SAAa (shift=4)
- __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ vpsraw(dst, src, shift);
-
- // tmp = I16x8ShrS(tmp, shift)
- // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
- __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ vpsraw(tmp, tmp, shift);
-
- // dst = I16x8And(dst, 0xff00)
- // => SA00 SA00 ... SA00 SA00
- __ vpand(dst, dst, kScratchDoubleReg);
-
- // tmp = I16x8ShrU(tmp, 8)
- // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
- __ vpsrlw(tmp, tmp, 8);
-
- // dst = I16x8Or(dst, tmp)
- // => SASa SASa ... SASa SASa (shift=4)
- __ vpor(dst, dst, tmp);
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ __ Punpckhbw(kScratchDoubleReg, src);
+ __ Punpcklbw(dst, src);
+ __ Psraw(kScratchDoubleReg, 8 + shift);
+ __ Psraw(dst, 8 + shift);
+ __ Packsswb(dst, kScratchDoubleReg);
break;
}
case kSSEI8x16Add: {
@@ -2749,44 +2851,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEI8x16Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
XMMRegister right = i.InputSimd128Register(1);
- XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
- XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
// I16x8 view of I8x16
// left = AAaa AAaa ... AAaa AAaa
// right= BBbb BBbb ... BBbb BBbb
- // t0 = 00AA 00AA ... 00AA 00AA
- // t1 = 00BB 00BB ... 00BB 00BB
- __ movaps(t0, left);
- __ movaps(t1, right);
- __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
- __ psrlw(t0, 8);
- __ psrlw(t1, 8);
-
- // left = I16x8Mul(left, right)
- // => __pp __pp ... __pp __pp
- // t0 = I16x8Mul(t0, t1)
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ __ movaps(tmp, dst);
+ __ movaps(kScratchDoubleReg, right);
+ __ psrlw(tmp, 8);
+ __ psrlw(kScratchDoubleReg, 8);
+ // dst = left * 256
+ __ psllw(dst, 8);
+
+ // t = I16x8Mul(t, s)
// => __PP __PP ... __PP __PP
- __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ pmullw(t0, t1);
- __ pmullw(left, right);
- __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ pmullw(tmp, kScratchDoubleReg);
+ // dst = I16x8Mul(left * 256, right)
+ // => pp__ pp__ ... pp__ pp__
+ __ pmullw(dst, right);
- // t0 = I16x8Shl(t0, 8)
+ // t = I16x8Shl(t, 8)
// => PP00 PP00 ... PP00 PP00
- __ psllw(t0, 8);
+ __ psllw(tmp, 8);
- // left = I16x8And(left, 0x00ff)
+ // dst = I16x8Shr(dst, 8)
// => 00pp 00pp ... 00pp 00pp
- __ pand(left, kScratchDoubleReg);
+ __ psrlw(dst, 8);
- // left = I16x8Or(left, t0)
+ // dst = I16x8Or(dst, t)
// => PPpp PPpp ... PPpp PPpp
- __ por(left, t0);
+ __ por(dst, tmp);
break;
}
case kAVXI8x16Mul: {
@@ -2794,40 +2894,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister left = i.InputSimd128Register(0);
XMMRegister right = i.InputSimd128Register(1);
- XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
- XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
// I16x8 view of I8x16
// left = AAaa AAaa ... AAaa AAaa
// right= BBbb BBbb ... BBbb BBbb
- // t0 = 00AA 00AA ... 00AA 00AA
- // t1 = 00BB 00BB ... 00BB 00BB
- __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
- __ vpsrlw(t0, left, 8);
- __ vpsrlw(t1, right, 8);
-
- // dst = I16x8Mul(left, right)
- // => __pp __pp ... __pp __pp
- __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
- __ vpmullw(dst, left, right);
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ __ vpsrlw(tmp, left, 8);
+ __ vpsrlw(kScratchDoubleReg, right, 8);
- // t0 = I16x8Mul(t0, t1)
+ // t = I16x8Mul(t0, t1)
// => __PP __PP ... __PP __PP
- __ vpmullw(t0, t0, t1);
- __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpmullw(tmp, tmp, kScratchDoubleReg);
- // t0 = I16x8Shl(t0, 8)
- // => PP00 PP00 ... PP00 PP00
- __ vpsllw(t0, t0, 8);
+ // s = left * 256
+ __ vpsllw(kScratchDoubleReg, left, 8);
+
+ // dst = I16x8Mul(left * 256, right)
+ // => pp__ pp__ ... pp__ pp__
+ __ vpmullw(dst, kScratchDoubleReg, right);
- // dst = I16x8And(dst, 0x00ff)
+ // dst = I16x8Shr(dst, 8)
// => 00pp 00pp ... 00pp 00pp
- __ vpand(dst, dst, kScratchDoubleReg);
+ __ vpsrlw(dst, dst, 8);
+
+ // t = I16x8Shl(t, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ vpsllw(tmp, tmp, 8);
- // dst = I16x8Or(dst, t0)
+ // dst = I16x8Or(dst, t)
// => PPpp PPpp ... PPpp PPpp
- __ vpor(dst, dst, t0);
+ __ vpor(dst, dst, tmp);
break;
}
case kSSEI8x16MinS: {
@@ -2922,6 +3021,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16UConvertI16x8: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
// Change negative lanes to 0x7FFF
@@ -2954,51 +3054,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16ShrU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister src = i.InputSimd128Register(0);
- int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
-
- // src = AAaa ... AAaa
- // tmp = F0F0 ... F0F0 (shift=4)
-
- I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift); // needn't byte cast
-
- // src = src & tmp
- // => A0a0 ... A0a0
- __ pand(src, tmp);
-
- // src = src >> shift
- // => 0A0a ... 0A0a (shift=4)
- __ psrld(src, shift);
- break;
- }
- case kAVXI8x16ShrU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ case kIA32I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
int8_t shift = i.InputInt8(1) & 0x7;
- XMMRegister tmp =
- dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
-
- // src = AAaa ... AAaa
- // tmp = F0F0 ... F0F0 (shift=4)
- I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift);
-
- // src = src & tmp
- // => A0a0 ... A0a0
- __ vpand(dst, src, tmp);
-
- // dst = dst >> shift
- // => 0A0a ... 0A0a (shift=4)
- __ vpsrld(dst, dst, shift);
+ // Unpack the bytes into words, do logical shifts, and repack.
+ __ Punpckhbw(kScratchDoubleReg, src);
+ __ Punpcklbw(dst, src);
+ __ Psrlw(kScratchDoubleReg, 8 + shift);
+ __ Psrlw(dst, 8 + shift);
+ __ Packuswb(dst, kScratchDoubleReg);
break;
}
-#undef I8x16_SPLAT
case kSSEI8x16MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pminub(i.OutputSimd128Register(), i.InputOperand(1));
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pminub(dst, i.InputOperand(1));
break;
}
case kAVXI8x16MinU: {
@@ -3132,12 +3203,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32S8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
+ Operand src0 = i.InputOperand(0);
Register tmp = i.TempRegister(0);
- // Prepare 16-byte boundary buffer for shuffle control mask
+ // Prepare 16 byte aligned buffer for shuffle control mask
__ mov(tmp, esp);
- __ movups(dst, i.InputOperand(0));
__ and_(esp, -16);
if (instr->InputCount() == 5) { // only one input operand
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
for (int j = 4; j > 0; j--) {
uint32_t mask = i.InputUint32(j);
__ push(Immediate(mask));
@@ -3145,6 +3217,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufb(dst, Operand(esp, 0));
} else { // two input operands
DCHECK_EQ(6, instr->InputCount());
+ __ movups(kScratchDoubleReg, src0);
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
uint32_t mask = 0;
@@ -3154,8 +3227,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ push(Immediate(mask));
}
- __ Pshufb(dst, Operand(esp, 0));
- __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ Pshufb(kScratchDoubleReg, Operand(esp, 0));
+ Operand src1 = i.InputOperand(1);
+ if (!src1.is_reg(dst)) __ movups(dst, src1);
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
uint32_t mask = 0;
@@ -3165,16 +3239,324 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ push(Immediate(mask));
}
- __ Pshufb(kScratchDoubleReg, Operand(esp, 0));
+ __ Pshufb(dst, Operand(esp, 0));
__ por(dst, kScratchDoubleReg);
}
__ mov(esp, tmp);
break;
}
case kIA32S32x4Swizzle: {
+ DCHECK_EQ(2, instr->InputCount());
__ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), i.InputInt8(1));
break;
}
+ case kIA32S32x4Shuffle: {
+ DCHECK_EQ(4, instr->InputCount()); // Swizzles should be handled above.
+ int8_t shuffle = i.InputInt8(2);
+ DCHECK_NE(0xe4, shuffle); // A simple blend should be handled below.
+ __ Pshufd(kScratchDoubleReg, i.InputOperand(1), shuffle);
+ __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), shuffle);
+ __ Pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputInt8(3));
+ break;
+ }
+ case kIA32S16x8Blend:
+ ASSEMBLE_SIMD_IMM_SHUFFLE(pblendw, SSE4_1, i.InputInt8(2));
+ break;
+ case kIA32S16x8HalfShuffle1: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pshuflw(dst, i.InputOperand(0), i.InputInt8(1));
+ __ Pshufhw(dst, dst, i.InputInt8(2));
+ break;
+ }
+ case kIA32S16x8HalfShuffle2: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pshuflw(kScratchDoubleReg, i.InputOperand(1), i.InputInt8(2));
+ __ Pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputInt8(3));
+ __ Pshuflw(dst, i.InputOperand(0), i.InputInt8(2));
+ __ Pshufhw(dst, dst, i.InputInt8(3));
+ __ Pblendw(dst, kScratchDoubleReg, i.InputInt8(4));
+ break;
+ }
+ case kIA32S8x16Alignr:
+ ASSEMBLE_SIMD_IMM_SHUFFLE(palignr, SSSE3, i.InputInt8(2));
+ break;
+ case kIA32S16x8Dup: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ int8_t lane = i.InputInt8(1) & 0x7;
+ int8_t lane4 = lane & 0x3;
+ int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ if (lane < 4) {
+ __ Pshuflw(dst, src, half_dup);
+ __ Pshufd(dst, dst, 0);
+ } else {
+ __ Pshufhw(dst, src, half_dup);
+ __ Pshufd(dst, dst, 0xaa);
+ }
+ break;
+ }
+ case kIA32S8x16Dup: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1) & 0xf;
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ if (lane < 8) {
+ __ vpunpcklbw(dst, src, src);
+ } else {
+ __ vpunpckhbw(dst, src, src);
+ }
+ } else {
+ DCHECK_EQ(dst, src);
+ if (lane < 8) {
+ __ punpcklbw(dst, dst);
+ } else {
+ __ punpckhbw(dst, dst);
+ }
+ }
+ lane &= 0x7;
+ int8_t lane4 = lane & 0x3;
+ int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ if (lane < 4) {
+ __ Pshuflw(dst, dst, half_dup);
+ __ Pshufd(dst, dst, 0);
+ } else {
+ __ Pshufhw(dst, dst, half_dup);
+ __ Pshufd(dst, dst, 0xaa);
+ }
+ break;
+ }
+ case kIA32S64x2UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhqdq);
+ break;
+ case kIA32S32x4UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhdq);
+ break;
+ case kIA32S16x8UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhwd);
+ break;
+ case kIA32S8x16UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhbw);
+ break;
+ case kIA32S64x2UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklqdq);
+ break;
+ case kIA32S32x4UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckldq);
+ break;
+ case kIA32S16x8UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklwd);
+ break;
+ case kIA32S8x16UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
+ break;
+ case kSSES16x8UnzipHigh: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ psrld(kScratchDoubleReg, 16);
+ src2 = kScratchDoubleReg;
+ }
+ __ psrld(dst, 16);
+ __ packusdw(dst, src2);
+ break;
+ }
+ case kAVXS16x8UnzipHigh: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ if (instr->InputCount() == 2) {
+ __ vpsrld(kScratchDoubleReg, i.InputSimd128Register(1), 16);
+ src2 = kScratchDoubleReg;
+ }
+ __ vpsrld(dst, i.InputSimd128Register(0), 16);
+ __ vpackusdw(dst, dst, src2);
+ break;
+ }
+ case kSSES16x8UnzipLow: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ if (instr->InputCount() == 2) {
+ __ pblendw(kScratchDoubleReg, i.InputOperand(1), 0x55);
+ src2 = kScratchDoubleReg;
+ }
+ __ pblendw(dst, kScratchDoubleReg, 0xaa);
+ __ packusdw(dst, src2);
+ break;
+ }
+ case kAVXS16x8UnzipLow: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ if (instr->InputCount() == 2) {
+ __ vpblendw(kScratchDoubleReg, kScratchDoubleReg, i.InputOperand(1),
+ 0x55);
+ src2 = kScratchDoubleReg;
+ }
+ __ vpblendw(dst, kScratchDoubleReg, i.InputSimd128Register(0), 0x55);
+ __ vpackusdw(dst, dst, src2);
+ break;
+ }
+ case kSSES8x16UnzipHigh: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ psrlw(kScratchDoubleReg, 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ psrlw(dst, 8);
+ __ packuswb(dst, src2);
+ break;
+ }
+ case kAVXS8x16UnzipHigh: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ if (instr->InputCount() == 2) {
+ __ vpsrlw(kScratchDoubleReg, i.InputSimd128Register(1), 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ vpsrlw(dst, i.InputSimd128Register(0), 8);
+ __ vpackuswb(dst, dst, src2);
+ break;
+ }
+ case kSSES8x16UnzipLow: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ psllw(kScratchDoubleReg, 8);
+ __ psrlw(kScratchDoubleReg, 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ psllw(dst, 8);
+ __ psrlw(dst, 8);
+ __ packuswb(dst, src2);
+ break;
+ }
+ case kAVXS8x16UnzipLow: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ if (instr->InputCount() == 2) {
+ __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(1), 8);
+ __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ vpsllw(dst, i.InputSimd128Register(0), 8);
+ __ vpsrlw(dst, dst, 8);
+ __ vpackuswb(dst, dst, src2);
+ break;
+ }
+ case kSSES8x16TransposeLow: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ psllw(dst, 8);
+ if (instr->InputCount() == 1) {
+ __ movups(kScratchDoubleReg, dst);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ psllw(kScratchDoubleReg, 8);
+ }
+ __ psrlw(dst, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXS8x16TransposeLow: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputCount() == 1) {
+ __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8);
+ __ vpsrlw(dst, kScratchDoubleReg, 8);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(1), 8);
+ __ vpsllw(dst, i.InputSimd128Register(0), 8);
+ __ vpsrlw(dst, dst, 8);
+ }
+ __ vpor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kSSES8x16TransposeHigh: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ psrlw(dst, 8);
+ if (instr->InputCount() == 1) {
+ __ movups(kScratchDoubleReg, dst);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ __ psrlw(kScratchDoubleReg, 8);
+ }
+ __ psllw(kScratchDoubleReg, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXS8x16TransposeHigh: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputCount() == 1) {
+ __ vpsrlw(dst, i.InputSimd128Register(0), 8);
+ __ vpsllw(kScratchDoubleReg, dst, 8);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ __ vpsrlw(kScratchDoubleReg, i.InputSimd128Register(1), 8);
+ __ vpsrlw(dst, i.InputSimd128Register(0), 8);
+ __ vpsllw(kScratchDoubleReg, kScratchDoubleReg, 8);
+ }
+ __ vpor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kSSES8x8Reverse:
+ case kSSES8x4Reverse:
+ case kSSES8x2Reverse: {
+ DCHECK_EQ(1, instr->InputCount());
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (arch_opcode != kSSES8x2Reverse) {
+ // First shuffle words into position.
+ int8_t shuffle_mask = arch_opcode == kSSES8x4Reverse ? 0xB1 : 0x1B;
+ __ pshuflw(dst, dst, shuffle_mask);
+ __ pshufhw(dst, dst, shuffle_mask);
+ }
+ __ movaps(kScratchDoubleReg, dst);
+ __ psrlw(kScratchDoubleReg, 8);
+ __ psllw(dst, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
+ case kAVXS8x2Reverse:
+ case kAVXS8x4Reverse:
+ case kAVXS8x8Reverse: {
+ DCHECK_EQ(1, instr->InputCount());
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = dst;
+ if (arch_opcode != kAVXS8x2Reverse) {
+ // First shuffle words into position.
+ int8_t shuffle_mask = arch_opcode == kAVXS8x4Reverse ? 0xB1 : 0x1B;
+ __ vpshuflw(dst, i.InputOperand(0), shuffle_mask);
+ __ vpshufhw(dst, dst, shuffle_mask);
+ } else {
+ src = i.InputSimd128Register(0);
+ }
+ // Reverse each 16 bit lane.
+ __ vpsrlw(kScratchDoubleReg, src, 8);
+ __ vpsllw(dst, src, 8);
+ __ vpor(dst, dst, kScratchDoubleReg);
+ break;
+ }
case kIA32S1x4AnyTrue:
case kIA32S1x8AnyTrue:
case kIA32S1x16AnyTrue: {
@@ -3402,31 +3784,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
IA32OperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0, esi);
@@ -3439,8 +3809,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret(static_cast<int>(pop_size), ecx);
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -3449,12 +3820,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Label end;
if (condition == kUnorderedEqual) {
@@ -3505,6 +3874,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
IA32OperandConverter i(this, instr);
@@ -3710,6 +4089,7 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (shrink_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
if (info()->IsWasm() && shrink_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -3724,20 +4104,18 @@ void CodeGenerator::AssembleConstructFrame() {
Register scratch = esi;
__ push(scratch);
__ mov(scratch,
- Immediate(ExternalReference::address_of_real_stack_limit(
- __ isolate())));
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
__ mov(scratch, Operand(scratch, 0));
__ add(scratch, Immediate(shrink_slots * kPointerSize));
__ cmp(esp, scratch);
__ pop(scratch);
__ j(above_equal, &done);
}
- if (!frame_access_state()->has_frame()) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ __ mov(ecx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
__ Move(esi, Smi::kZero);
- __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, ecx);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
@@ -4073,6 +4451,8 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_MOVX
+#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
+#undef ASSEMBLE_SIMD_IMM_SHUFFLE
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 7886615939..8ffc9c3819 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -154,6 +154,10 @@ namespace compiler {
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
V(AVXI32x4ReplaceLane) \
+ V(SSEI32x4SConvertF32x4) \
+ V(AVXI32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
V(IA32I32x4Neg) \
V(SSEI32x4Shl) \
V(AVXI32x4Shl) \
@@ -179,6 +183,10 @@ namespace compiler {
V(AVXI32x4GtS) \
V(SSEI32x4GeS) \
V(AVXI32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
V(SSEI32x4ShrU) \
V(AVXI32x4ShrU) \
V(SSEI32x4MinU) \
@@ -193,6 +201,8 @@ namespace compiler {
V(IA32I16x8ExtractLane) \
V(SSEI16x8ReplaceLane) \
V(AVXI16x8ReplaceLane) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
V(IA32I16x8Neg) \
V(SSEI16x8Shl) \
V(AVXI16x8Shl) \
@@ -224,6 +234,8 @@ namespace compiler {
V(AVXI16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
V(SSEI16x8ShrU) \
V(AVXI16x8ShrU) \
V(SSEI16x8UConvertI32x4) \
@@ -249,8 +261,7 @@ namespace compiler {
V(IA32I8x16Neg) \
V(SSEI8x16Shl) \
V(AVXI8x16Shl) \
- V(SSEI8x16ShrS) \
- V(AVXI8x16ShrS) \
+ V(IA32I8x16ShrS) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
V(SSEI8x16AddSaturateS) \
@@ -279,8 +290,7 @@ namespace compiler {
V(AVXI8x16AddSaturateU) \
V(SSEI8x16SubSaturateU) \
V(AVXI8x16SubSaturateU) \
- V(SSEI8x16ShrU) \
- V(AVXI8x16ShrU) \
+ V(IA32I8x16ShrU) \
V(SSEI8x16MinU) \
V(AVXI8x16MinU) \
V(SSEI8x16MaxU) \
@@ -302,6 +312,39 @@ namespace compiler {
V(AVXS128Select) \
V(IA32S8x16Shuffle) \
V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
V(IA32S1x4AnyTrue) \
V(IA32S1x4AllTrue) \
V(IA32S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 4287f7be91..82d6fb88a3 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -136,6 +136,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
case kAVXI32x4ReplaceLane:
+ case kSSEI32x4SConvertF32x4:
+ case kAVXI32x4SConvertF32x4:
+ case kIA32I32x4SConvertI16x8Low:
+ case kIA32I32x4SConvertI16x8High:
case kIA32I32x4Neg:
case kSSEI32x4Shl:
case kAVXI32x4Shl:
@@ -161,6 +165,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4GtS:
case kSSEI32x4GeS:
case kAVXI32x4GeS:
+ case kSSEI32x4UConvertF32x4:
+ case kAVXI32x4UConvertF32x4:
+ case kIA32I32x4UConvertI16x8Low:
+ case kIA32I32x4UConvertI16x8High:
case kSSEI32x4ShrU:
case kAVXI32x4ShrU:
case kSSEI32x4MinU:
@@ -175,6 +183,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8ExtractLane:
case kSSEI16x8ReplaceLane:
case kAVXI16x8ReplaceLane:
+ case kIA32I16x8SConvertI8x16Low:
+ case kIA32I16x8SConvertI8x16High:
case kIA32I16x8Neg:
case kSSEI16x8Shl:
case kAVXI16x8Shl:
@@ -206,6 +216,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8GtS:
case kSSEI16x8GeS:
case kAVXI16x8GeS:
+ case kIA32I16x8UConvertI8x16Low:
+ case kIA32I16x8UConvertI8x16High:
case kSSEI16x8ShrU:
case kAVXI16x8ShrU:
case kSSEI16x8UConvertI32x4:
@@ -231,8 +243,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16Neg:
case kSSEI8x16Shl:
case kAVXI8x16Shl:
- case kSSEI8x16ShrS:
- case kAVXI8x16ShrS:
+ case kIA32I8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
case kSSEI8x16AddSaturateS:
@@ -261,8 +272,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16AddSaturateU:
case kSSEI8x16SubSaturateU:
case kAVXI8x16SubSaturateU:
- case kSSEI8x16ShrU:
- case kAVXI8x16ShrU:
+ case kIA32I8x16ShrU:
case kSSEI8x16MinU:
case kAVXI8x16MinU:
case kSSEI8x16MaxU:
@@ -284,6 +294,39 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Select:
case kIA32S8x16Shuffle:
case kIA32S32x4Swizzle:
+ case kIA32S32x4Shuffle:
+ case kIA32S16x8Blend:
+ case kIA32S16x8HalfShuffle1:
+ case kIA32S16x8HalfShuffle2:
+ case kIA32S8x16Alignr:
+ case kIA32S16x8Dup:
+ case kIA32S8x16Dup:
+ case kSSES16x8UnzipHigh:
+ case kAVXS16x8UnzipHigh:
+ case kSSES16x8UnzipLow:
+ case kAVXS16x8UnzipLow:
+ case kSSES8x16UnzipHigh:
+ case kAVXS8x16UnzipHigh:
+ case kSSES8x16UnzipLow:
+ case kAVXS8x16UnzipLow:
+ case kIA32S64x2UnpackHigh:
+ case kIA32S32x4UnpackHigh:
+ case kIA32S16x8UnpackHigh:
+ case kIA32S8x16UnpackHigh:
+ case kIA32S64x2UnpackLow:
+ case kIA32S32x4UnpackLow:
+ case kIA32S16x8UnpackLow:
+ case kIA32S8x16UnpackLow:
+ case kSSES8x16TransposeLow:
+ case kAVXS8x16TransposeLow:
+ case kSSES8x16TransposeHigh:
+ case kAVXS8x16TransposeHigh:
+ case kSSES8x8Reverse:
+ case kAVXS8x8Reverse:
+ case kSSES8x4Reverse:
+ case kAVXS8x4Reverse:
+ case kSSES8x2Reverse:
+ case kAVXS8x2Reverse:
case kIA32S1x4AnyTrue:
case kIA32S1x4AllTrue:
case kIA32S1x8AnyTrue:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 9000a1235f..4144254285 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -75,8 +75,7 @@ class IA32OperandGenerator final : public OperandGenerator {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Handle<HeapObject> value = HeapConstantOf(node->op());
- Isolate* isolate = value->GetIsolate();
- return !isolate->heap()->InNewSpace(*value);
+ return !Heap::InNewSpace(*value);
#else
return false;
#endif
@@ -210,19 +209,36 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
}
+void VisitRRSimd(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
+void VisitRRISimd(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 =
+ g.UseImmediate(OpParameter<int32_t>(node->op()));
+ selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
+}
+
void VisitRRISimd(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 =
g.UseImmediate(OpParameter<int32_t>(node->op()));
- InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
}
}
@@ -1244,24 +1260,65 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- IA32OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
- LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
- ExternalReference js_stack_limit =
- ExternalReference::address_of_stack_limit(selector->isolate());
- if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kIA32StackCheck);
- CHECK(cont->IsBranch());
- selector->EmitWithContinuation(opcode, cont);
- return;
- }
+ StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m(
+ selector->isolate(), node);
+ if (m.Matched()) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kIA32StackCheck);
+ CHECK(cont->IsBranch());
+ selector->EmitWithContinuation(opcode, cont);
+ return;
+ }
+ WasmStackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> wasm_m(
+ node);
+ if (wasm_m.Matched()) {
+ // This is a wasm stack check. By structure, we know that we can use the
+ // stack pointer directly, as wasm code does not modify the stack at points
+ // where stack checks are performed.
+ Node* left = node->InputAt(0);
+ LocationOperand esp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
+ InstructionSequence::DefaultRepresentation(),
+ RegisterCode::kRegCode_esp);
+ return VisitCompareWithMemoryOperand(selector, kIA32Cmp, left, esp, cont);
}
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ if (rep == MachineRepresentation::kWord8) {
+ inputs[input_count++] = g.UseFixed(value, edx);
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ }
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ InstructionOperand outputs[1];
+ if (rep == MachineRepresentation::kWord8) {
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ outputs[0] = g.DefineAsFixed(node, edx);
+ } else {
+ outputs[0] = g.DefineSameAsFirst(node);
+ }
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs);
+}
+
} // namespace
// Shared routine for word comparison with zero.
@@ -1381,8 +1438,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
@@ -1530,10 +1587,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (rep) {
@@ -1550,32 +1603,11 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
UNREACHABLE();
break;
}
- AddressingMode addressing_mode;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- if (rep == MachineRepresentation::kWord8) {
- inputs[input_count++] = g.UseByteRegister(value);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ VisitAtomicExchange(this, node, opcode, rep);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
@@ -1592,31 +1624,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand outputs[1];
- AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- inputs[input_count++] = g.UseFixed(value, edx);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- // Using DefineSameAsFirst requires the register to be unallocated.
- outputs[0] = g.DefineAsFixed(node, edx);
- } else {
- outputs[0] = g.DefineSameAsFirst(node);
- }
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs);
+ VisitAtomicExchange(this, node, opcode, type.representation());
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
@@ -1770,7 +1778,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8UConvertI32x4) \
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
@@ -1788,7 +1795,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16UConvertI16x8) \
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16MinU) \
@@ -1799,12 +1805,20 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
-#define SIMD_UNOP_LIST(V) \
- V(F32x4SConvertI32x4) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4Neg) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8Neg) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
V(I8x16Neg)
#define SIMD_UNOP_PREFIX_LIST(V) \
@@ -1828,60 +1842,43 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
- V(I16x8ShrU)
+ V(I16x8ShrU) \
+ V(I8x16Shl)
+
+#define SIMD_I8X16_RIGHT_SHIFT_OPCODES(V) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
void InstructionSelector::VisitF32x4Splat(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- if (IsSupported(AVX)) {
- Emit(kAVXF32x4Splat, g.DefineAsRegister(node), operand0);
- } else {
- Emit(kSSEF32x4Splat, g.DefineSameAsFirst(node), operand0);
- }
+ VisitRRSimd(this, node, kAVXF32x4Splat, kSSEF32x4Splat);
}
void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 =
- g.UseImmediate(OpParameter<int32_t>(node->op()));
- if (IsSupported(AVX)) {
- Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
- } else {
- Emit(kSSEF32x4ExtractLane, g.DefineSameAsFirst(node), operand0, operand1);
- }
+ VisitRRISimd(this, node, kAVXF32x4ExtractLane, kSSEF32x4ExtractLane);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- if (IsSupported(AVX)) {
- Emit(kAVXF32x4UConvertI32x4, g.DefineAsRegister(node), operand0);
- } else {
- Emit(kSSEF32x4UConvertI32x4, g.DefineSameAsFirst(node), operand0);
- }
+ VisitRRSimd(this, node, kAVXF32x4UConvertI32x4, kSSEF32x4UConvertI32x4);
}
-#define SIMD_I8X16_SHIFT_OPCODES(V) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16ShrU)
-
-#define VISIT_SIMD_I8X16_SHIFT(Op) \
- void InstructionSelector::Visit##Op(Node* node) { \
- VisitRRISimd(this, node, kAVX##Op, kSSE##Op); \
- }
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ VisitRRSimd(this, node, kAVXI32x4SConvertF32x4, kSSEI32x4SConvertF32x4);
+}
-SIMD_I8X16_SHIFT_OPCODES(VISIT_SIMD_I8X16_SHIFT)
-#undef SIMD_I8X16_SHIFT_OPCODES
-#undef VISIT_SIMD_I8X16_SHIFT
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionCode opcode =
+ IsSupported(AVX) ? kAVXI32x4UConvertF32x4 : kSSEI32x4UConvertF32x4;
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
void InstructionSelector::VisitI8x16Mul(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempSimd128Register()};
if (IsSupported(AVX)) {
Emit(kAVXI8x16Mul, g.DefineAsRegister(node), operand0, operand1,
arraysize(temps), temps);
@@ -1918,10 +1915,7 @@ SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#define VISIT_SIMD_EXTRACT_LANE(Type) \
void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
- IA32OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
+ VisitRRISimd(this, node, kIA32##Type##ExtractLane); \
}
SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
@@ -1946,22 +1940,23 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = \
- g.UseImmediate(OpParameter<int32_t>(node->op())); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Opcode, g.DefineAsRegister(node), operand0, operand1); \
- } else { \
- Emit(kSSE##Opcode, g.DefineSameAsFirst(node), operand0, operand1); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRRISimd(this, node, kAVX##Opcode, kSSE##Opcode); \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
+#define VISIT_SIMD_I8X16_RIGHT_SHIFT(Op) \
+ void InstructionSelector::Visit##Op(Node* node) { \
+ VisitRRISimd(this, node, kIA32##Op); \
+ }
+
+SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8X16_RIGHT_SHIFT)
+#undef SIMD_I8X16_RIGHT_SHIFT_OPCODES
+#undef VISIT_SIMD_I8X16_RIGHT_SHIFT
+
#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
@@ -2011,6 +2006,26 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
+ ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.Use(node->InputAt(1));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
+
+void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
+ VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
+}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -2019,41 +2034,300 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+namespace {
+
+// Packs a 4 lane shuffle into a single imm8 suitable for use by pshufd,
+// pshuflw, and pshufhw.
+uint8_t PackShuffle4(uint8_t* shuffle) {
+ return (shuffle[0] & 3) | ((shuffle[1] & 3) << 2) | ((shuffle[2] & 3) << 4) |
+ ((shuffle[3] & 3) << 6);
+}
+
+// Gets an 8 bit lane mask suitable for 16x8 pblendw.
+uint8_t PackBlend8(const uint8_t* shuffle16x8) {
+ int8_t result = 0;
+ for (int i = 0; i < 8; ++i) {
+ result |= (shuffle16x8[i] >= 8 ? 1 : 0) << i;
+ }
+ return result;
+}
+
+// Gets an 8 bit lane mask suitable for 32x4 pblendw.
+uint8_t PackBlend4(const uint8_t* shuffle32x4) {
+ int8_t result = 0;
+ for (int i = 0; i < 4; ++i) {
+ result |= (shuffle32x4[i] >= 4 ? 0x3 : 0) << (i * 2);
+ }
+ return result;
+}
+
+// Returns true if shuffle can be decomposed into two 16x4 half shuffles
+// followed by a 16x8 blend.
+// E.g. [3 2 1 0 15 14 13 12].
+bool TryMatch16x8HalfShuffle(uint8_t* shuffle16x8, uint8_t* blend_mask) {
+ *blend_mask = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((shuffle16x8[i] & 0x4) != (i & 0x4)) return false;
+ *blend_mask |= (shuffle16x8[i] > 7 ? 1 : 0) << i;
+ }
+ return true;
+}
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+ ArchOpcode avx_opcode;
+ bool src0_needs_reg;
+ bool src1_needs_reg;
+};
+
+// Shuffles that map to architecture-specific instruction sequences. These are
+// matched very early, so we shouldn't include shuffles that match better in
+// later tests, like 32x4 and 16x8 shuffles. In general, these patterns should
+// map to either a single instruction, or be finer grained, such as zip/unzip or
+// transpose patterns.
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23},
+ kIA32S64x2UnpackLow,
+ kIA32S64x2UnpackLow,
+ true,
+ false},
+ {{8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31},
+ kIA32S64x2UnpackHigh,
+ kIA32S64x2UnpackHigh,
+ true,
+ false},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kIA32S32x4UnpackLow,
+ kIA32S32x4UnpackLow,
+ true,
+ false},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kIA32S32x4UnpackHigh,
+ kIA32S32x4UnpackHigh,
+ true,
+ false},
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kIA32S16x8UnpackLow,
+ kIA32S16x8UnpackLow,
+ true,
+ false},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kIA32S16x8UnpackHigh,
+ kIA32S16x8UnpackHigh,
+ true,
+ false},
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kIA32S8x16UnpackLow,
+ kIA32S8x16UnpackLow,
+ true,
+ false},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kIA32S8x16UnpackHigh,
+ kIA32S8x16UnpackHigh,
+ true,
+ false},
+
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kSSES16x8UnzipLow,
+ kAVXS16x8UnzipLow,
+ true,
+ false},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kSSES16x8UnzipHigh,
+ kAVXS16x8UnzipHigh,
+ true,
+ true},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kSSES8x16UnzipLow,
+ kAVXS8x16UnzipLow,
+ true,
+ true},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kSSES8x16UnzipHigh,
+ kAVXS8x16UnzipHigh,
+ true,
+ true},
+
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kSSES8x16TransposeLow,
+ kAVXS8x16TransposeLow,
+ true,
+ true},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kSSES8x16TransposeHigh,
+ kAVXS8x16TransposeHigh,
+ true,
+ true},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kSSES8x8Reverse,
+ kAVXS8x8Reverse,
+ false,
+ false},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kSSES8x4Reverse,
+ kAVXS8x4Reverse,
+ false,
+ false},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kSSES8x2Reverse,
+ kAVXS8x2Reverse,
+ true,
+ true}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ const ShuffleEntry** arch_shuffle) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *arch_shuffle = &entry;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
// Selects an IA-32 instruction sequence for a generic 8x16 byte shuffle,
// preferring specialized instructions ((v)palignr, pshufd, pblendw, unpacks,
// ...) over the fully general pshufb-based sequence where the pattern allows.
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);

  int imm_count = 0;
  static const int kMaxImms = 6;
  uint32_t imms[kMaxImms];
  int temp_count = 0;
  static const int kMaxTemps = 2;
  InstructionOperand temps[kMaxTemps];

  IA32OperandGenerator g(this);
  bool use_avx = CpuFeatures::IsSupported(AVX);
  // AVX and swizzles don't generally need DefineSameAsFirst to avoid a move.
  bool no_same_as_first = use_avx || is_swizzle;
  // We generally need UseRegister for input0, Use for input1.
  bool src0_needs_reg = true;
  bool src1_needs_reg = false;
  ArchOpcode opcode = kIA32S8x16Shuffle;  // general shuffle is the default

  uint8_t offset;
  uint8_t shuffle32x4[4];
  uint8_t shuffle16x8[8];
  int index;
  const ShuffleEntry* arch_shuffle = nullptr;  // only set on a table match
  if (TryMatchConcat(shuffle, &offset)) {
    // Swap inputs from the normal order for (v)palignr.
    SwapShuffleInputs(node);
    is_swizzle = false;  // It's simpler to just handle the general case.
    no_same_as_first = use_avx;  // SSE requires same-as-first.
    opcode = kIA32S8x16Alignr;
    // palignr takes a single imm8 offset.
    imms[imm_count++] = offset;
  } else if (TryMatchArchShuffle(shuffle, arch_shuffles,
                                 arraysize(arch_shuffles), is_swizzle,
                                 &arch_shuffle)) {
    opcode = use_avx ? arch_shuffle->avx_opcode : arch_shuffle->opcode;
    src0_needs_reg = !use_avx || arch_shuffle->src0_needs_reg;
    // SSE can't take advantage of both operands in registers and needs
    // same-as-first.
    src1_needs_reg = use_avx && arch_shuffle->src1_needs_reg;
    no_same_as_first = use_avx;
  } else if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    uint8_t shuffle_mask = PackShuffle4(shuffle32x4);
    if (is_swizzle) {
      if (TryMatchIdentity(shuffle)) {
        // Bypass normal shuffle code generation in this case.
        EmitIdentity(node);
        return;
      } else {
        // pshufd takes a single imm8 shuffle mask.
        opcode = kIA32S32x4Swizzle;
        no_same_as_first = true;
        src0_needs_reg = false;
        imms[imm_count++] = shuffle_mask;
      }
    } else {
      // 2 operand shuffle
      // A blend is more efficient than a general 32x4 shuffle; try it first.
      if (TryMatchBlend(shuffle)) {
        opcode = kIA32S16x8Blend;
        uint8_t blend_mask = PackBlend4(shuffle32x4);
        imms[imm_count++] = blend_mask;
      } else {
        opcode = kIA32S32x4Shuffle;
        no_same_as_first = true;
        src0_needs_reg = false;
        imms[imm_count++] = shuffle_mask;
        // uint8_t (not int8_t): a mask with the top bit set (lanes 2/3 taken
        // from src1) must not be sign-extended into the uint32_t immediate.
        uint8_t blend_mask = PackBlend4(shuffle32x4);
        imms[imm_count++] = blend_mask;
      }
    }
  } else if (TryMatch16x8Shuffle(shuffle, shuffle16x8)) {
    uint8_t blend_mask;
    if (TryMatchBlend(shuffle)) {
      opcode = kIA32S16x8Blend;
      blend_mask = PackBlend8(shuffle16x8);
      imms[imm_count++] = blend_mask;
    } else if (TryMatchDup<8>(shuffle, &index)) {
      opcode = kIA32S16x8Dup;
      src0_needs_reg = false;
      imms[imm_count++] = index;
    } else if (TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask)) {
      opcode = is_swizzle ? kIA32S16x8HalfShuffle1 : kIA32S16x8HalfShuffle2;
      // Half-shuffles don't need DefineSameAsFirst or UseRegister(src0).
      no_same_as_first = true;
      src0_needs_reg = false;
      uint8_t mask_lo = PackShuffle4(shuffle16x8);
      uint8_t mask_hi = PackShuffle4(shuffle16x8 + 4);
      imms[imm_count++] = mask_lo;
      imms[imm_count++] = mask_hi;
      if (!is_swizzle) imms[imm_count++] = blend_mask;
    }
  } else if (TryMatchDup<16>(shuffle, &index)) {
    opcode = kIA32S8x16Dup;
    no_same_as_first = use_avx;
    src0_needs_reg = true;
    imms[imm_count++] = index;
  }
  if (opcode == kIA32S8x16Shuffle) {
    // Use same-as-first for general swizzle, but not shuffle.
    no_same_as_first = !is_swizzle;
    src0_needs_reg = !no_same_as_first;
    imms[imm_count++] = Pack4Lanes(shuffle);
    imms[imm_count++] = Pack4Lanes(shuffle + 4);
    imms[imm_count++] = Pack4Lanes(shuffle + 8);
    imms[imm_count++] = Pack4Lanes(shuffle + 12);
    temps[temp_count++] = g.TempRegister();
  }

  // Use DefineAsRegister(node) and Use(src0) if we can without forcing an
  // extra move instruction in the CodeGenerator.
  Node* input0 = node->InputAt(0);
  InstructionOperand dst =
      no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
  InstructionOperand src0 =
      src0_needs_reg ? g.UseRegister(input0) : g.Use(input0);

  int input_count = 0;
  InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
  inputs[input_count++] = src0;
  if (!is_swizzle) {
    Node* input1 = node->InputAt(1);
    inputs[input_count++] =
        src1_needs_reg ? g.UseRegister(input1) : g.Use(input1);
  }
  for (int i = 0; i < imm_count; ++i) {
    inputs[input_count++] = g.UseImmediate(imms[i]);
  }
  Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
// static
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 17d032c3f2..83e8da9e8a 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -54,6 +54,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchCallWasmFunction) \
V(ArchTailCallWasm) \
V(ArchJmp) \
+ V(ArchBinarySearchSwitch) \
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
@@ -66,7 +67,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchStackPointer) \
V(ArchFramePointer) \
V(ArchParentFramePointer) \
- V(ArchRootsPointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchStackSlot) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index cee5fc883b..de042cb670 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -248,13 +248,13 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchNop:
case kArchFramePointer:
case kArchParentFramePointer:
- case kArchRootsPointer:
case kArchStackSlot: // Despite its name this opcode will produce a
// reference to a frame slot, so it is not affected
// by the arm64 dual stack issues mentioned below.
case kArchComment:
case kArchDeoptimize:
case kArchJmp:
+ case kArchBinarySearchSwitch:
case kArchLookupSwitch:
case kArchRet:
case kArchTableSwitch:
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 257f0d8c04..27f37215df 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -45,14 +45,23 @@ class SwitchInfo {
}
}
+ // Ensure that comparison order of if-cascades is preserved.
+ std::vector<CaseInfo> CasesSortedByOriginalOrder() const {
+ std::vector<CaseInfo> result(cases_.begin(), cases_.end());
+ std::stable_sort(result.begin(), result.end());
+ return result;
+ }
+ std::vector<CaseInfo> CasesSortedByValue() const {
+ std::vector<CaseInfo> result(cases_.begin(), cases_.end());
+ std::stable_sort(result.begin(), result.end(),
+ [](CaseInfo a, CaseInfo b) { return a.value < b.value; });
+ return result;
+ }
+ const ZoneVector<CaseInfo>& CasesUnsorted() const { return cases_; }
int32_t min_value() const { return min_value_; }
int32_t max_value() const { return max_value_; }
size_t value_range() const { return value_range_; }
size_t case_count() const { return cases_.size(); }
- const CaseInfo& GetCase(size_t i) const {
- DCHECK_LT(i, cases_.size());
- return cases_[i];
- }
BasicBlock* default_branch() const { return default_branch_; }
private:
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index c8c6afea17..f1ca52b14d 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -27,8 +27,8 @@ InstructionSelector::InstructionSelector(
EnableSwitchJumpTable enable_switch_jump_table,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
- EnableSerialization enable_serialization,
- PoisoningMitigationLevel poisoning_level)
+ EnableRootsRelativeAddressing enable_roots_relative_addressing,
+ PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -48,14 +48,20 @@ InstructionSelector::InstructionSelector(
virtual_register_rename_(zone),
scheduler_(nullptr),
enable_scheduling_(enable_scheduling),
- enable_serialization_(enable_serialization),
+ enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
poisoning_level_(poisoning_level),
frame_(frame),
- instruction_selection_failed_(false) {
+ instruction_selection_failed_(false),
+ instr_origins_(sequence->zone()),
+ trace_turbo_(trace_turbo) {
instructions_.reserve(node_count);
continuation_inputs_.reserve(5);
continuation_outputs_.reserve(2);
+
+ if (trace_turbo_ == kEnableTraceTurboJson) {
+ instr_origins_.assign(node_count, {-1, 0});
+ }
}
bool InstructionSelector::SelectInstructions() {
@@ -414,7 +420,7 @@ void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
}
bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
- return enable_serialization_ == kDisableSerialization &&
+ return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing &&
CanUseRootsRegister();
}
@@ -720,7 +726,8 @@ Instruction* InstructionSelector::EmitWithContinuation(
} else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
} else if (cont->IsTrap()) {
- continuation_inputs_.push_back(g.UseImmediate(cont->trap_id()));
+ int trap_id = static_cast<int>(cont->trap_id());
+ continuation_inputs_.push_back(g.UseImmediate(trap_id));
} else {
DCHECK(cont->IsNone());
}
@@ -1091,21 +1098,29 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
for (auto node : base::Reversed(*block)) {
- // Skip nodes that are unused or already defined.
- if (!IsUsed(node) || IsDefined(node)) continue;
- // Generate code for this node "top down", but schedule the code "bottom
- // up".
int current_node_end = current_num_instructions();
- VisitNode(node);
- if (!FinishEmittedInstructions(node, current_node_end)) return;
+ // Skip nodes that are unused or already defined.
+ if (IsUsed(node) && !IsDefined(node)) {
+ // Generate code for this node "top down", but schedule the code "bottom
+ // up".
+ VisitNode(node);
+ if (!FinishEmittedInstructions(node, current_node_end)) return;
+ }
+ if (trace_turbo_ == kEnableTraceTurboJson) {
+ instr_origins_[node->id()] = {current_num_instructions(),
+ current_node_end};
+ }
}
// We're done with the block.
InstructionBlock* instruction_block =
sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
- instruction_block->set_code_start(static_cast<int>(instructions_.size()));
+ if (current_num_instructions() == current_block_end) {
+ // Avoid empty block: insert a {kArchNop} instruction.
+ Emit(Instruction::New(sequence()->zone(), kArchNop));
+ }
+ instruction_block->set_code_start(current_num_instructions());
instruction_block->set_code_end(current_block_end);
-
current_block_ = nullptr;
}
@@ -1131,25 +1146,34 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
#endif
Node* input = block->control_input();
+ int instruction_end = static_cast<int>(instructions_.size());
switch (block->control()) {
case BasicBlock::kGoto:
- return VisitGoto(block->SuccessorAt(0));
+ VisitGoto(block->SuccessorAt(0));
+ break;
case BasicBlock::kCall: {
DCHECK_EQ(IrOpcode::kCall, input->opcode());
BasicBlock* success = block->SuccessorAt(0);
BasicBlock* exception = block->SuccessorAt(1);
- return VisitCall(input, exception), VisitGoto(success);
+ VisitCall(input, exception);
+ VisitGoto(success);
+ break;
}
case BasicBlock::kTailCall: {
DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
- return VisitTailCall(input);
+ VisitTailCall(input);
+ break;
}
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
- if (tbranch == fbranch) return VisitGoto(tbranch);
- return VisitBranch(input, tbranch, fbranch);
+ if (tbranch == fbranch) {
+ VisitGoto(tbranch);
+ } else {
+ VisitBranch(input, tbranch, fbranch);
+ }
+ break;
}
case BasicBlock::kSwitch: {
DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
@@ -1168,23 +1192,25 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (min_value > p.value()) min_value = p.value();
if (max_value < p.value()) max_value = p.value();
}
- // Ensure that comparison order of if-cascades is preserved.
- std::stable_sort(cases.begin(), cases.end());
SwitchInfo sw(cases, min_value, max_value, default_branch);
- return VisitSwitch(input, sw);
+ VisitSwitch(input, sw);
+ break;
}
case BasicBlock::kReturn: {
DCHECK_EQ(IrOpcode::kReturn, input->opcode());
- return VisitReturn(input);
+ VisitReturn(input);
+ break;
}
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
- return VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
+ VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
+ break;
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
- return VisitThrow(input);
+ VisitThrow(input);
+ break;
case BasicBlock::kNone: {
// Exit block doesn't have control.
DCHECK_NULL(input);
@@ -1194,6 +1220,10 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
UNREACHABLE();
break;
}
+ if (trace_turbo_ == kEnableTraceTurboJson && input) {
+ int instruction_start = static_cast<int>(instructions_.size());
+ instr_origins_[input->id()] = {instruction_start, instruction_end};
+ }
}
void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
@@ -1272,11 +1302,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
case IrOpcode::kTrapIf:
- return VisitTrapIf(node, static_cast<Runtime::FunctionId>(
- OpParameter<int32_t>(node->op())));
+ return VisitTrapIf(node, TrapIdOf(node->op()));
case IrOpcode::kTrapUnless:
- return VisitTrapUnless(node, static_cast<Runtime::FunctionId>(
- OpParameter<int32_t>(node->op())));
+ return VisitTrapUnless(node, TrapIdOf(node->op()));
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
@@ -1632,8 +1660,6 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
- case IrOpcode::kLoadRootsPointer:
- return VisitLoadRootsPointer(node);
case IrOpcode::kUnalignedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1998,11 +2024,6 @@ void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
-void InstructionSelector::VisitLoadRootsPointer(Node* node) {
- OperandGenerator g(this);
- Emit(kArchRootsPointer, g.DefineAsRegister(node));
-}
-
void InstructionSelector::VisitFloat64Acos(Node* node) {
VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}
@@ -2096,8 +2117,7 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
inputs[0] = index_operand;
InstructionOperand default_operand = g.Label(sw.default_branch());
std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < sw.case_count(); ++index) {
- const CaseInfo& c = sw.GetCase(index);
+ for (const CaseInfo& c : sw.CasesUnsorted()) {
size_t value = c.value - sw.min_value();
DCHECK_LE(0u, value);
DCHECK_LT(value + 2, input_count);
@@ -2110,19 +2130,38 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand) {
OperandGenerator g(this);
+ std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
size_t input_count = 2 + sw.case_count() * 2;
DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
inputs[1] = g.Label(sw.default_branch());
- for (size_t index = 0; index < sw.case_count(); ++index) {
- const CaseInfo& c = sw.GetCase(index);
+ for (size_t index = 0; index < cases.size(); ++index) {
+ const CaseInfo& c = cases[index];
inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
inputs[index * 2 + 2 + 1] = g.Label(c.branch);
}
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
+void InstructionSelector::EmitBinarySearchSwitch(
+ const SwitchInfo& sw, InstructionOperand& value_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.case_count() * 2;
+ DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = g.Label(sw.default_branch());
+ std::vector<CaseInfo> cases = sw.CasesSortedByValue();
+ std::stable_sort(cases.begin(), cases.end(),
+ [](CaseInfo a, CaseInfo b) { return a.value < b.value; });
+ for (size_t index = 0; index < cases.size(); ++index) {
+ const CaseInfo& c = cases[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
+ inputs[index * 2 + 2 + 1] = g.Label(c.branch);
+ }
+ Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
EmitIdentity(node);
@@ -2353,7 +2392,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2361,11 +2400,7 @@ void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2398,11 +2433,6 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2417,11 +2447,7 @@ void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2754,16 +2780,15 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
}
}
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
+void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1));
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
@@ -2860,6 +2885,84 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
// static
+void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
+ uint8_t* shuffle,
+ bool* needs_swap,
+ bool* is_swizzle) {
+ *needs_swap = false;
+ // Inputs equal, then it's a swizzle.
+ if (inputs_equal) {
+ *is_swizzle = true;
+ } else {
+ // Inputs are distinct; check that both are required.
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < kSimd128Size; ++i) {
+ if (shuffle[i] < kSimd128Size) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ *is_swizzle = true;
+ } else if (src1_is_used && !src0_is_used) {
+ *needs_swap = true;
+ *is_swizzle = true;
+ } else {
+ *is_swizzle = false;
+ // Canonicalize general 2 input shuffles so that the first input lanes are
+ // encountered first. This makes architectural shuffle pattern matching
+ // easier, since we only need to consider 1 input ordering instead of 2.
+ if (shuffle[0] >= kSimd128Size) {
+ // The second operand is used first. Swap inputs and adjust the shuffle.
+ *needs_swap = true;
+ for (int i = 0; i < kSimd128Size; ++i) {
+ shuffle[i] ^= kSimd128Size;
+ }
+ }
+ }
+ }
+ if (*is_swizzle) {
+ for (int i = 0; i < kSimd128Size; ++i) shuffle[i] &= kSimd128Size - 1;
+ }
+}
+
+void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
+ bool* is_swizzle) {
+ // Get raw shuffle indices.
+ memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
+ bool needs_swap;
+ bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
+ GetVirtualRegister(node->InputAt(1));
+ CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap, is_swizzle);
+ if (needs_swap) {
+ SwapShuffleInputs(node);
+ }
+ // Duplicate the first input; for some shuffles on some architectures, it's
+ // easiest to implement a swizzle as a shuffle so it might be used.
+ if (*is_swizzle) {
+ node->ReplaceInput(1, node->InputAt(0));
+ }
+}
+
+// static
+void InstructionSelector::SwapShuffleInputs(Node* node) {
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ node->ReplaceInput(0, input1);
+ node->ReplaceInput(1, input0);
+}
+
+// static
+bool InstructionSelector::TryMatchIdentity(const uint8_t* shuffle) {
+ for (int i = 0; i < kSimd128Size; ++i) {
+ if (shuffle[i] != i) return false;
+ }
+ return true;
+}
+
+// static
bool InstructionSelector::TryMatch32x4Shuffle(const uint8_t* shuffle,
uint8_t* shuffle32x4) {
for (int i = 0; i < 4; ++i) {
@@ -2873,62 +2976,51 @@ bool InstructionSelector::TryMatch32x4Shuffle(const uint8_t* shuffle,
}
// static
-bool InstructionSelector::TryMatchConcat(const uint8_t* shuffle, uint8_t mask,
- uint8_t* vext) {
- uint8_t start = shuffle[0];
- int i = 1;
- for (; i < 16 - start; ++i) {
- if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
- }
- uint8_t wrap = 16;
- for (; i < 16; ++i, ++wrap) {
- if ((shuffle[i] & mask) != (wrap & mask)) return false;
+bool InstructionSelector::TryMatch16x8Shuffle(const uint8_t* shuffle,
+ uint8_t* shuffle16x8) {
+ for (int i = 0; i < 8; ++i) {
+ if (shuffle[i * 2] % 2 != 0) return false;
+ for (int j = 1; j < 2; ++j) {
+ if (shuffle[i * 2 + j] - shuffle[i * 2 + j - 1] != 1) return false;
+ }
+ shuffle16x8[i] = shuffle[i * 2] / 2;
}
- *vext = start;
return true;
}
-// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
-// will ignore the high bit of indices in some cases.
-uint8_t InstructionSelector::CanonicalizeShuffle(Node* node) {
- static const int kMaxLaneIndex = 15;
- static const int kMaxShuffleIndex = 31;
-
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
- uint8_t mask = kMaxShuffleIndex;
- // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
- // Replace any unused source with the other.
- if (GetVirtualRegister(node->InputAt(0)) ==
- GetVirtualRegister(node->InputAt(1))) {
- // unary, src0 == src1.
- mask = kMaxLaneIndex;
- } else {
- bool src0_is_used = false;
- bool src1_is_used = false;
- for (int i = 0; i < 16; ++i) {
- if (shuffle[i] < 16) {
- src0_is_used = true;
- } else {
- src1_is_used = true;
- }
- }
- if (src0_is_used && !src1_is_used) {
- node->ReplaceInput(1, node->InputAt(0));
- mask = kMaxLaneIndex;
- } else if (src1_is_used && !src0_is_used) {
- node->ReplaceInput(0, node->InputAt(1));
- mask = kMaxLaneIndex;
+// static
+bool InstructionSelector::TryMatchConcat(const uint8_t* shuffle,
+ uint8_t* offset) {
+ // Don't match the identity shuffle (e.g. [0 1 2 ... 15]).
+ uint8_t start = shuffle[0];
+ if (start == 0) return false;
+ DCHECK_GT(kSimd128Size, start); // The shuffle should be canonicalized.
+ // A concatenation is a series of consecutive indices, with at most one jump
+ // in the middle from the last lane to the first.
+ for (int i = 1; i < kSimd128Size; ++i) {
+ if ((shuffle[i]) != ((shuffle[i - 1] + 1))) {
+ if (shuffle[i - 1] != 15) return false;
+ if (shuffle[i] % kSimd128Size != 0) return false;
}
}
- return mask;
+ *offset = start;
+ return true;
+}
+
+// static
+bool InstructionSelector::TryMatchBlend(const uint8_t* shuffle) {
+ for (int i = 0; i < 16; ++i) {
+ if ((shuffle[i] & 0xF) != i) return false;
+ }
+ return true;
}
// static
-int32_t InstructionSelector::Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+int32_t InstructionSelector::Pack4Lanes(const uint8_t* shuffle) {
int32_t result = 0;
for (int i = 3; i >= 0; --i) {
result <<= 8;
- result |= shuffle[i] & mask;
+ result |= shuffle[i];
}
return result;
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 42cf6ca4fc..39d0c01ee9 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -76,8 +76,8 @@ class FlagsContinuation final {
}
// Creates a new flags continuation for a wasm trap.
- static FlagsContinuation ForTrap(FlagsCondition condition,
- Runtime::FunctionId trap_id, Node* result) {
+ static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
+ Node* result) {
return FlagsContinuation(condition, trap_id, result);
}
@@ -118,7 +118,7 @@ class FlagsContinuation final {
DCHECK(IsSet());
return frame_state_or_result_;
}
- Runtime::FunctionId trap_id() const {
+ TrapId trap_id() const {
DCHECK(IsTrap());
return trap_id_;
}
@@ -210,8 +210,7 @@ class FlagsContinuation final {
DCHECK_NOT_NULL(result);
}
- FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
- Node* result)
+ FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
: mode_(kFlags_trap),
condition_(condition),
frame_state_or_result_(result),
@@ -228,7 +227,7 @@ class FlagsContinuation final {
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
- Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
+ TrapId trap_id_; // Only valid if mode_ == kFlags_trap.
};
// This struct connects nodes of parameters which are going to be pushed on the
@@ -252,11 +251,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
enum EnableScheduling { kDisableScheduling, kEnableScheduling };
- enum EnableSerialization { kDisableSerialization, kEnableSerialization };
+ enum EnableRootsRelativeAddressing {
+ kDisableRootsRelativeAddressing,
+ kEnableRootsRelativeAddressing
+ };
enum EnableSwitchJumpTable {
kDisableSwitchJumpTable,
kEnableSwitchJumpTable
};
+ enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
@@ -268,9 +271,11 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
? kEnableScheduling
: kDisableScheduling,
- EnableSerialization enable_serialization = kDisableSerialization,
+ EnableRootsRelativeAddressing enable_roots_relative_addressing =
+ kDisableRootsRelativeAddressing,
PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kDontPoison);
+ PoisoningMitigationLevel::kDontPoison,
+ EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
@@ -432,14 +437,47 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
// Check if we can generate loads and stores of ExternalConstants relative
- // to the roots register, i.e. if both a root register is available for this
- // compilation unit and the serializer is disabled.
+ // to the roots register.
bool CanAddressRelativeToRootsRegister() const;
// Check if we can use the roots register to access GC roots.
bool CanUseRootsRegister() const;
Isolate* isolate() const { return sequence()->isolate(); }
+ const ZoneVector<std::pair<int, int>>& instr_origins() const {
+ return instr_origins_;
+ }
+
+ // Expose these SIMD helper functions for testing.
+ static void CanonicalizeShuffleForTesting(bool inputs_equal, uint8_t* shuffle,
+ bool* needs_swap,
+ bool* is_swizzle) {
+ CanonicalizeShuffle(inputs_equal, shuffle, needs_swap, is_swizzle);
+ }
+
+ static bool TryMatchIdentityForTesting(const uint8_t* shuffle) {
+ return TryMatchIdentity(shuffle);
+ }
+ template <int LANES>
+ static bool TryMatchDupForTesting(const uint8_t* shuffle, int* index) {
+ return TryMatchDup<LANES>(shuffle, index);
+ }
+ static bool TryMatch32x4ShuffleForTesting(const uint8_t* shuffle,
+ uint8_t* shuffle32x4) {
+ return TryMatch32x4Shuffle(shuffle, shuffle32x4);
+ }
+ static bool TryMatch16x8ShuffleForTesting(const uint8_t* shuffle,
+ uint8_t* shuffle16x8) {
+ return TryMatch16x8Shuffle(shuffle, shuffle16x8);
+ }
+ static bool TryMatchConcatForTesting(const uint8_t* shuffle,
+ uint8_t* offset) {
+ return TryMatchConcat(shuffle, offset);
+ }
+ static bool TryMatchBlendForTesting(const uint8_t* shuffle) {
+ return TryMatchBlend(shuffle);
+ }
+
private:
friend class OperandGenerator;
@@ -456,6 +494,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
void EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand);
+ void EmitBinarySearchSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand);
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
@@ -567,8 +607,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
BasicBlock* handler = nullptr);
void VisitDeoptimizeIf(Node* node);
void VisitDeoptimizeUnless(Node* node);
- void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
- void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
+ void VisitTrapIf(Node* node, TrapId trap_id);
+ void VisitTrapUnless(Node* node, TrapId trap_id);
void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
@@ -597,6 +637,27 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// ============= Vector instruction (SIMD) helper fns. =======================
// ===========================================================================
+ // Converts a shuffle into canonical form, meaning that the first lane index
+ // is in the range [0 .. 15]. Set |inputs_equal| true if this is an explicit
+ // swizzle. Returns canonicalized |shuffle|, |needs_swap|, and |is_swizzle|.
+ // If |needs_swap| is true, inputs must be swapped. If |is_swizzle| is true,
+ // the second input can be ignored.
+ static void CanonicalizeShuffle(bool inputs_equal, uint8_t* shuffle,
+ bool* needs_swap, bool* is_swizzle);
+
+ // Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
+ // indices, and a boolean indicating if the shuffle is a swizzle (one input).
+ void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
+
+ // Swaps the two first input operands of the node, to help match shuffles
+ // to specific architectural instructions.
+ void SwapShuffleInputs(Node* node);
+
+ // Tries to match an 8x16 byte shuffle to the identity shuffle, which is
+ // [0 1 ... 15]. This should be called after canonicalizing the shuffle, so
+ // the second identity shuffle, [16 17 .. 31] is converted to the first one.
+ static bool TryMatchIdentity(const uint8_t* shuffle);
+
// Tries to match a byte shuffle to a scalar splat operation. Returns the
// index of the lane if successful.
template <int LANES>
@@ -621,22 +682,30 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
return true;
}
- // Tries to match 8x16 byte shuffle to an equivalent 32x4 word shuffle. If
- // successful, it writes the 32x4 shuffle word indices.
+ // Tries to match an 8x16 byte shuffle to an equivalent 32x4 shuffle. If
+ // successful, it writes the 32x4 shuffle word indices. E.g.
+ // [0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15] == [0 2 1 3]
static bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4);
- // Tries to match a byte shuffle to a concatenate operation. If successful,
- // it writes the byte offset.
- static bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask,
- uint8_t* offset);
+ // Tries to match an 8x16 byte shuffle to an equivalent 16x8 shuffle. If
+ // successful, it writes the 16x8 shuffle word indices. E.g.
+ // [0 1 8 9 2 3 10 11 4 5 12 13 6 7 14 15] == [0 4 1 5 2 6 3 7]
+ static bool TryMatch16x8Shuffle(const uint8_t* shuffle, uint8_t* shuffle16x8);
+
+ // Tries to match a byte shuffle to a concatenate operation, formed by taking
+ // 16 bytes from the 32 byte concatenation of the inputs. If successful, it
+ // writes the byte offset. E.g. [4 5 6 7 .. 16 17 18 19] concatenates both
+ // source vectors with offset 4. The shuffle should be canonicalized.
+ static bool TryMatchConcat(const uint8_t* shuffle, uint8_t* offset);
- // Packs 4 bytes of shuffle into a 32 bit immediate, using a mask from
- // CanonicalizeShuffle to convert unary shuffles.
- static int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask);
+ // Tries to match a byte shuffle to a blend operation, which is a shuffle
+ // where no lanes change position. E.g. [0 9 2 11 .. 14 31] interleaves the
+ // even lanes of the first source with the odd lanes of the second. The
+ // shuffle should be canonicalized.
+ static bool TryMatchBlend(const uint8_t* shuffle);
- // Canonicalize shuffles to make pattern matching simpler. Returns a mask that
- // will ignore the high bit of indices if shuffle is unary.
- uint8_t CanonicalizeShuffle(Node* node);
+ // Packs 4 bytes of shuffle into a 32 bit immediate.
+ static int32_t Pack4Lanes(const uint8_t* shuffle);
// ===========================================================================
@@ -683,12 +752,14 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
IntVector virtual_register_rename_;
InstructionScheduler* scheduler_;
EnableScheduling enable_scheduling_;
- EnableSerialization enable_serialization_;
+ EnableRootsRelativeAddressing enable_roots_relative_addressing_;
EnableSwitchJumpTable enable_switch_jump_table_;
PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
+ ZoneVector<std::pair<int, int>> instr_origins_;
+ EnableTraceTurboJson trace_turbo_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 2ea3736b90..83ed28fb53 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -4,6 +4,8 @@
#include "src/compiler/instruction.h"
+#include <iomanip>
+
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
@@ -117,11 +119,10 @@ bool LocationOperand::IsCompatible(LocationOperand* op) {
}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
- OFStream os(stdout);
PrintableInstructionOperand wrapper;
wrapper.register_configuration_ = config;
wrapper.op_ = *this;
- os << wrapper << std::endl;
+ StdoutStream{} << wrapper << std::endl;
}
void InstructionOperand::Print() const { Print(GetRegConfig()); }
@@ -183,7 +184,8 @@ std::ostream& operator<<(std::ostream& os,
os << "[fp_stack:" << allocated.index();
} else if (op.IsRegister()) {
os << "["
- << GetRegConfig()->GetGeneralRegisterName(allocated.register_code())
+ << GetRegConfig()->GetGeneralOrSpecialRegisterName(
+ allocated.register_code())
<< "|R";
} else if (op.IsDoubleRegister()) {
os << "["
@@ -249,7 +251,7 @@ std::ostream& operator<<(std::ostream& os,
}
void MoveOperands::Print(const RegisterConfiguration* config) const {
- OFStream os(stdout);
+ StdoutStream os;
PrintableInstructionOperand wrapper;
wrapper.register_configuration_ = config;
wrapper.op_ = destination();
@@ -367,11 +369,10 @@ bool Instruction::AreMovesRedundant() const {
}
void Instruction::Print(const RegisterConfiguration* config) const {
- OFStream os(stdout);
PrintableInstruction wrapper;
wrapper.instr_ = this;
wrapper.register_configuration_ = config;
- os << wrapper << std::endl;
+ StdoutStream{} << wrapper << std::endl;
}
void Instruction::Print() const { Print(GetRegConfig()); }
@@ -691,7 +692,7 @@ static InstructionBlock* InstructionBlockFor(Zone* zone,
}
std::ostream& operator<<(std::ostream& os,
- PrintableInstructionBlock& printable_block) {
+ const PrintableInstructionBlock& printable_block) {
const InstructionBlock* block = printable_block.block_;
const RegisterConfiguration* config = printable_block.register_configuration_;
const InstructionSequence* code = printable_block.code_;
@@ -724,17 +725,15 @@ std::ostream& operator<<(std::ostream& os,
os << std::endl;
}
- ScopedVector<char> buf(32);
PrintableInstruction printable_instr;
printable_instr.register_configuration_ = config;
for (int j = block->first_instruction_index();
j <= block->last_instruction_index(); j++) {
- // TODO(svenpanne) Add some basic formatting to our streams.
- SNPrintF(buf, "%5d", j);
printable_instr.instr_ = code->InstructionAt(j);
- os << " " << buf.start() << ": " << printable_instr << std::endl;
+ os << " " << std::setw(5) << j << ": " << printable_instr << std::endl;
}
+ os << " successors:";
for (RpoNumber succ : block->successors()) {
os << " B" << succ.ToInt();
}
@@ -869,12 +868,8 @@ void InstructionSequence::StartBlock(RpoNumber rpo) {
void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
DCHECK_EQ(current_block_->rpo_number(), rpo);
- if (current_block_->code_start() == end) { // Empty block. Insert a nop.
- AddInstruction(Instruction::New(zone(), kArchNop));
- end = static_cast<int>(instructions_.size());
- }
- DCHECK(current_block_->code_start() >= 0 &&
- current_block_->code_start() < end);
+ CHECK(current_block_->code_start() >= 0 &&
+ current_block_->code_start() < end);
current_block_->set_code_end(end);
current_block_ = nullptr;
}
@@ -990,23 +985,21 @@ void InstructionSequence::SetSourcePosition(const Instruction* instr,
}
void InstructionSequence::Print(const RegisterConfiguration* config) const {
- OFStream os(stdout);
PrintableInstructionSequence wrapper;
wrapper.register_configuration_ = config;
wrapper.sequence_ = this;
- os << wrapper << std::endl;
+ StdoutStream{} << wrapper << std::endl;
}
void InstructionSequence::Print() const { Print(GetRegConfig()); }
void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
int block_id) const {
- OFStream os(stdout);
RpoNumber rpo = RpoNumber::FromInt(block_id);
const InstructionBlock* block = InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
PrintableInstructionBlock printable_block = {config, block, this};
- os << printable_block << std::endl;
+ StdoutStream{} << printable_block << std::endl;
}
void InstructionSequence::PrintBlock(int block_id) const {
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 1bf59adb4a..6b545c2853 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -9,17 +9,20 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/property-access-builder.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/feedback-vector-inl.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments-inl.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -247,8 +250,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
// We can fold away the Object(x) call if |x| is definitely not a primitive.
- if (NodeProperties::CanBePrimitive(value, effect)) {
- if (!NodeProperties::CanBeNullOrUndefined(value, effect)) {
+ if (NodeProperties::CanBePrimitive(isolate(), value, effect)) {
+ if (!NodeProperties::CanBeNullOrUndefined(isolate(), value, effect)) {
// Turn the {node} into a {JSToObject} call if we know that
// the {value} cannot be null or undefined.
NodeProperties::ReplaceValueInputs(node, value);
@@ -289,7 +292,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// If {arguments_list} cannot be null or undefined, we don't need
// to expand this {node} to control-flow.
- if (!NodeProperties::CanBeNullOrUndefined(arguments_list, effect)) {
+ if (!NodeProperties::CanBeNullOrUndefined(isolate(), arguments_list,
+ effect)) {
// Massage the value inputs appropriately.
node->ReplaceInput(0, target);
node->ReplaceInput(1, this_argument);
@@ -401,7 +405,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// definitely a constructor or not a constructor.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
bool const is_constructor = receiver_maps[0]->is_constructor();
@@ -426,18 +431,18 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
isolate());
if (descriptors->number_of_descriptors() < 2) return NoChange();
if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) !=
- isolate()->heap()->length_string()) {
+ ReadOnlyRoots(isolate()).length_string()) {
return NoChange();
}
- if (!descriptors->GetValue(JSFunction::kLengthDescriptorIndex)
+ if (!descriptors->GetStrongValue(JSFunction::kLengthDescriptorIndex)
->IsAccessorInfo()) {
return NoChange();
}
if (descriptors->GetKey(JSFunction::kNameDescriptorIndex) !=
- isolate()->heap()->name_string()) {
+ ReadOnlyRoots(isolate()).name_string()) {
return NoChange();
}
- if (!descriptors->GetValue(JSFunction::kNameDescriptorIndex)
+ if (!descriptors->GetStrongValue(JSFunction::kNameDescriptorIndex)
->IsAccessorInfo()) {
return NoChange();
}
@@ -451,7 +456,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
: native_context()->bound_function_without_constructor_map(),
isolate());
if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(map, prototype);
+ map = Map::TransitionToPrototype(isolate(), map, prototype);
}
// Make sure we can rely on the {receiver_maps}.
@@ -562,7 +567,8 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
// Try to determine the {object} map.
ZoneHandleSet<Map> object_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(object, effect, &object_maps);
+ NodeProperties::InferReceiverMaps(isolate(), object, effect,
+ &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
Handle<Map> candidate_map = object_maps[0];
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
@@ -588,7 +594,8 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
}
if (result == NodeProperties::kUnreliableReceiverMaps) {
for (size_t i = 0; i < object_maps.size(); ++i) {
- dependencies()->AssumeMapStable(object_maps[i]);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), object_maps[i]));
}
}
Node* value = jsgraph()->Constant(candidate_prototype);
@@ -721,7 +728,8 @@ Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
// the ToObject step of Object.prototype.isPrototypeOf is a no-op).
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
for (size_t i = 0; i < receiver_maps.size(); ++i) {
if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
@@ -855,9 +863,8 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kGetProperty);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1);
+ graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
vtrue = etrue = if_true =
graph()->NewNode(common()->Call(call_descriptor), stub_code, target,
@@ -962,8 +969,8 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
return Changed(vtrue);
}
-bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
+bool CanInlineArrayIteratingBuiltin(Isolate* isolate,
+ Handle<Map> receiver_map) {
if (!receiver_map->prototype()->IsJSArray()) return false;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
isolate);
@@ -1016,7 +1023,8 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// By ensuring that {kind} is object or double, we can be polymorphic
@@ -1027,7 +1035,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
}
for (Handle<Map> receiver_map : receiver_maps) {
ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map)) {
return NoChange();
}
if (!IsFastElementsKind(next_kind)) {
@@ -1043,7 +1051,8 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1204,12 +1213,14 @@ Reduction JSCallReducer::ReduceArrayReduce(Node* node,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
}
@@ -1225,7 +1236,8 @@ Reduction JSCallReducer::ReduceArrayReduce(Node* node,
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1478,7 +1490,8 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
@@ -1487,13 +1500,15 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
const ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->array_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
Handle<JSFunction> handle_constructor(
JSFunction::cast(
@@ -1618,10 +1633,12 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
&check_fail, &control);
}
- Handle<Map> double_map(Map::cast(
- native_context()->get(Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))));
+ Handle<Map> double_map(Map::cast(native_context()->get(
+ Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))),
+ isolate());
Handle<Map> fast_map(
- Map::cast(native_context()->get(Context::ArrayMapIndex(HOLEY_ELEMENTS))));
+ Map::cast(native_context()->get(Context::ArrayMapIndex(HOLEY_ELEMENTS))),
+ isolate());
effect = graph()->NewNode(
simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
callback_value, effect, control);
@@ -1678,7 +1695,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
@@ -1689,7 +1707,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
const ElementsKind packed_kind = GetPackedElementsKind(kind);
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map)) {
return NoChange();
}
// We can handle different maps, as long as their elements kind are the
@@ -1697,10 +1715,12 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->array_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
Handle<Map> initial_map(
- Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)));
+ Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)),
+ isolate());
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
@@ -1724,7 +1744,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
jsgraph()->ZeroConstant());
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ ab.Store(AccessBuilder::ForJSObjectInObjectProperty(
+ MapRef(js_heap_broker(), initial_map), i),
jsgraph()->UndefinedConstant());
}
a = effect = ab.Finish();
@@ -1952,7 +1973,8 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -1963,7 +1985,8 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
}
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
if (receiver_map->elements_kind() != kind) return NoChange();
@@ -1971,7 +1994,8 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2269,7 +2293,8 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
@@ -2278,13 +2303,15 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
const ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->array_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2525,7 +2552,7 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
}
Handle<Map> receiver_map;
- if (!NodeProperties::GetMapWitness(node).ToHandle(&receiver_map))
+ if (!NodeProperties::GetMapWitness(isolate(), node).ToHandle(&receiver_map))
return NoChange();
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return NoChange();
@@ -2537,8 +2564,8 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
: GetCallableForArrayIncludes(receiver_map->elements_kind(),
isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kEliminatable);
+ graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
// The stub expects the following arguments: the receiver array, its elements,
// the search_element, the array length, and the index to start searching
// from.
@@ -2605,7 +2632,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
@@ -2616,13 +2644,15 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
const ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->array_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
Node* k = jsgraph()->ZeroConstant();
@@ -2824,7 +2854,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Node* control = NodeProperties::GetControlInput(node);
Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared->function_data()));
+ FunctionTemplateInfo::cast(shared->function_data()), isolate());
// CallApiCallbackStub expects the target in a register, so we count it out,
// and counts the receiver as an implicit argument, so we count the receiver
@@ -2835,7 +2865,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// callback based on those.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
for (size_t i = 0; i < receiver_maps.size(); ++i) {
Handle<Map> receiver_map = receiver_maps[i];
@@ -2852,7 +2883,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
}
// See if we can constant-fold the compatible receiver checks.
- CallOptimization call_optimization(function_template_info);
+ CallOptimization call_optimization(isolate(), function_template_info);
if (!call_optimization.is_simple_api_call()) return NoChange();
CallOptimization::HolderLookup lookup;
Handle<JSObject> api_holder =
@@ -2869,7 +2900,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Install stability dependencies for unreliable {receiver_maps}.
if (result == NodeProperties::kUnreliableReceiverMaps) {
for (size_t i = 0; i < receiver_maps.size(); ++i) {
- dependencies()->AssumeMapStable(receiver_maps[i]);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), receiver_maps[i]));
}
}
@@ -2886,13 +2918,12 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Handle<CallHandlerInfo> call_handler_info(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
Handle<Object> data(call_handler_info->data(), isolate());
- CallApiCallbackStub stub(isolate(), argc);
- CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
+ Callable call_api_callback = CodeFactory::CallApiCallback(isolate(), argc);
+ CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), cid,
+ graph()->zone(), cid,
cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1, Linkage::kNoContext);
+ CallDescriptor::kNeedsFrameState);
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
Node* holder = lookup == CallOptimization::kHolderFound
? jsgraph()->HeapConstant(api_holder)
@@ -2900,7 +2931,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(stub.GetCode()));
+ jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, context);
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
node->InsertInput(graph()->zone(), 3, holder);
@@ -3040,7 +3071,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// that no one messed with the %ArrayIteratorPrototype%.next method.
if (node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstructWithSpread) {
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_iterator_protector()));
}
// Remove the {arguments_list} input from the {node}.
@@ -3249,7 +3281,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Update the JSCall operator on {node}.
ConvertReceiverMode const convert_mode =
- NodeProperties::CanBeNullOrUndefined(bound_this, effect)
+ NodeProperties::CanBeNullOrUndefined(isolate(), bound_this, effect)
? ConvertReceiverMode::kAny
: ConvertReceiverMode::kNotNullOrUndefined;
NodeProperties::ChangeOp(
@@ -3273,15 +3305,14 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
- Handle<Object> feedback(nexus.GetFeedback(), isolate());
- if (feedback->IsWeakCell()) {
+ HeapObject* heap_object;
+ if (nexus.GetFeedback()->ToWeakHeapObject(&heap_object)) {
+ Handle<HeapObject> feedback(heap_object, isolate());
// Check if we want to use CallIC feedback here.
if (!ShouldUseCallICFeedback(target)) return NoChange();
- Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
- if (cell->value()->IsCallable()) {
- Node* target_function =
- jsgraph()->Constant(handle(cell->value(), isolate()));
+ if (feedback->IsCallable()) {
+ Node* target_function = jsgraph()->Constant(feedback);
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -3408,6 +3439,54 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Builtins::kDataViewPrototypeGetUint8:
+ return ReduceDataViewPrototypeGet(node,
+ ExternalArrayType::kExternalUint8Array);
+ case Builtins::kDataViewPrototypeGetInt8:
+ return ReduceDataViewPrototypeGet(node,
+ ExternalArrayType::kExternalInt8Array);
+ case Builtins::kDataViewPrototypeGetUint16:
+ return ReduceDataViewPrototypeGet(
+ node, ExternalArrayType::kExternalUint16Array);
+ case Builtins::kDataViewPrototypeGetInt16:
+ return ReduceDataViewPrototypeGet(node,
+ ExternalArrayType::kExternalInt16Array);
+ case Builtins::kDataViewPrototypeGetUint32:
+ return ReduceDataViewPrototypeGet(
+ node, ExternalArrayType::kExternalUint32Array);
+ case Builtins::kDataViewPrototypeGetInt32:
+ return ReduceDataViewPrototypeGet(node,
+ ExternalArrayType::kExternalInt32Array);
+ case Builtins::kDataViewPrototypeGetFloat32:
+ return ReduceDataViewPrototypeGet(
+ node, ExternalArrayType::kExternalFloat32Array);
+ case Builtins::kDataViewPrototypeGetFloat64:
+ return ReduceDataViewPrototypeGet(
+ node, ExternalArrayType::kExternalFloat64Array);
+ case Builtins::kDataViewPrototypeSetUint8:
+ return ReduceDataViewPrototypeSet(node,
+ ExternalArrayType::kExternalUint8Array);
+ case Builtins::kDataViewPrototypeSetInt8:
+ return ReduceDataViewPrototypeSet(node,
+ ExternalArrayType::kExternalInt8Array);
+ case Builtins::kDataViewPrototypeSetUint16:
+ return ReduceDataViewPrototypeSet(
+ node, ExternalArrayType::kExternalUint16Array);
+ case Builtins::kDataViewPrototypeSetInt16:
+ return ReduceDataViewPrototypeSet(node,
+ ExternalArrayType::kExternalInt16Array);
+ case Builtins::kDataViewPrototypeSetUint32:
+ return ReduceDataViewPrototypeSet(
+ node, ExternalArrayType::kExternalUint32Array);
+ case Builtins::kDataViewPrototypeSetInt32:
+ return ReduceDataViewPrototypeSet(node,
+ ExternalArrayType::kExternalInt32Array);
+ case Builtins::kDataViewPrototypeSetFloat32:
+ return ReduceDataViewPrototypeSet(
+ node, ExternalArrayType::kExternalFloat32Array);
+ case Builtins::kDataViewPrototypeSetFloat64:
+ return ReduceDataViewPrototypeSet(
+ node, ExternalArrayType::kExternalFloat64Array);
case Builtins::kTypedArrayPrototypeByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE,
@@ -3507,6 +3586,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceMapPrototypeGet(node);
case Builtins::kMapPrototypeHas:
return ReduceMapPrototypeHas(node);
+ case Builtins::kRegExpPrototypeTest:
+ return ReduceRegExpPrototypeTest(node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
case Builtins::kStringPrototypeIndexOf:
@@ -3646,13 +3727,15 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
- Handle<Object> feedback(nexus.GetFeedback(), isolate());
- if (feedback->IsAllocationSite()) {
+ HeapObject* feedback_object;
+ if (nexus.GetFeedback()->ToStrongHeapObject(&feedback_object) &&
+ feedback_object->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
// for the resulting arrays. This has to be kept in sync with the
// implementation in Ignition.
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+ Handle<AllocationSite> site(AllocationSite::cast(feedback_object),
+ isolate());
// Retrieve the Array function from the {node}.
Node* array_function = jsgraph()->HeapConstant(
@@ -3674,12 +3757,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
NodeProperties::ReplaceValueInput(node, array_function, 1);
NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
return Changed(node);
- } else if (feedback->IsWeakCell() &&
+ } else if (nexus.GetFeedback()->ToWeakHeapObject(&feedback_object) &&
!HeapObjectMatcher(new_target).HasValue()) {
- Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
- if (cell->value()->IsConstructor()) {
- Node* new_target_feedback =
- jsgraph()->Constant(handle(cell->value(), isolate()));
+ Handle<HeapObject> object(feedback_object, isolate());
+ if (object->IsConstructor()) {
+ Node* new_target_feedback = jsgraph()->Constant(object);
// Check that the {new_target} is still the {new_target_feedback}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
@@ -4206,20 +4288,17 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
namespace {
// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
- Isolate* isolate = jsarray_map->GetIsolate();
Handle<Name> length_string = isolate->factory()->length_string();
DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number =
- descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+ int number = descriptors->Search(*length_string, *jsarray_map);
DCHECK_NE(DescriptorArray::kNotFound, number);
return descriptors->GetDetails(number).IsReadOnly();
}
// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
+bool CanInlineArrayResizeOperation(Isolate* isolate, Handle<Map> receiver_map) {
if (!receiver_map->prototype()->IsJSArray()) return false;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
isolate);
@@ -4227,7 +4306,7 @@ bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
IsFastElementsKind(receiver_map->elements_kind()) &&
!receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
- !IsReadOnlyLengthDescriptor(receiver_map);
+ !IsReadOnlyLengthDescriptor(isolate, receiver_map);
}
} // namespace
@@ -4250,20 +4329,23 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
// Try to determine the {receiver} map(s).
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
+ return NoChange();
if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
return NoChange();
}
// Install code dependencies on the {receiver} global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4359,13 +4441,15 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
+ return NoChange();
// TODO(turbofan): Extend this to also handle fast holey double elements
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
@@ -4375,7 +4459,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
}
// Install code dependencies on the {receiver} global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4475,13 +4560,15 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
+ return NoChange();
// TODO(turbofan): Extend this to also handle fast holey double elements
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
@@ -4491,7 +4578,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
}
// Install code dependencies on the {receiver} global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4685,7 +4773,8 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
// Check if we know that {receiver} is a valid JSReceiver.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
for (Handle<Map> receiver_map : receiver_maps) {
@@ -4705,14 +4794,14 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
namespace {
-bool InferIteratedObjectMaps(Node* iterator,
+bool InferIteratedObjectMaps(Isolate* isolate, Node* iterator,
ZoneHandleSet<Map>* iterated_object_maps) {
DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, iterator->opcode());
Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
Node* effect = NodeProperties::GetEffectInput(iterator);
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(iterated_object, effect,
+ NodeProperties::InferReceiverMaps(isolate, iterated_object, effect,
iterated_object_maps);
return result != NodeProperties::kNoReceiverMaps;
}
@@ -4739,7 +4828,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Try to infer the [[IteratedObject]] maps from the {iterator}.
ZoneHandleSet<Map> iterated_object_maps;
- if (!InferIteratedObjectMaps(iterator, &iterated_object_maps)) {
+ if (!InferIteratedObjectMaps(isolate(), iterator, &iterated_object_maps)) {
return NoChange();
}
DCHECK_NE(0, iterated_object_maps.size());
@@ -4759,7 +4848,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
}
} else {
for (Handle<Map> iterated_object_map : iterated_object_maps) {
- if (!CanInlineArrayIteratingBuiltin(iterated_object_map)) {
+ if (!CanInlineArrayIteratingBuiltin(isolate(), iterated_object_map)) {
return NoChange();
}
if (!UnionElementsKindUptoSize(&elements_kind,
@@ -4771,7 +4860,8 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Install code dependency on the array protector for holey arrays.
if (IsHoleyElementsKind(elements_kind)) {
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
}
// Load the (current) {iterated_object} from the {iterator}; this might be
@@ -4795,8 +4885,8 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
if (isolate()->IsArrayBufferNeuteringIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
// gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
// Deoptimize if the array buffer was neutered.
Node* buffer = effect = graph()->NewNode(
@@ -5164,7 +5254,7 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_STRING_ITERATOR_TYPE)) {
Node* string = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
@@ -5255,10 +5345,10 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(
Callable const callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState,
- Operator::kNoDeopt | Operator::kNoWrite);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState,
+ Operator::kNoDeopt | Operator::kNoWrite);
// TODO(turbofan): Massage the FrameState of the {node} here once we
// have an artificial builtin frame type, so that it looks like the
@@ -5281,7 +5371,8 @@ Reduction JSCallReducer::ReduceAsyncFunctionPromiseCreate(Node* node) {
if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
// Install a code dependency on the promise hook protector cell.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
// Morph this {node} into a JSCreatePromise node.
RelaxControls(node);
@@ -5296,8 +5387,8 @@ Reduction JSCallReducer::ReduceAsyncFunctionPromiseRelease(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Install a code dependency on the promise hook protector cell.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
// The AsyncFunctionPromiseRelease builtin is a no-op as long as neither
// the debugger is active nor any promise hook has been installed (ever).
@@ -5352,11 +5443,11 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
// Only handle builtins Promises, not subclasses.
if (target != new_target) return NoChange();
- // Add a code dependency on the promise hook protector.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
Handle<SharedFunctionInfo> promise_shared(
- handle(native_context()->promise_function()->shared()));
+ handle(native_context()->promise_function()->shared(), isolate()));
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
@@ -5510,8 +5601,8 @@ Reduction JSCallReducer::ReducePromiseInternalConstructor(Node* node) {
// Check that promises aren't being observed through (debug) hooks.
if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Install a code dependency on the promise hook protector cell.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
// Create a new pending promise.
Node* value = effect =
@@ -5589,7 +5680,8 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
@@ -5602,8 +5694,8 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
}
}
- // Add a code dependency on the necessary protectors.
- dependencies()->AssumePropertyCell(factory()->promise_then_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_then_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5617,7 +5709,8 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onRejected parameter, and then filling up the parameters
// to two inputs from the left with undefined.
- Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ Node* target =
+ jsgraph()->Constant(handle(native_context()->promise_then(), isolate()));
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceEffectInput(node, effect);
for (; arity > 1; --arity) node->RemoveInput(3);
@@ -5664,7 +5757,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
@@ -5677,10 +5771,12 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
}
- // Add a code dependency on the necessary protectors.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
- dependencies()->AssumePropertyCell(factory()->promise_then_protector());
- dependencies()->AssumePropertyCell(factory()->promise_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_then_protector()));
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->promise_species_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5765,7 +5861,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onFinally parameter, and then replacing the only parameter
// input with the {on_finally} value.
- Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ Node* target =
+ jsgraph()->Constant(handle(native_context()->promise_then(), isolate()));
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
@@ -5813,7 +5910,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
return NoChange();
}
@@ -5828,9 +5926,10 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
}
}
- // Add a code dependency on the necessary protectors.
- dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
- dependencies()->AssumePropertyCell(factory()->promise_species_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->promise_species_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5880,7 +5979,8 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
return NoChange();
}
@@ -6075,7 +6175,8 @@ Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ if (!NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ JS_MAP_TYPE))
return NoChange();
Node* table = effect = graph()->NewNode(
@@ -6119,7 +6220,8 @@ Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ if (!NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ JS_MAP_TYPE))
return NoChange();
Node* table = effect = graph()->NewNode(
@@ -6159,7 +6261,8 @@ Reduction JSCallReducer::ReduceCollectionIteration(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (NodeProperties::HasInstanceTypeWitness(
- receiver, effect, InstanceTypeForCollectionKind(collection_kind))) {
+ isolate(), receiver, effect,
+ InstanceTypeForCollectionKind(collection_kind))) {
Node* js_create_iterator = effect = graph()->NewNode(
javascript()->CreateCollectionIterator(collection_kind, iteration_kind),
receiver, context, effect, control);
@@ -6176,7 +6279,8 @@ Reduction JSCallReducer::ReduceCollectionPrototypeSize(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (NodeProperties::HasInstanceTypeWitness(
- receiver, effect, InstanceTypeForCollectionKind(collection_kind))) {
+ isolate(), receiver, effect,
+ InstanceTypeForCollectionKind(collection_kind))) {
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
receiver, effect, control);
@@ -6216,7 +6320,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
InstanceType receiver_instance_type;
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
receiver_instance_type = receiver_maps[0]->instance_type();
@@ -6268,8 +6373,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kEliminatable);
+ graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
index = effect =
graph()->NewNode(common()->Call(call_descriptor),
jsgraph()->HeapConstant(callable.code()), table, index,
@@ -6491,7 +6596,8 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect, instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ instance_type)) {
// Load the {receiver}s field.
Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
receiver, effect, control);
@@ -6500,16 +6606,15 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
if (isolate()->IsArrayBufferNeuteringIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
// gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
// Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
+ Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check = effect =
- graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
- receiver_buffer, effect, control);
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
// Default to zero if the {receiver}s buffer was neutered.
value = graph()->NewNode(
@@ -6523,6 +6628,361 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
return NoChange();
}
+namespace {
+int ExternalArrayElementSize(const ExternalArrayType element_type) {
+ switch (element_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return size;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+}
+} // namespace
+
+Reduction JSCallReducer::ReduceDataViewPrototypeGet(
+ Node* node, ExternalArrayType element_type) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+
+ CallParameters const& p = CallParametersOf(node->op());
+
+ Node* offset = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
+
+ Node* is_little_endian = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->FalseConstant();
+
+ // Only do stuff if the {receiver} is really a DataView.
+ if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ JS_DATA_VIEW_TYPE)) {
+ // Check that the {offset} is a positive Smi.
+ offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ offset, effect, control);
+
+ Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), offset);
+
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
+ is_positive, effect, control);
+
+ // Coerce {is_little_endian} to boolean.
+ is_little_endian =
+ graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
+
+ // Get the underlying buffer and check that it has not been neutered.
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ Node* branch_neutered = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_neutered, control);
+
+ // Raise an error if it was neutered.
+ Node* if_true_neutered =
+ graph()->NewNode(common()->IfTrue(), branch_neutered);
+ Node* etrue_neutered = effect;
+ {
+ if_true_neutered = etrue_neutered = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kDetachedOperation),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
+ context, frame_state, etrue_neutered, if_true_neutered);
+ }
+
+ // Otherwise, proceed.
+ Node* if_false_neutered =
+ graph()->NewNode(common()->IfFalse(), branch_neutered);
+ Node* efalse_neutered = effect;
+
+ // Get the byte offset and byte length of the {receiver}.
+ Node* byte_offset = efalse_neutered =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, efalse_neutered, if_false_neutered);
+
+ Node* byte_length = efalse_neutered =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, efalse_neutered, if_false_neutered);
+
+ // The end offset is the offset plus the element size
+ // of the type that we want to load.
+ int element_size = ExternalArrayElementSize(element_type);
+ Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size));
+
+ // We need to check that {end_offset} <= {byte_length}, i.e.
+ // throw a RangeError if {byte_length} < {end_offset}.
+ Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
+ byte_length, end_offset);
+ Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_range, if_false_neutered);
+
+ Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
+ Node* etrue_range = efalse_neutered;
+ {
+ if_true_range = etrue_range = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
+ jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
+ context, frame_state, etrue_range, if_true_range);
+ }
+
+ Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
+ Node* efalse_range = efalse_neutered;
+ Node* vfalse_range;
+ {
+ // Get the buffer's backing store.
+ Node* backing_store = efalse_range =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, efalse_range, if_false_range);
+
+ // Compute the buffer index at which we'll read.
+ Node* buffer_index =
+ graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+
+ // Perform the load.
+ vfalse_range = efalse_range =
+ graph()->NewNode(simplified()->LoadDataViewElement(element_type),
+ buffer, backing_store, buffer_index,
+ is_little_endian, efalse_range, if_false_range);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* extrue_neutered = graph()->NewNode(
+ common()->IfException(), etrue_neutered,
+ if_true_neutered); // We threw because the array was neutered.
+ if_true_neutered =
+ graph()->NewNode(common()->IfSuccess(), if_true_neutered);
+
+ Node* extrue_range =
+ graph()->NewNode(common()->IfException(), etrue_range,
+ if_true_range); // We threw because out of bounds.
+ if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
+
+ // We can't throw in LoadDataViewElement(),
+ // so we don't need to handle that path here.
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
+ extrue_range, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ extrue_neutered, extrue_range, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+ }
+
+ // Connect the throwing paths to end.
+ if_true_neutered =
+ graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
+ if_true_range =
+ graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+
+ // Continue on the regular path.
+ ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
+ return Changed(vfalse_range);
+ }
+
+ return NoChange();
+}
+Reduction JSCallReducer::ReduceDataViewPrototypeSet(
+ Node* node, ExternalArrayType element_type) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+
+ CallParameters const& p = CallParametersOf(node->op());
+
+ Node* offset = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
+
+ Node* value = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->ZeroConstant();
+
+ Node* is_little_endian = node->op()->ValueInputCount() > 4
+ ? NodeProperties::GetValueInput(node, 4)
+ : jsgraph()->FalseConstant();
+
+ // Only do stuff if the {receiver} is really a DataView.
+ if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ JS_DATA_VIEW_TYPE)) {
+ // Check that the {offset} is a positive Smi.
+ offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ offset, effect, control);
+
+ Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), offset);
+
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
+ is_positive, effect, control);
+
+ // Coerce {is_little_endian} to boolean.
+ is_little_endian =
+ graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
+
+ // Coerce {value} to Number.
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ value, effect, control);
+
+ // Get the underlying buffer and check that it has not been neutered.
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ Node* branch_neutered = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_neutered, control);
+
+ // Raise an error if it was neutered.
+ Node* if_true_neutered =
+ graph()->NewNode(common()->IfTrue(), branch_neutered);
+ Node* etrue_neutered = effect;
+ {
+ if_true_neutered = etrue_neutered = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kDetachedOperation),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
+ context, frame_state, etrue_neutered, if_true_neutered);
+ }
+
+ // Otherwise, proceed.
+ Node* if_false_neutered =
+ graph()->NewNode(common()->IfFalse(), branch_neutered);
+ Node* efalse_neutered = effect;
+
+ // Get the byte offset and byte length of the {receiver}.
+ Node* byte_offset = efalse_neutered =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, efalse_neutered, if_false_neutered);
+
+ Node* byte_length = efalse_neutered =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, efalse_neutered, if_false_neutered);
+
+ // The end offset is the offset plus the element size
+ // of the type that we want to store.
+ int element_size = ExternalArrayElementSize(element_type);
+ Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size));
+
+ // We need to check that {end_offset} <= {byte_length}, i.e.
+ // throw a RangeError if {byte_length} < {end_offset}.
+ Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
+ byte_length, end_offset);
+ Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_range, if_false_neutered);
+
+ Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
+ Node* etrue_range = efalse_neutered;
+ {
+ if_true_range = etrue_range = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
+ jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
+ jsgraph()->HeapConstant(
+ factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
+ context, frame_state, etrue_range, if_true_range);
+ }
+
+ Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
+ Node* efalse_range = efalse_neutered;
+ Node* vfalse_range = jsgraph()->UndefinedConstant(); // Return value.
+ {
+ // Get the buffer's backing store.
+ Node* backing_store = efalse_range =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, efalse_range, if_false_range);
+
+ // Compute the buffer index at which we'll write.
+ Node* buffer_index =
+ graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+
+ // Perform the store.
+ efalse_range =
+ graph()->NewNode(simplified()->StoreDataViewElement(element_type),
+ buffer, backing_store, buffer_index, value,
+ is_little_endian, efalse_range, if_false_range);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* extrue_neutered = graph()->NewNode(
+ common()->IfException(), etrue_neutered,
+ if_true_neutered); // We threw because the array was neutered.
+ if_true_neutered =
+ graph()->NewNode(common()->IfSuccess(), if_true_neutered);
+
+ Node* extrue_range =
+ graph()->NewNode(common()->IfException(), etrue_range,
+ if_true_range); // We threw because out of bounds.
+ if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
+
+ // We can't throw in StoreDataViewElement(),
+ // so we don't need to handle that path here.
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
+ extrue_range, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ extrue_neutered, extrue_range, merge);
+ ReplaceWithValue(on_exception, phi, ephi, merge);
+ }
+
+ // Connect the throwing paths to end.
+ if_true_neutered =
+ graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
+ if_true_range =
+ graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
+ NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+
+ // Continue on the regular path.
+ ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
+ return Changed(vfalse_range);
+ }
+
+ return NoChange();
+}
+
// ES6 section 18.2.2 isFinite ( number )
Reduction JSCallReducer::ReduceGlobalIsFinite(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
@@ -6578,7 +7038,8 @@ Reduction JSCallReducer::ReduceDatePrototypeGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
+ if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ JS_DATE_TYPE)) {
Node* value = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
effect, control);
@@ -6626,6 +7087,101 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
return Changed(node);
}
+Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
+ if (FLAG_force_slow_path) return NoChange();
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* regexp = NodeProperties::GetValueInput(node, 1);
+
+ // Check if we know something about the {regexp}.
+ ZoneHandleSet<Map> regexp_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(isolate(), regexp, effect,
+ &regexp_maps);
+
+ bool need_map_check = false;
+ switch (result) {
+ case NodeProperties::kNoReceiverMaps:
+ return NoChange();
+ case NodeProperties::kUnreliableReceiverMaps:
+ need_map_check = true;
+ break;
+ case NodeProperties::kReliableReceiverMaps:
+ break;
+ }
+
+ for (auto map : regexp_maps) {
+ if (map->instance_type() != JS_REGEXP_TYPE) return NoChange();
+ }
+
+ // Compute property access info for "exec" on {regexp}.
+ PropertyAccessInfo ai_exec;
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context(), graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ MapHandles(regexp_maps.begin(), regexp_maps.end()),
+ factory()->exec_string(), AccessMode::kLoad, &ai_exec)) {
+ return NoChange();
+ }
+ // If "exec" has been modified on {regexp}, we can't do anything.
+ if (!ai_exec.IsDataConstant()) return NoChange();
+ Handle<Object> exec_on_proto = ai_exec.constant();
+ if (*exec_on_proto != *isolate()->regexp_exec_function()) return NoChange();
+
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
+
+ // Add proper dependencies on the {regexp}s [[Prototype]]s.
+ Handle<JSObject> holder;
+ if (ai_exec.holder().ToHandle(&holder)) {
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context(), ai_exec.receiver_maps(), holder);
+ }
+
+ if (need_map_check) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ regexp_maps, p.feedback()),
+ regexp, effect, control);
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* search = NodeProperties::GetValueInput(node, 2);
+ Node* search_string = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), search, effect, control);
+
+ Node* lastIndex = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSRegExpLastIndex()), regexp,
+ effect, control);
+
+ Node* lastIndexSmi = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), lastIndex, effect, control);
+
+ Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), lastIndexSmi);
+
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
+ is_positive, effect, control);
+
+ node->ReplaceInput(0, regexp);
+ node->ReplaceInput(1, search_string);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->RegExpTest());
+ return Changed(node);
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 15228032cf..6e3f531647 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -14,7 +14,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationDependencies;
class Factory;
class VectorSlotPair;
@@ -23,8 +22,10 @@ namespace compiler {
// Forward declarations.
class CallFrequency;
class CommonOperatorBuilder;
+class CompilationDependencies;
struct FieldAccess;
class JSGraph;
+class JSHeapBroker;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -36,11 +37,12 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
typedef base::Flags<Flag> Flags;
- JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<Context> native_context,
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
+ Flags flags, Handle<Context> native_context,
CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker),
flags_(flags),
native_context_(native_context),
dependencies_(dependencies) {}
@@ -109,6 +111,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSCall(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
+ Reduction ReduceRegExpPrototypeTest(Node* node);
Reduction ReduceReturnReceiver(Node* node);
Reduction ReduceStringPrototypeIndexOf(Node* node);
Reduction ReduceStringPrototypeSubstring(Node* node);
@@ -178,6 +181,11 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
InstanceType instance_type,
FieldAccess const& access);
+ Reduction ReduceDataViewPrototypeGet(Node* node,
+ ExternalArrayType element_type);
+ Reduction ReduceDataViewPrototypeSet(Node* node,
+ ExternalArrayType element_type);
+
Reduction ReduceDatePrototypeGetTime(Node* node);
Reduction ReduceDateNow(Node* node);
Reduction ReduceNumberParseInt(Node* node);
@@ -223,6 +231,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
@@ -234,6 +243,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
+ const JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 9f833b60eb..85a80a2b2f 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -17,6 +17,7 @@ namespace internal {
namespace compiler {
Reduction JSContextSpecialization::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kParameter:
return ReduceParameter(node);
@@ -99,12 +100,13 @@ bool IsContextParameter(Node* node) {
// context (which we want to read from or store to), try to return a
// specialization context. If successful, update {distance} to whatever
// distance remains from the specialization context.
-MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
- Maybe<OuterContext> maybe_outer) {
+base::Optional<ContextRef> GetSpecializationContext(
+ const JSHeapBroker* broker, Node* node, size_t* distance,
+ Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- Handle<Object> object = HeapConstantOf(node->op());
- if (object->IsContext()) return Handle<Context>::cast(object);
+ HeapObjectRef object(broker, HeapConstantOf(node->op()));
+ if (object.IsContext()) return object.AsContext();
break;
}
case IrOpcode::kParameter: {
@@ -112,14 +114,14 @@ MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
if (maybe_outer.To(&outer) && IsContextParameter(node) &&
*distance >= outer.distance) {
*distance -= outer.distance;
- return outer.context;
+ return ContextRef(broker, outer.context);
}
break;
}
default:
break;
}
- return MaybeHandle<Context>();
+ return base::Optional<ContextRef>();
}
} // anonymous namespace
@@ -133,16 +135,18 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// First walk up the context chain in the graph as far as possible.
Node* context = NodeProperties::GetOuterContext(node, &depth);
- Handle<Context> concrete;
- if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
+ base::Optional<ContextRef> maybe_concrete =
+ GetSpecializationContext(js_heap_broker(), context, &depth, outer());
+ if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
return SimplifyJSLoadContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
+ ContextRef concrete = maybe_concrete.value();
for (; depth > 0; --depth) {
- concrete = handle(concrete->previous(), isolate());
+ concrete = concrete.previous().value();
}
if (!access.immutable()) {
@@ -151,21 +155,32 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
- // Even though the context slot is immutable, the context might have escaped
- // before the function to which it belongs has initialized the slot.
- // We must be conservative and check if the value in the slot is currently
- // the hole or undefined. Only if it is neither of these, can we be sure that
- // it won't change anymore.
- Handle<Object> value(concrete->get(static_cast<int>(access.index())),
- isolate());
- if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
+ // This will hold the final value, if we can figure it out.
+ base::Optional<ObjectRef> maybe_value;
+
+ maybe_value = concrete.get(static_cast<int>(access.index()));
+ if (maybe_value.has_value() && !maybe_value->IsSmi()) {
+ // Even though the context slot is immutable, the context might have escaped
+ // before the function to which it belongs has initialized the slot.
+ // We must be conservative and check if the value in the slot is currently
+ // the hole or undefined. Only if it is neither of these, can we be sure
+ // that it won't change anymore.
+ OddballType oddball_type = maybe_value->oddball_type();
+ if (oddball_type == OddballType::kUndefined ||
+ oddball_type == OddballType::kHole) {
+ maybe_value.reset();
+ }
+ }
+
+ if (!maybe_value.has_value()) {
return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
// Success. The context load can be replaced with the constant.
- // TODO(titzer): record the specialization for sharing code across multiple
- // contexts that have the same value in the corresponding context slot.
- Node* constant = jsgraph_->Constant(value);
+ // TODO(titzer): record the specialization for sharing code across
+ // multiple contexts that have the same value in the corresponding context
+ // slot.
+ Node* constant = jsgraph_->Constant(*maybe_value);
ReplaceWithValue(node, constant);
return Replace(constant);
}
@@ -181,16 +196,18 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// or hit a node that does not have a CreateXYZContext operator.
Node* context = NodeProperties::GetOuterContext(node, &depth);
- Handle<Context> concrete;
- if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
+ base::Optional<ContextRef> maybe_concrete =
+ GetSpecializationContext(js_heap_broker(), context, &depth, outer());
+ if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
return SimplifyJSStoreContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
+ ContextRef concrete = maybe_concrete.value();
for (; depth > 0; --depth) {
- concrete = handle(concrete->previous(), isolate());
+ concrete = concrete.previous().value();
}
return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 83949fa3cc..d2f56d50f1 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -33,12 +33,14 @@ struct OuterContext {
class JSContextSpecialization final : public AdvancedReducer {
public:
JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
+ JSHeapBroker* js_heap_broker,
Maybe<OuterContext> outer,
MaybeHandle<JSFunction> closure)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
outer_(outer),
- closure_(closure) {}
+ closure_(closure),
+ js_heap_broker_(js_heap_broker) {}
const char* reducer_name() const override {
return "JSContextSpecialization";
@@ -60,10 +62,12 @@ class JSContextSpecialization final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
+ const JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 87698565ef..a9ce42e1e2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -4,12 +4,11 @@
#include "src/compiler/js-create-lowering.h"
-#include "src/allocation-site-scopes.h"
#include "src/code-factory.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
@@ -20,7 +19,10 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-promise.h"
+#include "src/objects/js-regexp-inl.h"
namespace v8 {
namespace internal {
@@ -39,11 +41,11 @@ Node* GetArgumentsFrameState(Node* frame_state) {
// Checks whether allocation using the given target and new.target can be
// inlined.
-bool IsAllocationInlineable(Handle<JSFunction> target,
- Handle<JSFunction> new_target) {
- return new_target->has_initial_map() &&
- !new_target->initial_map()->is_dictionary_map() &&
- new_target->initial_map()->constructor_or_backpointer() == *target;
+bool IsAllocationInlineable(const JSFunctionRef& target,
+ const JSFunctionRef& new_target) {
+ return new_target.has_initial_map() &&
+ !new_target.initial_map().is_dictionary_map() &&
+ new_target.initial_map().constructor_or_backpointer().equals(target);
}
// When initializing arrays, we'll unfold the loop if the number of
@@ -54,81 +56,6 @@ const int kElementLoopUnrollLimit = 16;
const int kFunctionContextAllocationLimit = 16;
const int kBlockContextAllocationLimit = 16;
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
- int* max_properties) {
- DCHECK_GE(max_depth, 0);
- DCHECK_GE(*max_properties, 0);
-
- // Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
-
- // Check for too deep nesting.
- if (max_depth == 0) return false;
-
- // Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
- Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
- if (elements->length() > 0 &&
- elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
- return false;
- }
- }
- }
- } else if (boilerplate->HasDoubleElements()) {
- if (elements->Size() > kMaxRegularHeapObjectSize) return false;
- } else {
- return false;
- }
- }
-
- // TODO(turbofan): Do we want to support out-of-object properties?
- if (!(boilerplate->HasFastProperties() &&
- boilerplate->property_array()->length() == 0)) {
- return false;
- }
-
- // Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors(), isolate);
- int limit = boilerplate->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- if ((*max_properties)-- == 0) return false;
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
- return false;
- }
- }
- }
- return true;
-}
-
-// Maximum depth and total number of elements and properties for literal
-// graphs to be considered for fast deep-copying. The limit is chosen to
-// match the maximum number of inobject properties, to ensure that the
-// performance of using object literals is not worse than using constructor
-// functions, see crbug.com/v8/6211 for details.
-const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
-
} // namespace
Reduction JSCreateLowering::Reduce(Node* node) {
@@ -183,6 +110,7 @@ Reduction JSCreateLowering::Reduce(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
Node* const target = NodeProperties::GetValueInput(node, 0);
Type const target_type = NodeProperties::GetType(target);
@@ -191,58 +119,62 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
// Extract constructor and original constructor function.
- if (target_type.IsHeapConstant() && new_target_type.IsHeapConstant() &&
- target_type.AsHeapConstant()->Value()->IsJSFunction() &&
- new_target_type.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(target_type.AsHeapConstant()->Value());
- if (!constructor->IsConstructor()) return NoChange();
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(new_target_type.AsHeapConstant()->Value());
- if (!original_constructor->IsConstructor()) return NoChange();
+ if (!target_type.IsHeapConstant() || !new_target_type.IsHeapConstant() ||
+ !target_type.AsHeapConstant()->Ref().IsJSFunction() ||
+ !new_target_type.AsHeapConstant()->Ref().IsJSFunction()) {
+ return NoChange();
+ }
- // Check if we can inline the allocation.
- if (IsAllocationInlineable(constructor, original_constructor)) {
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- original_constructor->CompleteInobjectSlackTrackingIfActive();
- Handle<Map> initial_map(original_constructor->initial_map(), isolate());
- int const instance_size = initial_map->instance_size();
+ JSFunctionRef constructor =
+ target_type.AsHeapConstant()->Ref().AsJSFunction();
+ if (!constructor.IsConstructor()) return NoChange();
+ JSFunctionRef original_constructor =
+ new_target_type.AsHeapConstant()->Ref().AsJSFunction();
+ if (!original_constructor.IsConstructor()) return NoChange();
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- dependencies()->AssumeInitialMapCantChange(initial_map);
+ // Check if we can inline the allocation.
+ if (!IsAllocationInlineable(constructor, original_constructor)) {
+ return NoChange();
+ }
- // Emit code to allocate the JSObject instance for the
- // {original_constructor}.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(instance_size);
- a.Store(AccessBuilder::ForMap(), initial_map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
- jsgraph()->UndefinedConstant());
- }
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} changes.
+ MapRef initial_map = dependencies()->DependOnInitialMap(original_constructor);
+
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ SlackTrackingResult slack_tracking_result =
+ original_constructor.FinishSlackTracking();
+
+ // Emit code to allocate the JSObject instance for the
+ // {original_constructor}.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(slack_tracking_result.instance_size);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
}
- return NoChange();
+
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
Node* const control = graph()->start();
FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- Handle<SharedFunctionInfo> shared =
- state_info.shared_info().ToHandleChecked();
+ SharedFunctionInfoRef shared(js_heap_broker(),
+ state_info.shared_info().ToHandleChecked());
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
@@ -250,7 +182,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
switch (type) {
case CreateArgumentsType::kMappedArguments: {
// TODO(mstarzinger): Duplicate parameters are not handled yet.
- if (shared->has_duplicate_parameters()) return NoChange();
+ if (shared.has_duplicate_parameters()) return NoChange();
Node* const callee = NodeProperties::GetValueInput(node, 0);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -258,7 +190,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length = graph()->NewNode(
simplified()->ArgumentsLength(
- shared->internal_formal_parameter_count(), false),
+ shared.internal_formal_parameter_count(), false),
arguments_frame);
// Allocate the elements backing store.
bool has_aliased_arguments = false;
@@ -266,11 +198,10 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect, control, context, arguments_frame, arguments_length, shared,
&has_aliased_arguments);
// Load the arguments object map.
- Node* const arguments_map = jsgraph()->HeapConstant(
- handle(has_aliased_arguments
- ? native_context()->fast_aliased_arguments_map()
- : native_context()->sloppy_arguments_map(),
- isolate()));
+ Node* const arguments_map = jsgraph()->Constant(
+ has_aliased_arguments
+ ? native_context_ref().fast_aliased_arguments_map()
+ : native_context_ref().sloppy_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -291,15 +222,15 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length = graph()->NewNode(
simplified()->ArgumentsLength(
- shared->internal_formal_parameter_count(), false),
+ shared.internal_formal_parameter_count(), false),
arguments_frame);
// Allocate the elements backing store.
Node* const elements = effect =
graph()->NewNode(simplified()->NewArgumentsElements(0),
arguments_frame, arguments_length, effect);
// Load the arguments object map.
- Node* const arguments_map = jsgraph()->HeapConstant(
- handle(native_context()->strict_arguments_map(), isolate()));
+ Node* const arguments_map =
+ jsgraph()->Constant(native_context_ref().strict_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -319,7 +250,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
graph()->NewNode(simplified()->ArgumentsFrame());
Node* const rest_length = graph()->NewNode(
simplified()->ArgumentsLength(
- shared->internal_formal_parameter_count(), true),
+ shared.internal_formal_parameter_count(), true),
arguments_frame);
// Allocate the elements backing store. Since NewArgumentsElements
// copies from the end of the arguments adapter frame, this is a suffix
@@ -328,8 +259,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
graph()->NewNode(simplified()->NewArgumentsElements(0),
arguments_frame, rest_length, effect);
// Load the JSArray object map.
- Node* const jsarray_map = jsgraph()->HeapConstant(handle(
- native_context()->js_array_fast_elements_map_index(), isolate()));
+ Node* const jsarray_map = jsgraph()->Constant(
+ native_context_ref().js_array_fast_elements_map_index());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -353,7 +284,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// TODO(mstarzinger): Duplicate parameters are not handled yet.
- if (shared->has_duplicate_parameters()) return NoChange();
+ if (shared.has_duplicate_parameters()) return NoChange();
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
@@ -372,10 +303,10 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect, control, args_state, context, shared, &has_aliased_arguments);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
- Node* const arguments_map = jsgraph()->HeapConstant(handle(
- has_aliased_arguments ? native_context()->fast_aliased_arguments_map()
- : native_context()->sloppy_arguments_map(),
- isolate()));
+ Node* const arguments_map = jsgraph()->Constant(
+ has_aliased_arguments
+ ? native_context_ref().fast_aliased_arguments_map()
+ : native_context_ref().sloppy_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -410,8 +341,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const elements = AllocateArguments(effect, control, args_state);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
- Node* const arguments_map = jsgraph()->HeapConstant(
- handle(native_context()->strict_arguments_map(), isolate()));
+ Node* const arguments_map =
+ jsgraph()->Constant(native_context_ref().strict_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -426,7 +357,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.FinishAndChange(node);
return Changed(node);
} else if (type == CreateArgumentsType::kRestParameter) {
- int start_index = shared->internal_formal_parameter_count();
+ int start_index = shared.internal_formal_parameter_count();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -447,8 +378,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
AllocateRestArguments(effect, control, args_state, start_index);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
- Node* const jsarray_map = jsgraph()->HeapConstant(handle(
- native_context()->js_array_fast_elements_map_index(), isolate()));
+ Node* const jsarray_map = jsgraph()->Constant(
+ native_context_ref().js_array_fast_elements_map_index());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -473,6 +404,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateGeneratorObject, node->opcode());
Node* const closure = NodeProperties::GetValueInput(node, 0);
Node* const receiver = NodeProperties::GetValueInput(node, 1);
@@ -481,36 +413,39 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
if (closure_type.IsHeapConstant()) {
- DCHECK(closure_type.AsHeapConstant()->Value()->IsJSFunction());
- Handle<JSFunction> js_function =
- Handle<JSFunction>::cast(closure_type.AsHeapConstant()->Value());
- JSFunction::EnsureHasInitialMap(js_function);
+ DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
+ JSFunctionRef js_function =
+ closure_type.AsHeapConstant()->Ref().AsJSFunction();
+ js_function.EnsureHasInitialMap();
// Force completion of inobject slack tracking before
// generating code to finalize the instance size.
- js_function->CompleteInobjectSlackTrackingIfActive();
- Handle<Map> initial_map(js_function->initial_map(), isolate());
- DCHECK(initial_map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
- initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ SlackTrackingResult slack_tracking_result =
+ js_function.FinishSlackTracking();
// Add a dependency on the {initial_map} to make sure that this code is
// deoptimized whenever the {initial_map} changes.
- dependencies()->AssumeInitialMapCantChange(initial_map);
+ MapRef initial_map = dependencies()->DependOnInitialMap(js_function);
+ DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
// Allocate a register file.
- DCHECK(js_function->shared()->HasBytecodeArray());
- int size = js_function->shared()->GetBytecodeArray()->register_count();
+ SharedFunctionInfoRef shared = js_function.shared();
+ DCHECK(shared.HasBytecodeArray());
+ int parameter_count_no_receiver = shared.internal_formal_parameter_count();
+ int size =
+ parameter_count_no_receiver + shared.GetBytecodeArrayRegisterCount();
AllocationBuilder ab(jsgraph(), effect, control);
ab.AllocateArray(size, factory()->fixed_array_map());
for (int i = 0; i < size; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
jsgraph()->UndefinedConstant());
}
- Node* register_file = effect = ab.Finish();
+ Node* parameters_and_registers = effect = ab.Finish();
// Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size());
+ a.Allocate(slack_tracking_result.instance_size);
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
Node* undefined = jsgraph()->UndefinedConstant();
a.Store(AccessBuilder::ForMap(), initial_map);
@@ -524,16 +459,17 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
jsgraph()->Constant(JSGeneratorObject::kNext));
a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
- a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), register_file);
+ a.Store(AccessBuilder::ForJSGeneratorObjectParametersAndRegisters(),
+ parameters_and_registers);
- if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
+ if (initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting(),
jsgraph()->ZeroConstant());
}
// Handle in-object properties, too.
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
undefined);
}
@@ -555,8 +491,9 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
if (!IsHoleyElementsKind(initial_map->elements_kind())) {
- initial_map = Map::AsElementsKind(
- initial_map, GetHoleyElementsKind(initial_map->elements_kind()));
+ initial_map =
+ Map::AsElementsKind(isolate(), initial_map,
+ GetHoleyElementsKind(initial_map->elements_kind()));
}
// Check that the {limit} is an unsigned integer in the valid range.
@@ -584,7 +521,8 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
a.Store(AccessBuilder::ForJSArrayLength(initial_map->elements_kind()),
length);
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(
+ MapRef(js_heap_broker(), initial_map), i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -607,7 +545,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
ElementsKind elements_kind = initial_map->elements_kind();
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
- initial_map = Map::AsElementsKind(initial_map, elements_kind);
+ initial_map = Map::AsElementsKind(isolate(), initial_map, elements_kind);
}
DCHECK(IsFastElementsKind(elements_kind));
@@ -629,7 +567,8 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(
+ MapRef(js_heap_broker(), initial_map), i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -685,7 +624,8 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(
+ MapRef(js_heap_broker(), initial_map), i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -718,35 +658,35 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
: Operator::kNoDeopt | Operator::kNoWrite;
if (arity == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
+ Callable callable = CodeFactory::ArrayNoArgumentConstructor(
+ isolate(), elements_kind, override_mode);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
- arity + 1, CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ graph()->zone(), callable.descriptor(), arity + 1,
+ CallDescriptor::kNeedsFrameState, properties);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else if (arity == 1) {
// Require elements kind to "go holey".
- ArraySingleArgumentConstructorStub stub(
+ Callable callable = CodeFactory::ArraySingleArgumentConstructor(
isolate(), GetHoleyElementsKind(elements_kind), override_mode);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
- arity + 1, CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ graph()->zone(), callable.descriptor(), arity + 1,
+ CallDescriptor::kNeedsFrameState, properties);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
DCHECK_GT(arity, 1);
- ArrayNArgumentsConstructorStub stub(isolate());
+ Handle<Code> code = BUILTIN_CODE(isolate(), ArrayNArgumentsConstructor);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
- arity + 1, CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ graph()->zone(), ArrayNArgumentsConstructorDescriptor{}, arity + 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(code));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
@@ -764,9 +704,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
Handle<JSFunction> constructor(native_context()->array_function(), isolate());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
- Type new_target_type = (target == new_target)
- ? Type::HeapConstant(constructor, zone())
- : NodeProperties::GetType(new_target);
+ Type new_target_type =
+ (target == new_target)
+ ? Type::HeapConstant(js_heap_broker(), constructor, zone())
+ : NodeProperties::GetType(new_target);
// Extract original constructor function.
if (new_target_type.IsHeapConstant() &&
@@ -777,15 +718,17 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK(original_constructor->IsConstructor());
// Check if we can inline the allocation.
- if (IsAllocationInlineable(constructor, original_constructor)) {
+ if (IsAllocationInlineable(
+ JSFunctionRef(js_heap_broker(), constructor),
+ JSFunctionRef(js_heap_broker(), original_constructor))) {
// Force completion of inobject slack tracking before
// generating code to finalize the instance size.
original_constructor->CompleteInobjectSlackTrackingIfActive();
- Handle<Map> initial_map(original_constructor->initial_map(), isolate());
// Add a dependency on the {initial_map} to make sure that this code is
// deoptimized whenever the {initial_map} changes.
- dependencies()->AssumeInitialMapCantChange(initial_map);
+ MapRef initial_map = dependencies()->DependOnInitialMap(
+ JSFunctionRef(js_heap_broker(), original_constructor));
// Tells whether we are protected by either the {site} or a
// protector cell to do certain speculative optimizations.
@@ -794,14 +737,16 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// Check if we have a feedback {site} on the {node}.
if (!site.is_null()) {
ElementsKind elements_kind = site->GetElementsKind();
- if (initial_map->elements_kind() != elements_kind) {
- initial_map = Map::AsElementsKind(initial_map, elements_kind);
+ if (initial_map.elements_kind() != elements_kind) {
+ initial_map =
+ MapRef(js_heap_broker(),
+ Map::AsElementsKind(isolate(), initial_map.object<Map>(),
+ elements_kind));
}
can_inline_call = site->CanInlineCall();
- pretenure = site->GetPretenureMode();
-
- dependencies()->AssumeTransitionStable(site);
- dependencies()->AssumeTenuringDecision(site);
+ auto site_ref = AllocationSiteRef(js_heap_broker(), site);
+ pretenure = dependencies()->DependOnPretenureMode(site_ref);
+ dependencies()->DependOnElementsKind(site_ref);
} else {
can_inline_call = isolate()->IsArrayConstructorIntact();
}
@@ -809,30 +754,36 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
if (arity == 0) {
Node* length = jsgraph()->ZeroConstant();
int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, initial_map, pretenure);
+ return ReduceNewArray(node, length, capacity, initial_map.object<Map>(),
+ pretenure);
} else if (arity == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type length_type = NodeProperties::GetType(length);
if (!length_type.Maybe(Type::Number())) {
// Handle the single argument case, where we know that the value
// cannot be a valid Array length.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
elements_kind = GetMoreGeneralElementsKind(
elements_kind, IsHoleyElementsKind(elements_kind)
? HOLEY_ELEMENTS
: PACKED_ELEMENTS);
- initial_map = Map::AsElementsKind(initial_map, elements_kind);
- return ReduceNewArray(node, std::vector<Node*>{length}, initial_map,
- pretenure);
+ initial_map =
+ MapRef(js_heap_broker(),
+ Map::AsElementsKind(isolate(), initial_map.object<Map>(),
+ elements_kind));
+ return ReduceNewArray(node, std::vector<Node*>{length},
+ initial_map.object<Map>(), pretenure);
}
if (length_type.Is(Type::SignedSmall()) && length_type.Min() >= 0 &&
length_type.Max() <= kElementLoopUnrollLimit &&
length_type.Min() == length_type.Max()) {
int capacity = static_cast<int>(length_type.Max());
- return ReduceNewArray(node, length, capacity, initial_map, pretenure);
+ return ReduceNewArray(node, length, capacity,
+ initial_map.object<Map>(), pretenure);
}
if (length_type.Maybe(Type::UnsignedSmall()) && can_inline_call) {
- return ReduceNewArray(node, length, initial_map, pretenure);
+ return ReduceNewArray(node, length, initial_map.object<Map>(),
+ pretenure);
}
} else if (arity <= JSArray::kInitialMaxFastElementArray) {
// Gather the values to store into the newly created array.
@@ -856,7 +807,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
// Try to figure out the ideal elements kind statically.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
if (values_all_smis) {
// Smis can be stored with any elements kind.
} else if (values_all_numbers) {
@@ -877,9 +828,13 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// we cannot inline this invocation of the Array constructor here.
return NoChange();
}
- initial_map = Map::AsElementsKind(initial_map, elements_kind);
+ initial_map =
+ MapRef(js_heap_broker(),
+ Map::AsElementsKind(isolate(), initial_map.object<Map>(),
+ elements_kind));
- return ReduceNewArray(node, values, initial_map, pretenure);
+ return ReduceNewArray(node, values, initial_map.object<Map>(),
+ pretenure);
}
}
}
@@ -891,6 +846,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, node->opcode());
CreateArrayIteratorParameters const& p =
CreateArrayIteratorParametersOf(node->op());
@@ -902,7 +858,7 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSArrayIterator::kSize, NOT_TENURED, Type::OtherObject());
a.Store(AccessBuilder::ForMap(),
- handle(native_context()->initial_array_iterator_map(), isolate()));
+ native_context_ref().initial_array_iterator_map());
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -919,27 +875,28 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
namespace {
-Context::Field ContextFieldForCollectionIterationKind(
- CollectionKind collection_kind, IterationKind iteration_kind) {
+MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
+ CollectionKind collection_kind,
+ IterationKind iteration_kind) {
switch (collection_kind) {
case CollectionKind::kSet:
switch (iteration_kind) {
case IterationKind::kKeys:
UNREACHABLE();
case IterationKind::kValues:
- return Context::SET_VALUE_ITERATOR_MAP_INDEX;
+ return native_context.set_value_iterator_map();
case IterationKind::kEntries:
- return Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX;
+ return native_context.set_key_value_iterator_map();
}
break;
case CollectionKind::kMap:
switch (iteration_kind) {
case IterationKind::kKeys:
- return Context::MAP_KEY_ITERATOR_MAP_INDEX;
+ return native_context.map_key_iterator_map();
case IterationKind::kValues:
- return Context::MAP_VALUE_ITERATOR_MAP_INDEX;
+ return native_context.map_value_iterator_map();
case IterationKind::kEntries:
- return Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX;
+ return native_context.map_key_value_iterator_map();
}
break;
}
@@ -949,6 +906,7 @@ Context::Field ContextFieldForCollectionIterationKind(
} // namespace
Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCollectionIterator, node->opcode());
CreateCollectionIteratorParameters const& p =
CreateCollectionIteratorParametersOf(node->op());
@@ -965,9 +923,8 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSCollectionIterator::kSize, NOT_TENURED, Type::OtherObject());
a.Store(AccessBuilder::ForMap(),
- handle(native_context()->get(ContextFieldForCollectionIterationKind(
- p.collection_kind(), p.iteration_kind())),
- isolate()));
+ MapForCollectionIterationKind(
+ native_context_ref(), p.collection_kind(), p.iteration_kind()));
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -981,11 +938,12 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, node->opcode());
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Handle<Map> const map = p.map();
+ MapRef const map(js_heap_broker(), p.map());
Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
Node* bound_this = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1021,11 +979,12 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- Handle<SharedFunctionInfo> shared = p.shared_info();
- Handle<FeedbackCell> feedback_cell = p.feedback_cell();
- Handle<Code> code = p.code();
+ SharedFunctionInfoRef shared(js_heap_broker(), p.shared_info());
+ HeapObjectRef feedback_cell(js_heap_broker(), p.feedback_cell());
+ HeapObjectRef code(js_heap_broker(), p.code());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -1033,16 +992,15 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Use inline allocation of closures only for instantiation sites that have
// seen more than one instantiation, this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
- if (feedback_cell->map() != isolate()->heap()->many_closures_cell_map()) {
- // The generic path can only create closures for user functions.
- DCHECK_EQ(isolate()->builtins()->builtin(Builtins::kCompileLazy), *code);
+ if (!feedback_cell.map().equals(
+ MapRef(js_heap_broker(), factory()->many_closures_cell_map()))) {
return NoChange();
}
- Handle<Map> function_map(
- Map::cast(native_context()->get(shared->function_map_index())));
- DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
- DCHECK(!function_map->is_dictionary_map());
+ MapRef function_map =
+ native_context_ref().GetFunctionMapFromIndex(shared.function_map_index());
+ DCHECK(!function_map.IsInobjectSlackTrackingInProgress());
+ DCHECK(!function_map.is_dictionary_map());
// TODO(turbofan): We should use the pretenure flag from {p} here,
// but currently the heuristic in the parser works against us, as
@@ -1058,7 +1016,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Emit code to allocate the JSFunction instance.
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(function_map->instance_size(), pretenure, Type::Function());
+ a.Allocate(function_map.instance_size(), pretenure, Type::Function());
a.Store(AccessBuilder::ForMap(), function_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
@@ -1069,12 +1027,12 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
a.Store(AccessBuilder::ForJSFunctionFeedbackCell(), feedback_cell);
a.Store(AccessBuilder::ForJSFunctionCode(), code);
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- if (function_map->has_prototype_slot()) {
+ if (function_map.has_prototype_slot()) {
a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
jsgraph()->TheHoleConstant());
STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
}
- for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
+ for (int i = 0; i < function_map.GetInObjectProperties(); i++) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
jsgraph()->UndefinedConstant());
}
@@ -1084,13 +1042,14 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* done = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* iterator_result_map = jsgraph()->HeapConstant(
- handle(native_context()->iterator_result_map(), isolate()));
+ Node* iterator_result_map =
+ jsgraph()->Constant(native_context_ref().iterator_result_map());
// Emit code to allocate the JSIteratorResult instance.
AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -1108,12 +1067,12 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateStringIterator, node->opcode());
Node* string = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* map = jsgraph()->HeapConstant(
- handle(native_context()->string_iterator_map(), isolate()));
+ Node* map = jsgraph()->Constant(native_context_ref().string_iterator_map());
// Allocate new iterator and attach the iterator to this string.
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
@@ -1130,13 +1089,14 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
Node* key = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* array_map = jsgraph()->HeapConstant(
- handle(native_context()->js_array_fast_elements_map_index()));
+ Node* array_map = jsgraph()->Constant(
+ native_context_ref().js_array_fast_elements_map_index());
Node* properties = jsgraph()->EmptyFixedArrayConstant();
Node* length = jsgraph()->Constant(2);
@@ -1160,13 +1120,14 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- Handle<Map> promise_map(native_context()->promise_function()->initial_map());
+ MapRef promise_map = native_context_ref().promise_function_initial_map();
AllocationBuilder a(jsgraph(), effect, graph()->start());
- a.Allocate(promise_map->instance_size());
+ a.Allocate(promise_map.instance_size());
a.Store(AccessBuilder::ForMap(), promise_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
@@ -1194,18 +1155,19 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot())->ToObject(), isolate());
- if (feedback->IsAllocationSite()) {
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
- Handle<JSObject> boilerplate(site->boilerplate(), isolate());
- int max_properties = kMaxFastLiteralProperties;
- if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
- AllocationSiteUsageContext site_context(isolate(), site, false);
- site_context.EnterNewScope();
+ FeedbackVectorRef feedback_vector(js_heap_broker(), p.feedback().vector());
+ ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+ if (feedback.IsAllocationSite()) {
+ AllocationSiteRef site = feedback.AsAllocationSite();
+ if (site.IsFastLiteral()) {
+ PretenureFlag pretenure = NOT_TENURED;
+ if (FLAG_allocation_site_pretenuring) {
+ pretenure = dependencies()->DependOnPretenureMode(site);
+ }
+ dependencies()->DependOnElementsKinds(site);
+ JSObjectRef boilerplate = site.boilerplate();
Node* value = effect =
- AllocateFastLiteral(effect, control, boilerplate, &site_context);
- site_context.ExitScope(site, boilerplate);
+ AllocateFastLiteral(effect, control, boilerplate, pretenure);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1224,9 +1186,10 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
Handle<Map> const initial_map(
native_context()->GetInitialJSArrayMap(site->GetElementsKind()),
isolate());
- PretenureFlag const pretenure = site->GetPretenureMode();
- dependencies()->AssumeTransitionStable(site);
- dependencies()->AssumeTenuringDecision(site);
+ auto site_ref = AllocationSiteRef(js_heap_broker(), site);
+ PretenureFlag const pretenure =
+ dependencies()->DependOnPretenureMode(site_ref);
+ dependencies()->DependOnElementsKind(site_ref);
Node* length = jsgraph()->ZeroConstant();
return ReduceNewArray(node, length, 0, initial_map, pretenure);
}
@@ -1255,7 +1218,8 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
for (int i = 0; i < map->GetInObjectProperties(); i++) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(map, i),
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(
+ MapRef(js_heap_broker(), map), i),
jsgraph()->UndefinedConstant());
}
@@ -1265,15 +1229,16 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateLiteralRegExp, node->opcode());
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot())->ToObject(), isolate());
- if (feedback->IsJSRegExp()) {
- Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(feedback);
+ FeedbackVectorRef feedback_vector(js_heap_broker(), p.feedback().vector());
+ ObjectRef feedback = feedback_vector.get(p.feedback().slot());
+ if (feedback.IsJSRegExp()) {
+ JSRegExpRef boilerplate = feedback.AsJSRegExp();
Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1282,10 +1247,11 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- Handle<ScopeInfo> scope_info = parameters.scope_info();
+ ScopeInfoRef scope_info(js_heap_broker(), parameters.scope_info());
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
@@ -1329,8 +1295,9 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* extension = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1350,8 +1317,9 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1375,9 +1343,10 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
- int const context_length = scope_info->ContextLength();
+ ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
+ int const context_length = scope_info.ContextLength();
// Use inline allocation for block contexts up to a size limit.
if (context_length < kBlockContextAllocationLimit) {
@@ -1408,22 +1377,24 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
+ DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateObject, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* prototype = NodeProperties::GetValueInput(node, 0);
- HeapObjectMatcher m(prototype);
- if (!m.IsHeapConstant()) return NoChange();
-
- Handle<HeapObject> prototype_const = m.Value();
- Handle<Map> instance_map;
- MaybeHandle<Map> maybe_instance_map =
- Map::TryGetObjectCreateMap(prototype_const);
- if (!maybe_instance_map.ToHandle(&instance_map)) return NoChange();
+ Type prototype_type = NodeProperties::GetType(prototype);
+ if (!prototype_type.IsHeapConstant()) return NoChange();
+
+ HeapObjectRef prototype_const = prototype_type.AsHeapConstant()->Ref();
+ auto maybe_instance_map = prototype_const.TryGetObjectCreateMap();
+ if (!maybe_instance_map) return NoChange();
+ MapRef instance_map = maybe_instance_map.value();
+
Node* properties = jsgraph()->EmptyFixedArrayConstant();
- if (instance_map->is_dictionary_map()) {
- // Allocated an empty NameDictionary as backing store for the properties.
- Handle<Map> map(isolate()->heap()->name_dictionary_map(), isolate());
+ if (instance_map.is_dictionary_map()) {
+ DCHECK_EQ(prototype_const.type().oddball_type(), OddballType::kNull);
+ // Allocate an empty NameDictionary as backing store for the properties.
+ Handle<Map> map = isolate()->factory()->name_dictionary_map();
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
DCHECK(base::bits::IsPowerOfTwo(capacity));
@@ -1460,9 +1431,9 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
properties = effect = a.Finish();
}
- int const instance_size = instance_map->instance_size();
+ int const instance_size = instance_map.instance_size();
if (instance_size > kMaxRegularHeapObjectSize) return NoChange();
- dependencies()->AssumeInitialMapCantChange(instance_map);
+ CHECK(!instance_map.IsInobjectSlackTrackingInProgress());
// Emit code to allocate the JSObject instance for the given
// {instance_map}.
@@ -1543,14 +1514,14 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
// given {context}. Serves as backing store for JSCreateArguments nodes.
Node* JSCreateLowering::AllocateAliasedArguments(
Node* effect, Node* control, Node* frame_state, Node* context,
- Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+ const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
// If there is no aliasing, the arguments object elements are not special in
// any way, we can just return an unmapped backing store instead.
- int parameter_count = shared->internal_formal_parameter_count();
+ int parameter_count = shared.internal_formal_parameter_count();
if (parameter_count == 0) {
return AllocateArguments(effect, control, frame_state);
}
@@ -1596,11 +1567,11 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Serves as backing store for JSCreateArguments nodes.
Node* JSCreateLowering::AllocateAliasedArguments(
Node* effect, Node* control, Node* context, Node* arguments_frame,
- Node* arguments_length, Handle<SharedFunctionInfo> shared,
+ Node* arguments_length, const SharedFunctionInfoRef& shared,
bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
- int parameter_count = shared->internal_formal_parameter_count();
+ int parameter_count = shared.internal_formal_parameter_count();
if (parameter_count == 0) {
return graph()->NewNode(simplified()->NewArgumentsElements(0),
arguments_frame, arguments_length, effect);
@@ -1687,59 +1658,41 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
return a.Finish();
}
-Node* JSCreateLowering::AllocateFastLiteral(
- Node* effect, Node* control, Handle<JSObject> boilerplate,
- AllocationSiteUsageContext* site_context) {
- Handle<AllocationSite> current_site(*site_context->current(), isolate());
- dependencies()->AssumeTransitionStable(current_site);
-
- PretenureFlag pretenure = NOT_TENURED;
- if (FLAG_allocation_site_pretenuring) {
- Handle<AllocationSite> top_site(*site_context->top(), isolate());
- pretenure = top_site->GetPretenureMode();
- if (current_site.is_identical_to(top_site)) {
- // We install a dependency for pretenuring only on the outermost literal.
- dependencies()->AssumeTenuringDecision(top_site);
- }
- }
-
+Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
+ JSObjectRef boilerplate,
+ PretenureFlag pretenure) {
// Setup the properties backing store.
Node* properties = jsgraph()->EmptyFixedArrayConstant();
// Compute the in-object properties to store first (might have effects).
- Handle<Map> boilerplate_map(boilerplate->map(), isolate());
+ MapRef boilerplate_map = boilerplate.map();
ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
- inobject_fields.reserve(boilerplate_map->GetInObjectProperties());
- int const boilerplate_nof = boilerplate_map->NumberOfOwnDescriptors();
+ inobject_fields.reserve(boilerplate_map.GetInObjectProperties());
+ int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
for (int i = 0; i < boilerplate_nof; ++i) {
PropertyDetails const property_details =
- boilerplate_map->instance_descriptors()->GetDetails(i);
+ boilerplate_map.GetPropertyDetails(i);
if (property_details.location() != kField) continue;
DCHECK_EQ(kData, property_details.kind());
- Handle<Name> property_name(
- boilerplate_map->instance_descriptors()->GetKey(i), isolate());
- FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
- FieldAccess access = {kTaggedBase, index.offset(),
- property_name, MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ NameRef property_name = boilerplate_map.GetPropertyKey(i);
+ FieldIndex index = boilerplate_map.GetFieldIndexFor(i);
+ FieldAccess access = {
+ kTaggedBase, index.offset(), property_name.object<Name>(),
+ MaybeHandle<Map>(), Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
Node* value;
- if (boilerplate->IsUnboxedDoubleField(index)) {
+ if (boilerplate.IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
access.type = Type::Number();
- value = jsgraph()->Constant(boilerplate->RawFastDoublePropertyAt(index));
+ value = jsgraph()->Constant(boilerplate.RawFastDoublePropertyAt(index));
} else {
- Handle<Object> boilerplate_value(boilerplate->RawFastPropertyAt(index),
- isolate());
- if (boilerplate_value->IsJSObject()) {
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(boilerplate_value);
- Handle<AllocationSite> current_site = site_context->EnterNewScope();
- value = effect = AllocateFastLiteral(effect, control,
- boilerplate_object, site_context);
- site_context->ExitScope(current_site, boilerplate_object);
+ ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
+ if (boilerplate_value.IsJSObject()) {
+ JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
+ value = effect =
+ AllocateFastLiteral(effect, control, boilerplate_object, pretenure);
} else if (property_details.representation().IsDouble()) {
- double number = Handle<HeapNumber>::cast(boilerplate_value)->value();
+ double number = boilerplate_value.AsMutableHeapNumber().value();
// Allocate a mutable HeapNumber box and store the value into it.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(HeapNumber::kSize, pretenure);
@@ -1750,9 +1703,9 @@ Node* JSCreateLowering::AllocateFastLiteral(
value = effect = builder.Finish();
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
- value = boilerplate_value->IsUninitialized(isolate())
+ value = boilerplate_value.oddball_type() == OddballType::kUninitialized
? jsgraph()->ZeroConstant()
- : jsgraph()->Constant(boilerplate_value);
+ : jsgraph()->Constant(boilerplate_value.AsSmi());
} else {
value = jsgraph()->Constant(boilerplate_value);
}
@@ -1761,7 +1714,7 @@ Node* JSCreateLowering::AllocateFastLiteral(
}
// Fill slack at the end of the boilerplate object with filler maps.
- int const boilerplate_length = boilerplate_map->GetInObjectProperties();
+ int const boilerplate_length = boilerplate_map.GetInObjectProperties();
for (int index = static_cast<int>(inobject_fields.size());
index < boilerplate_length; ++index) {
FieldAccess access =
@@ -1771,22 +1724,22 @@ Node* JSCreateLowering::AllocateFastLiteral(
}
// Setup the elements backing store.
- Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
- pretenure, site_context);
+ Node* elements =
+ AllocateFastLiteralElements(effect, control, boilerplate, pretenure);
if (elements->op()->EffectOutputCount() > 0) effect = elements;
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(boilerplate_map->instance_size(), pretenure,
- Type::For(boilerplate_map));
+ builder.Allocate(boilerplate_map.instance_size(), pretenure,
+ Type::For(js_heap_broker(), boilerplate_map.object<Map>()));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
builder.Store(AccessBuilder::ForJSObjectElements(), elements);
- if (boilerplate_map->IsJSArrayMap()) {
- Handle<JSArray> boilerplate_array = Handle<JSArray>::cast(boilerplate);
+ if (boilerplate_map.IsJSArrayMap()) {
+ JSArrayRef boilerplate_array = boilerplate.AsJSArray();
builder.Store(
- AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
- handle(boilerplate_array->length(), isolate()));
+ AccessBuilder::ForJSArrayLength(boilerplate_array.GetElementsKind()),
+ boilerplate_array.length());
}
for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
@@ -1794,57 +1747,43 @@ Node* JSCreateLowering::AllocateFastLiteral(
return builder.Finish();
}
-Node* JSCreateLowering::AllocateFastLiteralElements(
- Node* effect, Node* control, Handle<JSObject> boilerplate,
- PretenureFlag pretenure, AllocationSiteUsageContext* site_context) {
- Handle<FixedArrayBase> boilerplate_elements(boilerplate->elements(),
- isolate());
+Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
+ JSObjectRef boilerplate,
+ PretenureFlag pretenure) {
+ FixedArrayBaseRef boilerplate_elements = boilerplate.elements();
// Empty or copy-on-write elements just store a constant.
- if (boilerplate_elements->length() == 0 ||
- boilerplate_elements->map() == isolate()->heap()->fixed_cow_array_map()) {
- if (pretenure == TENURED &&
- isolate()->heap()->InNewSpace(*boilerplate_elements)) {
- // If we would like to pretenure a fixed cow array, we must ensure that
- // the array is already in old space, otherwise we'll create too many
- // old-to-new-space pointers (overflowing the store buffer).
- boilerplate_elements = Handle<FixedArrayBase>(
- isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(boilerplate_elements)));
- boilerplate->set_elements(*boilerplate_elements);
+ int const elements_length = boilerplate_elements.length();
+ MapRef elements_map = boilerplate_elements.map();
+ if (boilerplate_elements.length() == 0 || elements_map.IsFixedCowArrayMap()) {
+ if (pretenure == TENURED) {
+ boilerplate.EnsureElementsTenured();
+ boilerplate_elements = boilerplate.elements();
}
- return jsgraph()->HeapConstant(boilerplate_elements);
+ return jsgraph()->HeapConstant(boilerplate_elements.object<HeapObject>());
}
// Compute the elements to store first (might have effects).
- int const elements_length = boilerplate_elements->length();
- Handle<Map> elements_map(boilerplate_elements->map(), isolate());
ZoneVector<Node*> elements_values(elements_length, zone());
- if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
- Handle<FixedDoubleArray> elements =
- Handle<FixedDoubleArray>::cast(boilerplate_elements);
+ if (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
+ FixedDoubleArrayRef elements = boilerplate_elements.AsFixedDoubleArray();
for (int i = 0; i < elements_length; ++i) {
- if (elements->is_the_hole(i)) {
+ if (elements.is_the_hole(i)) {
elements_values[i] = jsgraph()->TheHoleConstant();
} else {
- elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
+ elements_values[i] = jsgraph()->Constant(elements.get_scalar(i));
}
}
} else {
- Handle<FixedArray> elements =
- Handle<FixedArray>::cast(boilerplate_elements);
+ FixedArrayRef elements = boilerplate_elements.AsFixedArray();
for (int i = 0; i < elements_length; ++i) {
- if (elements->is_the_hole(isolate(), i)) {
+ if (elements.is_the_hole(i)) {
elements_values[i] = jsgraph()->TheHoleConstant();
} else {
- Handle<Object> element_value(elements->get(i), isolate());
- if (element_value->IsJSObject()) {
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(element_value);
- Handle<AllocationSite> current_site = site_context->EnterNewScope();
+ ObjectRef element_value = elements.get(i);
+ if (element_value.IsJSObject()) {
elements_values[i] = effect = AllocateFastLiteral(
- effect, control, boilerplate_object, site_context);
- site_context->ExitScope(current_site, boilerplate_object);
+ effect, control, element_value.AsJSObject(), pretenure);
} else {
elements_values[i] = jsgraph()->Constant(element_value);
}
@@ -1854,9 +1793,9 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
// Allocate the backing store array and store the elements.
AllocationBuilder builder(jsgraph(), effect, control);
- builder.AllocateArray(elements_length, elements_map, pretenure);
+ builder.AllocateArray(elements_length, elements_map.object<Map>(), pretenure);
ElementAccess const access =
- (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
+ (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
for (int i = 0; i < elements_length; ++i) {
@@ -1866,8 +1805,8 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
}
Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
- Handle<JSRegExp> boilerplate) {
- Handle<Map> boilerplate_map(boilerplate->map(), isolate());
+ JSRegExpRef boilerplate) {
+ MapRef boilerplate_map = boilerplate.map();
// Sanity check that JSRegExp object layout hasn't changed.
STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
@@ -1884,21 +1823,18 @@ Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(size, pretenure, Type::For(boilerplate_map));
+ builder.Allocate(size, pretenure,
+ Type::For(js_heap_broker(), boilerplate_map.object<Map>()));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- handle(boilerplate->raw_properties_or_hash(), isolate()));
- builder.Store(AccessBuilder::ForJSObjectElements(),
- handle(boilerplate->elements(), isolate()));
-
- builder.Store(AccessBuilder::ForJSRegExpData(),
- handle(boilerplate->data(), isolate()));
- builder.Store(AccessBuilder::ForJSRegExpSource(),
- handle(boilerplate->source(), isolate()));
- builder.Store(AccessBuilder::ForJSRegExpFlags(),
- handle(boilerplate->flags(), isolate()));
+ boilerplate.raw_properties_or_hash());
+ builder.Store(AccessBuilder::ForJSObjectElements(), boilerplate.elements());
+
+ builder.Store(AccessBuilder::ForJSRegExpData(), boilerplate.data());
+ builder.Store(AccessBuilder::ForJSRegExpSource(), boilerplate.source());
+ builder.Store(AccessBuilder::ForJSRegExpFlags(), boilerplate.flags());
builder.Store(AccessBuilder::ForJSRegExpLastIndex(),
- handle(boilerplate->last_index(), isolate()));
+ boilerplate.last_index());
return builder.Finish();
}
@@ -1917,6 +1853,10 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
return jsgraph()->simplified();
}
+NativeContextRef JSCreateLowering::native_context_ref() const {
+ return NativeContextRef(js_heap_broker(), native_context());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 7a61f171ff..667298c238 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -14,7 +14,6 @@ namespace internal {
// Forward declarations.
class AllocationSiteUsageContext;
-class CompilationDependencies;
class Factory;
class JSRegExp;
@@ -22,6 +21,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+class CompilationDependencies;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
@@ -33,11 +33,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph,
+ JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker),
native_context_(native_context),
zone_(zone) {}
~JSCreateLowering() final {}
@@ -79,11 +80,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
int start_index);
Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
- Node* context, Handle<SharedFunctionInfo>,
+ Node* context,
+ const SharedFunctionInfoRef& shared,
bool* has_aliased_arguments);
Node* AllocateAliasedArguments(Node* effect, Node* control, Node* context,
Node* arguments_frame, Node* arguments_length,
- Handle<SharedFunctionInfo>,
+ const SharedFunctionInfoRef& shared,
bool* has_aliased_arguments);
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
@@ -95,14 +97,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
std::vector<Node*> const& values,
PretenureFlag pretenure);
Node* AllocateFastLiteral(Node* effect, Node* control,
- Handle<JSObject> boilerplate,
- AllocationSiteUsageContext* site_context);
+ JSObjectRef boilerplate, PretenureFlag pretenure);
Node* AllocateFastLiteralElements(Node* effect, Node* control,
- Handle<JSObject> boilerplate,
- PretenureFlag pretenure,
- AllocationSiteUsageContext* site_context);
+ JSObjectRef boilerplate,
+ PretenureFlag pretenure);
Node* AllocateLiteralRegExp(Node* effect, Node* control,
- Handle<JSRegExp> boilerplate);
+ JSRegExpRef boilerplate);
Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
@@ -111,13 +111,16 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Handle<Context> native_context() const { return native_context_; }
+ NativeContextRef native_context_ref() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
CompilationDependencies* dependencies() const { return dependencies_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Zone* zone() const { return zone_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
+ const JSHeapBroker* const js_heap_broker_;
Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index cedeb96d2d..5e134307f4 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -101,12 +101,11 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags,
- Operator::Properties properties,
- int result_size) {
+ Operator::Properties properties) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
- properties, MachineType::AnyTagged(), result_size);
+ zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+ properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -360,11 +359,9 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
- ArrayConstructorDescriptor descriptor(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, arity + 1,
- CallDescriptor::kNeedsFrameState, node->op()->properties(),
- MachineType::AnyTagged());
+ zone(), ArrayConstructorDescriptor{}, arity + 1,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(arity);
Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
@@ -406,6 +403,13 @@ void JSGenericLowering::LowerJSParseInt(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSRegExpTest(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kRegExpPrototypeTestFast);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
@@ -566,7 +570,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
@@ -587,7 +591,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::Construct(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* new_target = node->InputAt(arg_count + 1);
@@ -604,8 +608,8 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1, flags);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
Node* arguments_list = node->InputAt(1);
@@ -625,7 +629,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count, flags);
+ zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
Node* new_target = node->InputAt(new_target_index);
@@ -649,7 +653,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallForwardVarargs(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
@@ -666,7 +670,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
node->InsertInput(zone(), 0, stub_code);
@@ -677,8 +681,8 @@ void JSGenericLowering::LowerJSCall(Node* node) {
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
Callable callable = CodeFactory::CallWithArrayLike(isolate());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1, flags);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = node->InputAt(1);
Node* arguments_list = node->InputAt(2);
@@ -695,7 +699,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallWithSpread(isolate());
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count, flags);
+ zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 8cd89fcb26..09fafb4dc5 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -39,8 +39,7 @@ class JSGenericLowering final : public Reducer {
// Helpers to replace existing nodes with a generic call.
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
- Operator::Properties properties,
- int result_size = 1);
+ Operator::Properties properties);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
Zone* zone() const;
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index cb9a6dda2e..b3ef85fb07 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -66,6 +66,33 @@ Node* JSGraph::Constant(Handle<Object> value) {
}
}
+Node* JSGraph::Constant(const ObjectRef& ref) {
+ if (ref.IsSmi()) return Constant(ref.AsSmi());
+ OddballType oddball_type = ref.oddball_type();
+ if (ref.IsHeapNumber()) {
+ return Constant(ref.AsHeapNumber().value());
+ } else if (oddball_type == OddballType::kUndefined) {
+ DCHECK(
+ ref.object<Object>().equals(isolate()->factory()->undefined_value()));
+ return UndefinedConstant();
+ } else if (oddball_type == OddballType::kNull) {
+ DCHECK(ref.object<Object>().equals(isolate()->factory()->null_value()));
+ return NullConstant();
+ } else if (oddball_type == OddballType::kHole) {
+ DCHECK(ref.object<Object>().equals(isolate()->factory()->the_hole_value()));
+ return TheHoleConstant();
+ } else if (oddball_type == OddballType::kBoolean) {
+ if (ref.object<Object>().equals(isolate()->factory()->true_value())) {
+ return TrueConstant();
+ } else {
+ DCHECK(ref.object<Object>().equals(isolate()->factory()->false_value()));
+ return FalseConstant();
+ }
+ } else {
+ return HeapConstant(ref.object<HeapObject>());
+ }
+}
+
Node* JSGraph::Constant(double value) {
if (bit_cast<int64_t>(value) == bit_cast<int64_t>(0.0)) return ZeroConstant();
if (bit_cast<int64_t>(value) == bit_cast<int64_t>(1.0)) return OneConstant();
@@ -118,7 +145,7 @@ DEFINE_GETTER(AllocateInOldSpaceStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldSpace)))
DEFINE_GETTER(ArrayConstructorStubConstant,
- HeapConstant(ArrayConstructorStub(isolate()).GetCode()))
+ HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
DEFINE_GETTER(ToNumberBuiltinConstant,
HeapConstant(BUILTIN_CODE(isolate(), ToNumber)))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 0046ef8b5e..517b799a24 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -55,6 +55,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
// canonicalized globals or a number constant should be returned.
Node* Constant(Handle<Object> value);
+ // Like above, but doesn't access the heap directly.
+ Node* Constant(const ObjectRef& value);
+
// Creates a NumberConstant node, usually canonicalized.
Node* Constant(double value);
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
new file mode 100644
index 0000000000..2624387165
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -0,0 +1,848 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-heap-broker.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/objects-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/module-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MapRef HeapObjectRef::map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<HeapObject>()->map(), broker()->isolate()));
+}
+
+double HeapNumberRef::value() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<HeapNumber>()->value();
+}
+
+double MutableHeapNumberRef::value() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<MutableHeapNumber>()->value();
+}
+
+bool ObjectRef::IsSmi() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object_->IsSmi();
+}
+
+int ObjectRef::AsSmi() const { return object<Smi>()->value(); }
+
+bool ObjectRef::equals(const ObjectRef& other) const {
+ return object<Object>().equals(other.object<Object>());
+}
+
+StringRef ObjectRef::TypeOf() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return StringRef(broker(),
+ Object::TypeOf(broker()->isolate(), object<Object>()));
+}
+
+base::Optional<ContextRef> ContextRef::previous() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Context* previous = object<Context>()->previous();
+ if (previous == nullptr) return base::Optional<ContextRef>();
+ return ContextRef(broker(), handle(previous, broker()->isolate()));
+}
+
+ObjectRef ContextRef::get(int index) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<Object> value(object<Context>()->get(index), broker()->isolate());
+ return ObjectRef(broker(), value);
+}
+
+JSHeapBroker::JSHeapBroker(Isolate* isolate) : isolate_(isolate) {}
+
+HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
+ AllowHandleDereference allow_handle_dereference;
+ OddballType oddball_type = OddballType::kNone;
+ if (map->instance_type() == ODDBALL_TYPE) {
+ ReadOnlyRoots roots(isolate_);
+ if (map == roots.undefined_map()) {
+ oddball_type = OddballType::kUndefined;
+ } else if (map == roots.null_map()) {
+ oddball_type = OddballType::kNull;
+ } else if (map == roots.boolean_map()) {
+ oddball_type = OddballType::kBoolean;
+ } else if (map == roots.the_hole_map()) {
+ oddball_type = OddballType::kHole;
+ } else if (map == roots.uninitialized_map()) {
+ oddball_type = OddballType::kUninitialized;
+ } else {
+ oddball_type = OddballType::kOther;
+ DCHECK(map == roots.termination_exception_map() ||
+ map == roots.arguments_marker_map() ||
+ map == roots.optimized_out_map() ||
+ map == roots.stale_register_map());
+ }
+ }
+ HeapObjectType::Flags flags(0);
+ if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map->is_callable()) flags |= HeapObjectType::kCallable;
+
+ return HeapObjectType(map->instance_type(), flags, oddball_type);
+}
+
+// static
+base::Optional<int> JSHeapBroker::TryGetSmi(Handle<Object> object) {
+ AllowHandleDereference allow_handle_dereference;
+ if (!object->IsSmi()) return base::Optional<int>();
+ return Smi::cast(*object)->value();
+}
+
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectRef::Is##Name() const { \
+ AllowHandleDereference allow_handle_dereference; \
+ return object<Object>()->Is##Name(); \
+ } \
+ Name##Ref ObjectRef::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return Name##Ref(broker(), object<HeapObject>()); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
+#undef DEFINE_IS_AND_AS
+
+HeapObjectType HeapObjectRef::type() const {
+ AllowHandleDereference allow_handle_dereference;
+ return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
+}
+
+base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<Map> instance_map;
+ if (Map::TryGetObjectCreateMap(broker()->isolate(), object<HeapObject>())
+ .ToHandle(&instance_map)) {
+ return MapRef(broker(), instance_map);
+ } else {
+ return base::Optional<MapRef>();
+ }
+}
+
+bool HeapObjectRef::IsSeqString() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<HeapObject>()->IsSeqString();
+}
+
+bool HeapObjectRef::IsExternalString() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<HeapObject>()->IsExternalString();
+}
+
+bool JSFunctionRef::HasBuiltinFunctionId() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<JSFunction>()->shared()->HasBuiltinFunctionId();
+}
+
+BuiltinFunctionId JSFunctionRef::GetBuiltinFunctionId() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<JSFunction>()->shared()->builtin_function_id();
+}
+
+bool JSFunctionRef::IsConstructor() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<JSFunction>()->IsConstructor();
+}
+
+void JSFunctionRef::EnsureHasInitialMap() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ AllowHeapAllocation heap_allocation;
+ // TODO(jarin) Eventually, we will prepare initial maps for resumable
+ // functions (i.e., generators).
+ DCHECK(IsResumableFunction(object<JSFunction>()->shared()->kind()));
+ JSFunction::EnsureHasInitialMap(object<JSFunction>());
+}
+
+SlackTrackingResult JSFunctionRef::FinishSlackTracking() const {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation handle_allocation;
+ object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
+ int instance_size = object<JSFunction>()->initial_map()->instance_size();
+ int inobject_property_count =
+ object<JSFunction>()->initial_map()->GetInObjectProperties();
+ return SlackTrackingResult(instance_size, inobject_property_count);
+}
+
+bool JSFunctionRef::has_initial_map() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<JSFunction>()->has_initial_map();
+}
+
+MapRef JSFunctionRef::initial_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<JSFunction>()->initial_map(),
+ broker()->isolate()));
+}
+
+SharedFunctionInfoRef JSFunctionRef::shared() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return SharedFunctionInfoRef(
+ broker(), handle(object<JSFunction>()->shared(), broker()->isolate()));
+}
+
+JSGlobalProxyRef JSFunctionRef::global_proxy() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return JSGlobalProxyRef(broker(), handle(object<JSFunction>()->global_proxy(),
+ broker()->isolate()));
+}
+
+base::Optional<ScriptContextTableRef::LookupResult>
+ScriptContextTableRef::lookup(const NameRef& name) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ if (!name.IsString()) return {};
+ ScriptContextTable::LookupResult lookup_result;
+ auto table = object<ScriptContextTable>();
+ if (!ScriptContextTable::Lookup(broker()->isolate(), table,
+ name.object<String>(), &lookup_result)) {
+ return {};
+ }
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ broker()->isolate(), table, lookup_result.context_index);
+ LookupResult result{ContextRef(broker(), script_context),
+ lookup_result.mode == VariableMode::kConst,
+ lookup_result.slot_index};
+ return result;
+}
+
+ScriptContextTableRef NativeContextRef::script_context_table() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ScriptContextTableRef(
+ broker(),
+ handle(object<Context>()->script_context_table(), broker()->isolate()));
+}
+
+OddballType ObjectRef::oddball_type() const {
+ return IsSmi() ? OddballType::kNone : AsHeapObject().type().oddball_type();
+}
+
+ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
+ broker()->isolate());
+ return ObjectRef(broker(), value);
+}
+
+JSObjectRef AllocationSiteRef::boilerplate() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<JSObject> value(object<AllocationSite>()->boilerplate(),
+ broker()->isolate());
+ return JSObjectRef(broker(), value);
+}
+
+ObjectRef AllocationSiteRef::nested_site() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<Object> obj(object<AllocationSite>()->nested_site(),
+ broker()->isolate());
+ return ObjectRef(broker(), obj);
+}
+
+bool AllocationSiteRef::PointsToLiteral() const {
+ AllowHandleDereference handle_dereference;
+ return object<AllocationSite>()->PointsToLiteral();
+}
+
+ElementsKind AllocationSiteRef::GetElementsKind() const {
+ AllowHandleDereference handle_dereference;
+ return object<AllocationSite>()->GetElementsKind();
+}
+
+bool JSObjectRef::IsUnboxedDoubleField(FieldIndex index) const {
+ AllowHandleDereference handle_dereference;
+ return object<JSObject>()->IsUnboxedDoubleField(index);
+}
+
+double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
+ AllowHandleDereference handle_dereference;
+ return object<JSObject>()->RawFastDoublePropertyAt(index);
+}
+
+ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSObject>()->RawFastPropertyAt(index),
+ broker()->isolate()));
+}
+
+ElementsKind JSObjectRef::GetElementsKind() {
+ AllowHandleDereference handle_dereference;
+ return object<JSObject>()->GetElementsKind();
+}
+
+FixedArrayBaseRef JSObjectRef::elements() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return FixedArrayBaseRef(
+ broker(), handle(object<JSObject>()->elements(), broker()->isolate()));
+}
+
+namespace {
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ // Make sure the boilerplate map is not deprecated.
+ if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+ // Check for too deep nesting.
+ if (max_depth == 0) return false;
+
+ // Check the elements.
+ Isolate* const isolate = boilerplate->GetIsolate();
+ Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+ if (elements->length() > 0 &&
+ elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1,
+ max_properties)) {
+ return false;
+ }
+ }
+ }
+ } else if (boilerplate->HasDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
+ return false;
+ }
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0)) {
+ return false;
+ }
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors(), isolate);
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ if ((*max_properties)-- == 0) return false;
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying. The limit is chosen to
+// match the maximum number of inobject properties, to ensure that the
+// performance of using object literals is not worse than using constructor
+// functions, see crbug.com/v8/6211 for details.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool AllocationSiteRef::IsFastLiteral() const {
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ int max_properties = kMaxFastLiteralProperties;
+ Handle<JSObject> boilerplate(object<AllocationSite>()->boilerplate(),
+ broker()->isolate());
+ return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
+ &max_properties);
+}
+
+PretenureFlag AllocationSiteRef::GetPretenureMode() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<AllocationSite>()->GetPretenureMode();
+}
+
+void JSObjectRef::EnsureElementsTenured() {
+ // TODO(jarin) Eventually, we will pretenure the boilerplates before
+ // the compilation job starts.
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<FixedArrayBase> object_elements = elements().object<FixedArrayBase>();
+ if (Heap::InNewSpace(*object_elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that
+ // the array is already in old space, otherwise we'll create too many
+ // old-to-new-space pointers (overflowing the store buffer).
+ object_elements = Handle<FixedArrayBase>(
+ broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(object_elements)));
+ object<JSObject>()->set_elements(*object_elements);
+ }
+}
+
+ElementsKind MapRef::elements_kind() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->elements_kind();
+}
+
+bool MapRef::is_deprecated() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->is_deprecated();
+}
+
+bool MapRef::CanBeDeprecated() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->CanBeDeprecated();
+}
+
+int MapRef::GetInObjectProperties() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->GetInObjectProperties();
+}
+
+int MapRef::NumberOfOwnDescriptors() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->NumberOfOwnDescriptors();
+}
+
+FieldIndex MapRef::GetFieldIndexFor(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return FieldIndex::ForDescriptor(*object<Map>(), i);
+}
+
+int MapRef::GetInObjectPropertyOffset(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->GetInObjectPropertyOffset(i);
+}
+
+bool MapRef::is_dictionary_map() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->is_dictionary_map();
+}
+
+ObjectRef MapRef::constructor_or_backpointer() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(), handle(object<Map>()->constructor_or_backpointer(),
+ broker()->isolate()));
+}
+
+int MapRef::instance_size() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->instance_size();
+}
+
+InstanceType MapRef::instance_type() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->instance_type();
+}
+
+PropertyDetails MapRef::GetPropertyDetails(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->instance_descriptors()->GetDetails(i);
+}
+
+NameRef MapRef::GetPropertyKey(int i) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return NameRef(broker(),
+ handle(object<Map>()->instance_descriptors()->GetKey(i),
+ broker()->isolate()));
+}
+
+bool MapRef::IsJSArrayMap() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->IsJSArrayMap();
+}
+
+bool MapRef::IsInobjectSlackTrackingInProgress() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->IsInobjectSlackTrackingInProgress();
+}
+
+bool MapRef::IsFixedCowArrayMap() const {
+ AllowHandleDereference allow_handle_dereference;
+ return *object<Map>() ==
+ ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map();
+}
+
+bool MapRef::has_prototype_slot() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->has_prototype_slot();
+}
+
+bool MapRef::is_stable() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->is_stable();
+}
+
+bool MapRef::CanTransition() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->CanTransition();
+}
+
+MapRef MapRef::FindFieldOwner(int descriptor) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<Map> owner(
+ object<Map>()->FindFieldOwner(broker()->isolate(), descriptor),
+ broker()->isolate());
+ return MapRef(broker(), owner);
+}
+
+FieldTypeRef MapRef::GetFieldType(int descriptor) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<FieldType> field_type(
+ object<Map>()->instance_descriptors()->GetFieldType(descriptor),
+ broker()->isolate());
+ return FieldTypeRef(broker(), field_type);
+}
+
+ElementsKind JSArrayRef::GetElementsKind() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<JSArray>()->GetElementsKind();
+}
+
+ObjectRef JSArrayRef::length() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSArray>()->length(), broker()->isolate()));
+}
+
+int StringRef::length() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<String>()->length();
+}
+
+uint16_t StringRef::GetFirstChar() {
+ AllowHandleDereference allow_handle_dereference;
+ return object<String>()->Get(0);
+}
+
+double StringRef::ToNumber() {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ return StringToDouble(broker()->isolate(),
+ broker()->isolate()->unicode_cache(), object<String>(),
+ flags);
+}
+
+ObjectRef JSRegExpRef::raw_properties_or_hash() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSRegExp>()->raw_properties_or_hash(),
+ broker()->isolate()));
+}
+
+ObjectRef JSRegExpRef::data() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSRegExp>()->data(), broker()->isolate()));
+}
+
+ObjectRef JSRegExpRef::source() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSRegExp>()->source(), broker()->isolate()));
+}
+
+ObjectRef JSRegExpRef::flags() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSRegExp>()->flags(), broker()->isolate()));
+}
+
+ObjectRef JSRegExpRef::last_index() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(
+ broker(), handle(object<JSRegExp>()->last_index(), broker()->isolate()));
+}
+
+int FixedArrayBaseRef::length() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedArrayBase>()->length();
+}
+
+bool FixedArrayRef::is_the_hole(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedArray>()->is_the_hole(broker()->isolate(), i);
+}
+
+ObjectRef FixedArrayRef::get(int i) const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<FixedArray>()->get(i), broker()->isolate()));
+}
+
+bool FixedDoubleArrayRef::is_the_hole(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedDoubleArray>()->is_the_hole(i);
+}
+
+double FixedDoubleArrayRef::get_scalar(int i) const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedDoubleArray>()->get_scalar(i);
+}
+
+int ScopeInfoRef::ContextLength() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<ScopeInfo>()->ContextLength();
+}
+
+int SharedFunctionInfoRef::internal_formal_parameter_count() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->internal_formal_parameter_count();
+}
+
+int SharedFunctionInfoRef::function_map_index() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->function_map_index();
+}
+
+bool SharedFunctionInfoRef::has_duplicate_parameters() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->has_duplicate_parameters();
+}
+
+FunctionKind SharedFunctionInfoRef::kind() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->kind();
+}
+
+LanguageMode SharedFunctionInfoRef::language_mode() {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->language_mode();
+}
+
+bool SharedFunctionInfoRef::native() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->native();
+}
+
+bool SharedFunctionInfoRef::HasBreakInfo() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->HasBreakInfo();
+}
+
+bool SharedFunctionInfoRef::HasBuiltinId() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->HasBuiltinId();
+}
+
+int SharedFunctionInfoRef::builtin_id() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->builtin_id();
+}
+
+bool SharedFunctionInfoRef::construct_as_builtin() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->construct_as_builtin();
+}
+
+bool SharedFunctionInfoRef::HasBytecodeArray() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->HasBytecodeArray();
+}
+
+int SharedFunctionInfoRef::GetBytecodeArrayRegisterCount() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<SharedFunctionInfo>()->GetBytecodeArray()->register_count();
+}
+
+MapRef NativeContextRef::fast_aliased_arguments_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->fast_aliased_arguments_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::sloppy_arguments_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->sloppy_arguments_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::strict_arguments_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->strict_arguments_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::js_array_fast_elements_map_index() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->js_array_fast_elements_map_index(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::initial_array_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->initial_array_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::set_value_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->set_value_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::set_key_value_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->set_key_value_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::map_key_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->map_key_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::map_value_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->map_value_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::map_key_value_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->map_key_value_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::iterator_result_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->iterator_result_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::string_iterator_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), handle(object<Context>()->string_iterator_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::promise_function_initial_map() const {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(),
+ handle(object<Context>()->promise_function()->initial_map(),
+ broker()->isolate()));
+}
+
+MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
+ DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
+ DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
+ return get(index).AsMap();
+}
+
+bool ObjectRef::BooleanValue() {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Object>()->BooleanValue(broker()->isolate());
+}
+
+double ObjectRef::OddballToNumber() const {
+ OddballType type = oddball_type();
+
+ switch (type) {
+ case OddballType::kBoolean: {
+ ObjectRef true_ref(broker(),
+ broker()->isolate()->factory()->true_value());
+ return this->equals(true_ref) ? 1 : 0;
+ break;
+ }
+ case OddballType::kUndefined: {
+ return std::numeric_limits<double>::quiet_NaN();
+ break;
+ }
+ case OddballType::kNull: {
+ return 0;
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+CellRef ModuleRef::GetCell(int cell_index) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return CellRef(broker(), handle(object<Module>()->GetCell(cell_index),
+ broker()->isolate()));
+}
+
+ObjectRef PropertyCellRef::value() const {
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(
+ broker(), handle(object<PropertyCell>()->value(), broker()->isolate()));
+}
+
+PropertyDetails PropertyCellRef::property_details() const {
+ AllowHandleDereference allow_handle_dereference;
+ return object<PropertyCell>()->property_details();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
new file mode 100644
index 0000000000..8503e82d12
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -0,0 +1,432 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_HEAP_BROKER_H_
+#define V8_COMPILER_JS_HEAP_BROKER_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/base/optional.h"
+#include "src/globals.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class DisallowHeapAccess {
+ DisallowHeapAllocation no_heap_allocation_;
+ DisallowHandleAllocation no_handle_allocation_;
+ DisallowHandleDereference no_handle_dereference_;
+ DisallowCodeDependencyChange no_dependency_change_;
+};
+
+enum class OddballType : uint8_t {
+ kNone, // Not an Oddball.
+ kBoolean, // True or False.
+ kUndefined,
+ kNull,
+ kHole,
+ kUninitialized,
+ kOther // Oddball, but none of the above.
+};
+
+class HeapObjectType {
+ public:
+ enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
+
+ typedef base::Flags<Flag> Flags;
+
+ HeapObjectType(InstanceType instance_type, Flags flags,
+ OddballType oddball_type)
+ : instance_type_(instance_type),
+ oddball_type_(oddball_type),
+ flags_(flags) {
+ DCHECK_EQ(instance_type == ODDBALL_TYPE,
+ oddball_type != OddballType::kNone);
+ }
+
+ OddballType oddball_type() const { return oddball_type_; }
+ InstanceType instance_type() const { return instance_type_; }
+ Flags flags() const { return flags_; }
+
+ bool is_callable() const { return flags_ & kCallable; }
+ bool is_undetectable() const { return flags_ & kUndetectable; }
+
+ private:
+ InstanceType const instance_type_;
+ OddballType const oddball_type_;
+ Flags const flags_;
+};
+
+#define HEAP_BROKER_OBJECT_LIST(V) \
+ V(AllocationSite) \
+ V(Cell) \
+ V(Code) \
+ V(Context) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedArrayBase) \
+ V(FixedDoubleArray) \
+ V(HeapNumber) \
+ V(HeapObject) \
+ V(InternalizedString) \
+ V(JSArray) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSObject) \
+ V(JSRegExp) \
+ V(Map) \
+ V(Module) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(NativeContext) \
+ V(PropertyCell) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(SharedFunctionInfo) \
+ V(String)
+
+class CompilationDependencies;
+class JSHeapBroker;
+#define FORWARD_DECL(Name) class Name##Ref;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+class ObjectRef {
+ public:
+ explicit ObjectRef(const JSHeapBroker* broker, Handle<Object> object)
+ : broker_(broker), object_(object) {}
+
+ template <typename T>
+ Handle<T> object() const {
+ AllowHandleDereference handle_dereference;
+ return Handle<T>::cast(object_);
+ }
+
+ OddballType oddball_type() const;
+
+ bool IsSmi() const;
+ int AsSmi() const;
+
+ bool equals(const ObjectRef& other) const;
+
+#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+#undef HEAP_IS_METHOD_DECL
+
+#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+#undef HEAP_AS_METHOD_DECL
+
+ StringRef TypeOf() const;
+ bool BooleanValue();
+ double OddballToNumber() const;
+
+ protected:
+ const JSHeapBroker* broker() const { return broker_; }
+
+ private:
+ const JSHeapBroker* broker_;
+ Handle<Object> object_;
+};
+
+class FieldTypeRef : public ObjectRef {
+ public:
+ using ObjectRef::ObjectRef;
+};
+
+class HeapObjectRef : public ObjectRef {
+ public:
+ using ObjectRef::ObjectRef;
+
+ HeapObjectType type() const;
+ MapRef map() const;
+ base::Optional<MapRef> TryGetObjectCreateMap() const;
+ bool IsSeqString() const;
+ bool IsExternalString() const;
+};
+
+class PropertyCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ ObjectRef value() const;
+ PropertyDetails property_details() const;
+};
+
+class JSObjectRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ bool IsUnboxedDoubleField(FieldIndex index) const;
+ double RawFastDoublePropertyAt(FieldIndex index) const;
+ ObjectRef RawFastPropertyAt(FieldIndex index) const;
+
+ FixedArrayBaseRef elements() const;
+ void EnsureElementsTenured();
+ ElementsKind GetElementsKind();
+};
+
+struct SlackTrackingResult {
+ SlackTrackingResult(int instance_sizex, int inobject_property_countx)
+ : instance_size(instance_sizex),
+ inobject_property_count(inobject_property_countx) {}
+ int instance_size;
+ int inobject_property_count;
+};
+
+class JSFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+
+ bool HasBuiltinFunctionId() const;
+ BuiltinFunctionId GetBuiltinFunctionId() const;
+ bool IsConstructor() const;
+ bool has_initial_map() const;
+ MapRef initial_map() const;
+ JSGlobalProxyRef global_proxy() const;
+ SlackTrackingResult FinishSlackTracking() const;
+ SharedFunctionInfoRef shared() const;
+ void EnsureHasInitialMap() const;
+};
+
+class JSRegExpRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+
+ ObjectRef raw_properties_or_hash() const;
+ ObjectRef data() const;
+ ObjectRef source() const;
+ ObjectRef flags() const;
+ ObjectRef last_index() const;
+};
+
+class HeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ double value() const;
+};
+
+class MutableHeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ double value() const;
+};
+
+class ContextRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ base::Optional<ContextRef> previous() const;
+ ObjectRef get(int index) const;
+};
+
+class NativeContextRef : public ContextRef {
+ public:
+ using ContextRef::ContextRef;
+
+ ScriptContextTableRef script_context_table() const;
+
+ MapRef fast_aliased_arguments_map() const;
+ MapRef sloppy_arguments_map() const;
+ MapRef strict_arguments_map() const;
+ MapRef js_array_fast_elements_map_index() const;
+ MapRef initial_array_iterator_map() const;
+ MapRef set_value_iterator_map() const;
+ MapRef set_key_value_iterator_map() const;
+ MapRef map_key_iterator_map() const;
+ MapRef map_value_iterator_map() const;
+ MapRef map_key_value_iterator_map() const;
+ MapRef iterator_result_map() const;
+ MapRef string_iterator_map() const;
+ MapRef promise_function_initial_map() const;
+
+ MapRef GetFunctionMapFromIndex(int index) const;
+};
+
+class NameRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+};
+
+class ScriptContextTableRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ struct LookupResult {
+ ContextRef context;
+ bool immutable;
+ int index;
+ };
+
+ base::Optional<LookupResult> lookup(const NameRef& name) const;
+};
+
+class FeedbackVectorRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ ObjectRef get(FeedbackSlot slot) const;
+};
+
+class AllocationSiteRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ JSObjectRef boilerplate() const;
+ PretenureFlag GetPretenureMode() const;
+ bool IsFastLiteral() const;
+ ObjectRef nested_site() const;
+ bool PointsToLiteral() const;
+ ElementsKind GetElementsKind() const;
+};
+
+class MapRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ int instance_size() const;
+ InstanceType instance_type() const;
+ int GetInObjectProperties() const;
+ int NumberOfOwnDescriptors() const;
+ PropertyDetails GetPropertyDetails(int i) const;
+ NameRef GetPropertyKey(int i) const;
+ FieldIndex GetFieldIndexFor(int i) const;
+ int GetInObjectPropertyOffset(int index) const;
+ ElementsKind elements_kind() const;
+ ObjectRef constructor_or_backpointer() const;
+ bool is_stable() const;
+ bool has_prototype_slot() const;
+ bool is_deprecated() const;
+ bool CanBeDeprecated() const;
+ bool CanTransition() const;
+ bool IsInobjectSlackTrackingInProgress() const;
+ MapRef FindFieldOwner(int descriptor) const;
+ bool is_dictionary_map() const;
+ bool IsJSArrayMap() const;
+ bool IsFixedCowArrayMap() const;
+
+ // Concerning the underlying instance_descriptors:
+ FieldTypeRef GetFieldType(int descriptor) const;
+};
+
+class FixedArrayBaseRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ int length() const;
+};
+
+class FixedArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+
+ ObjectRef get(int i) const;
+ bool is_the_hole(int i) const;
+};
+
+class FixedDoubleArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+
+ double get_scalar(int i) const;
+ bool is_the_hole(int i) const;
+};
+
+class JSArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+
+ ElementsKind GetElementsKind() const;
+ ObjectRef length() const;
+};
+
+class ScopeInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ int ContextLength() const;
+};
+
+class SharedFunctionInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ int internal_formal_parameter_count() const;
+ bool has_duplicate_parameters() const;
+ int function_map_index() const;
+ FunctionKind kind() const;
+ LanguageMode language_mode();
+ bool native() const;
+ bool HasBreakInfo() const;
+ bool HasBuiltinId() const;
+ int builtin_id() const;
+ bool construct_as_builtin() const;
+ bool HasBytecodeArray() const;
+ int GetBytecodeArrayRegisterCount() const;
+};
+
+class StringRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+
+ int length() const;
+ uint16_t GetFirstChar();
+ double ToNumber();
+};
+
+class ModuleRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+
+ CellRef GetCell(int cell_index);
+};
+
+class CellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+};
+
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+};
+
+class CodeRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+};
+
+class InternalizedStringRef : public StringRef {
+ public:
+ using StringRef::StringRef;
+};
+
+class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
+ public:
+ JSHeapBroker(Isolate* isolate);
+
+ HeapObjectType HeapObjectTypeFromMap(Handle<Map> map) const {
+ AllowHandleDereference handle_dereference;
+ return HeapObjectTypeFromMap(*map);
+ }
+
+ static base::Optional<int> TryGetSmi(Handle<Object> object);
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ friend class HeapObjectRef;
+ HeapObjectType HeapObjectTypeFromMap(Map* map) const;
+
+ Isolate* const isolate_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_HEAP_BROKER_H_
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 5f50eb4d2e..588626e292 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -113,7 +113,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
? candidate.shared_info
- : handle(candidate.functions[i]->shared());
+ : handle(candidate.functions[i]->shared(), isolate());
candidate.can_inline_function[i] = CanInlineFunction(shared);
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
       // recursion like f() -> g() -> f(). The indirect recursion is helpful in
@@ -607,7 +607,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
Handle<SharedFunctionInfo> shared =
candidate.functions[0].is_null()
? candidate.shared_info
- : handle(candidate.functions[0]->shared());
+ : handle(candidate.functions[0]->shared(), isolate());
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
cumulative_count_ += shared->GetBytecodeArray()->length();
@@ -709,7 +709,7 @@ bool JSInliningHeuristic::CandidateCompare::operator()(
}
void JSInliningHeuristic::PrintCandidates() {
- OFStream os(stdout);
+ StdoutStream os;
os << "Candidates for inlining (size=" << candidates_.size() << "):\n";
for (const Candidate& candidate : candidates_) {
os << " #" << candidate.node->id() << ":"
@@ -719,7 +719,7 @@ void JSInliningHeuristic::PrintCandidates() {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
? candidate.shared_info
- : handle(candidate.functions[i]->shared());
+ : handle(candidate.functions[i]->shared(), isolate());
PrintF(" - size:%d, name: %s\n", shared->GetBytecodeArray()->length(),
shared->DebugName()->ToCString().get());
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index af004011e3..dc8d70f6ac 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -80,6 +80,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
SimplifiedOperatorBuilder* simplified() const;
Mode const mode_;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 08917ab78b..247e36d5b3 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -298,7 +298,7 @@ bool JSInliner::DetermineCallTarget(
return false;
}
- shared_info_out = handle(function->shared());
+ shared_info_out = handle(function->shared(), isolate());
return true;
}
@@ -343,8 +343,8 @@ void JSInliner::DetermineCallContext(
JSFunction::EnsureFeedbackVector(function);
// The inlinee specializes to the context from the JSFunction object.
- context_out = jsgraph()->Constant(handle(function->context()));
- feedback_vector_out = handle(function->feedback_vector());
+ context_out = jsgraph()->Constant(handle(function->context(), isolate()));
+ feedback_vector_out = handle(function->feedback_vector(), isolate());
return;
}
@@ -358,7 +358,8 @@ void JSInliner::DetermineCallContext(
// The inlinee uses the locally provided context at instantiation.
context_out = NodeProperties::GetContextInput(match.node());
- feedback_vector_out = handle(FeedbackVector::cast(cell->value()));
+ feedback_vector_out =
+ handle(FeedbackVector::cast(cell->value()), isolate());
return;
}
@@ -372,7 +373,7 @@ Reduction JSInliner::Reduce(Node* node) {
}
Handle<Context> JSInliner::native_context() const {
- return handle(info_->context()->native_context());
+ return handle(info_->context()->native_context(), isolate());
}
Reduction JSInliner::ReduceJSCall(Node* node) {
@@ -604,10 +605,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (node->opcode() == IrOpcode::kJSCall &&
is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
Node* effect = NodeProperties::GetEffectInput(node);
- if (NodeProperties::CanBePrimitive(call.receiver(), effect)) {
+ if (NodeProperties::CanBePrimitive(isolate(), call.receiver(), effect)) {
CallParameters const& p = CallParametersOf(node->op());
Node* global_proxy = jsgraph()->HeapConstant(
- handle(info_->native_context()->global_proxy()));
+ handle(info_->native_context()->global_proxy(), isolate()));
Node* receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
call.receiver(), global_proxy, effect, start);
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 5fca638daf..1c7ee6c0b8 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -47,6 +47,7 @@ class JSInliner final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
Handle<Context> native_context() const;
Zone* const local_zone_;
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index d9742e47d9..fcb9e87adb 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -55,10 +55,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSProxy:
return ReduceIsInstanceType(node, JS_PROXY_TYPE);
- case Runtime::kInlineIsJSMap:
- return ReduceIsInstanceType(node, JS_MAP_TYPE);
- case Runtime::kInlineIsJSSet:
- return ReduceIsInstanceType(node, JS_SET_TYPE);
case Runtime::kInlineIsJSWeakMap:
return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
case Runtime::kInlineIsJSWeakSet:
@@ -424,7 +420,7 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
int stack_parameter_count) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
+ graph()->zone(), callable.descriptor(), stack_parameter_count,
CallDescriptor::kNeedsFrameState, node->op()->properties());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 07724530f6..4c6ea30bae 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -7,10 +7,10 @@
#include "src/accessors.h"
#include "src/api.h"
#include "src/code-factory.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
@@ -58,15 +58,17 @@ struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
};
JSNativeContextSpecialization::JSNativeContextSpecialization(
- Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<Context> native_context, CompilationDependencies* dependencies,
- Zone* zone)
+ Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ Flags flags, Handle<Context> native_context,
+ CompilationDependencies* dependencies, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker),
flags_(flags),
- global_object_(native_context->global_object()),
- global_proxy_(JSGlobalProxy::cast(native_context->global_proxy())),
- native_context_(native_context),
+ global_object_(native_context->global_object(), jsgraph->isolate()),
+ global_proxy_(JSGlobalProxy::cast(native_context->global_proxy()),
+ jsgraph->isolate()),
+ native_context_(js_heap_broker, native_context),
dependencies_(dependencies),
zone_(zone),
type_cache_(TypeCache::Get()) {}
@@ -152,13 +154,11 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
- if (function_map->is_stable()) {
+ if (function_map->is_stable() && function_prototype->IsConstructor()) {
+ dependencies()->DependOnStableMap(MapRef(js_heap_broker(), function_map));
Node* value = jsgraph()->Constant(function_prototype);
- dependencies()->AssumeMapStable(function_map);
- if (function_prototype->IsConstructor()) {
- ReplaceWithValue(node, value);
- return Replace(value);
- }
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
return NoChange();
@@ -190,7 +190,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Compute property access info for @@hasInstance on {receiver}.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context().object<Context>(),
graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
@@ -198,7 +199,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
@@ -207,8 +209,9 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- access_builder.AssumePrototypesStable(
- native_context(), access_info.receiver_maps(), holder);
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context().object<Context>(),
+ access_info.receiver_maps(), holder);
}
// Check that {constructor} is actually {receiver}.
@@ -232,8 +235,9 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- access_builder.AssumePrototypesStable(
- native_context(), access_info.receiver_maps(), holder);
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context().object<Context>(),
+ access_info.receiver_maps(), holder);
} else {
holder = receiver;
}
@@ -301,7 +305,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
// Check if either all or none of the {receiver_maps} have the given
@@ -322,7 +327,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
return kMayBeInPrototypeChain;
}
}
- for (PrototypeIterator j(receiver_map);; j.Advance()) {
+ for (PrototypeIterator j(isolate(), receiver_map);; j.Advance()) {
if (j.IsAtEnd()) {
all = false;
break;
@@ -386,7 +391,8 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// invocation of the instanceof operator again.
// ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
- Handle<JSReceiver> bound_target_function(function->bound_target_function());
+ Handle<JSReceiver> bound_target_function(function->bound_target_function(),
+ isolate());
NodeProperties::ReplaceValueInput(node, object, 0);
NodeProperties::ReplaceValueInput(
node, jsgraph()->HeapConstant(bound_target_function), 1);
@@ -406,11 +412,10 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// depend on that for the prototype constant-folding below.
JSFunction::EnsureHasInitialMap(function);
- // Install a code dependency on the {function}s initial map.
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- Node* prototype =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+ MapRef initial_map = dependencies()->DependOnInitialMap(
+ JSFunctionRef(js_heap_broker(), function));
+ Node* prototype = jsgraph()->Constant(
+ handle(initial_map.object<Map>()->prototype(), isolate()));
// Lower the {node} to JSHasInPrototypeChain.
NodeProperties::ReplaceValueInput(node, object, 0);
@@ -436,12 +441,14 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
- if (!m.Is(handle(native_context()->promise_function()))) return NoChange();
+ if (!m.Is(handle(native_context().object<Context>()->promise_function(),
+ isolate())))
+ return NoChange();
// Check if we know something about the {value}.
ZoneHandleSet<Map> value_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(value, effect, &value_maps);
+ NodeProperties::InferReceiverMaps(isolate(), value, effect, &value_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, value_maps.size());
@@ -471,13 +478,15 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// Check if we know something about the {resolution}.
ZoneHandleSet<Map> resolution_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(resolution, effect, &resolution_maps);
+ NodeProperties::InferReceiverMaps(isolate(), resolution, effect,
+ &resolution_maps);
if (result != NodeProperties::kReliableReceiverMaps) return NoChange();
DCHECK_NE(0, resolution_maps.size());
// Compute property access info for "then" on {resolution}.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context().object<Context>(),
graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
MapHandles(resolution_maps.begin(), resolution_maps.end()),
@@ -488,13 +497,15 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// We can further optimize the case where {resolution}
// definitely doesn't have a "then" property.
if (!access_info.IsNotFound()) return NoChange();
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
// Add proper dependencies on the {resolution}s [[Prototype]]s.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- access_builder.AssumePrototypesStable(native_context(),
- access_info.receiver_maps(), holder);
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context().object<Context>(),
+ access_info.receiver_maps(), holder);
}
// Simply fulfill the {promise} with the {resolution}.
@@ -512,7 +523,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
// context (if any), so we can constant-fold those fields, which is
// safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
if (access.index() == Context::NATIVE_CONTEXT_INDEX) {
- Node* value = jsgraph()->HeapConstant(native_context());
+ Node* value = jsgraph()->Constant(native_context());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -546,7 +557,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Lookup on the global object. We only deal with own data properties
// of the global object here (represented as PropertyCell).
- LookupIterator it(global_object(), name, LookupIterator::OWN);
+ LookupIterator it(isolate(), global_object(), name, LookupIterator::OWN);
it.TryLookupCachedProperty();
if (it.state() != LookupIterator::DATA) return NoChange();
if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
@@ -600,7 +611,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// can be deleted or reconfigured to an accessor property).
if (property_details.cell_type() != PropertyCellType::kMutable ||
property_details.IsConfigurable()) {
- dependencies()->AssumePropertyCell(property_cell);
+ dependencies()->DependOnGlobalProperty(
+ PropertyCellRef(js_heap_broker(), property_cell));
}
// Load from constant/undefined global property can be constant-folded.
@@ -624,14 +636,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(),
isolate());
- property_cell_value_type = Type::For(property_cell_value_map);
+ property_cell_value_type =
+ Type::For(js_heap_broker(), property_cell_value_map);
representation = MachineRepresentation::kTaggedPointer;
// We can only use the property cell value map for map check
// elimination if it's stable, i.e. the HeapObject wasn't
// mutated without the cell state being updated.
if (property_cell_value_map->is_stable()) {
- dependencies()->AssumeMapStable(property_cell_value_map);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), property_cell_value_map));
map = property_cell_value_map;
}
}
@@ -653,7 +667,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
case PropertyCellType::kConstant: {
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
- dependencies()->AssumePropertyCell(property_cell);
+ dependencies()->DependOnGlobalProperty(
+ PropertyCellRef(js_heap_broker(), property_cell));
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value,
jsgraph()->Constant(property_cell_value));
@@ -666,7 +681,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Record a code dependency on the cell, and just deoptimize if the new
// values' type doesn't match the type of the previous value in the
// cell.
- dependencies()->AssumePropertyCell(property_cell);
+ dependencies()->DependOnGlobalProperty(
+ PropertyCellRef(js_heap_broker(), property_cell));
Type property_cell_value_type;
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_cell_value->IsHeapObject()) {
@@ -675,7 +691,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
DCHECK(property_cell_value_map->is_stable());
- dependencies()->AssumeMapStable(property_cell_value_map);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), property_cell_value_map));
// Check that the {value} is a HeapObject.
value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
@@ -706,7 +723,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
case PropertyCellType::kMutable: {
// Record a code dependency on the cell, and just deoptimize if the
// property ever becomes read-only.
- dependencies()->AssumePropertyCell(property_cell);
+ dependencies()->DependOnGlobalProperty(
+ PropertyCellRef(js_heap_broker(), property_cell));
effect = graph()->NewNode(
simplified()->StoreField(ForPropertyCellValue(
MachineRepresentation::kTagged, Type::NonInternal(),
@@ -723,46 +741,57 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
- Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+ NameRef name(js_heap_broker(), LoadGlobalParametersOf(node->op()).name());
Node* effect = NodeProperties::GetEffectInput(node);
// Try to lookup the name on the script context table first (lexical scoping).
- ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
- if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
- Node* context = jsgraph()->HeapConstant(result.context);
+ base::Optional<ScriptContextTableRef::LookupResult> result =
+ native_context().script_context_table().lookup(name);
+ if (result) {
+ ObjectRef contents = result->context.get(result->index);
+ OddballType oddball_type = contents.oddball_type();
+ if (oddball_type == OddballType::kHole) {
+ return NoChange();
+ }
+ Node* context = jsgraph()->Constant(result->context);
Node* value = effect = graph()->NewNode(
- javascript()->LoadContext(0, result.index, result.immutable), context,
+ javascript()->LoadContext(0, result->index, result->immutable), context,
effect);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
// Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, nullptr, name, AccessMode::kLoad);
+ return ReduceGlobalAccess(node, nullptr, nullptr, name.object<Name>(),
+ AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
- Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+ NameRef name(js_heap_broker(), StoreGlobalParametersOf(node->op()).name());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Try to lookup the name on the script context table first (lexical scoping).
- ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
- if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
- if (result.immutable) return NoChange();
- Node* context = jsgraph()->HeapConstant(result.context);
- effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+ base::Optional<ScriptContextTableRef::LookupResult> result =
+ native_context().script_context_table().lookup(name);
+ if (result) {
+ ObjectRef contents = result->context.get(result->index);
+ OddballType oddball_type = contents.oddball_type();
+ if (oddball_type == OddballType::kHole || result->immutable) {
+ return NoChange();
+ }
+ Node* context = jsgraph()->Constant(result->context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, result->index),
value, context, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
// Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, value, name, AccessMode::kStore);
+ return ReduceGlobalAccess(node, nullptr, value, name.object<Name>(),
+ AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
@@ -789,7 +818,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Detached global proxies have |null| as their constructor.
if (maybe_constructor->IsJSFunction() &&
JSFunction::cast(maybe_constructor)->native_context() ==
- *native_context()) {
+ *native_context().object<Context>()) {
return ReduceGlobalAccess(node, receiver, value, name, access_mode,
index);
}
@@ -797,7 +826,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Compute property access infos for the receiver maps.
- AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context().object<Context>(),
graph()->zone());
ZoneVector<PropertyAccessInfo> access_infos(zone());
if (!access_info_factory.ComputePropertyAccessInfos(
@@ -824,7 +854,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if_exceptions = &if_exception_nodes;
}
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
@@ -1075,8 +1106,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// {function} in order to be notified about changes to the
// "prototype" of {function}.
JSFunction::EnsureHasInitialMap(function);
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
+ dependencies()->DependOnInitialMap(
+ JSFunctionRef(js_heap_broker(), function));
Handle<Object> prototype(function->prototype(), isolate());
Node* value = jsgraph()->Constant(prototype);
ReplaceWithValue(node, value);
@@ -1161,7 +1192,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
} else {
// Retrieve the native context from the given {node}.
// Compute element access infos for the receiver maps.
- AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context().object<Context>(),
graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(
@@ -1209,12 +1241,14 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Install dependencies on the relevant prototype maps.
for (Handle<Map> prototype_map : prototype_maps) {
- dependencies()->AssumeMapStable(prototype_map);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), prototype_map));
}
}
// Ensure that {receiver} is a heap object.
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
@@ -1736,22 +1770,21 @@ Node* JSNativeContextSpecialization::InlineApiCall(
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
- CallApiCallbackStub stub(isolate(), argc);
+ Callable call_api_callback = CodeFactory::CallApiCallback(isolate(), argc);
CallInterfaceDescriptor call_interface_descriptor =
- stub.GetCallInterfaceDescriptor();
+ call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), call_interface_descriptor,
+ graph()->zone(), call_interface_descriptor,
call_interface_descriptor.GetStackParameterCount() + argc +
1 /* implicit receiver */,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1, Linkage::kNoContext);
+ CallDescriptor::kNeedsFrameState);
Node* data = jsgraph()->Constant(call_data_object);
ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
Node* function_reference =
graph()->NewNode(common()->ExternalConstant(ExternalReference::Create(
&function, ExternalReference::DIRECT_API_CALL)));
- Node* code = jsgraph()->HeapConstant(stub.GetCode());
+ Node* code = jsgraph()->HeapConstant(call_api_callback.code());
// Add CallApiCallbackStub's register argument as well.
Node* context = jsgraph()->Constant(native_context());
@@ -1778,10 +1811,12 @@ JSNativeContextSpecialization::BuildPropertyLoad(
PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
if (access_info.holder().ToHandle(&holder)) {
- access_builder.AssumePrototypesStable(native_context(),
- access_info.receiver_maps(), holder);
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context().object<Context>(),
+ access_info.receiver_maps(), holder);
}
// Generate the actual property access.
@@ -1834,11 +1869,13 @@ JSNativeContextSpecialization::BuildPropertyStore(
PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
if (access_info.holder().ToHandle(&holder)) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
- access_builder.AssumePrototypesStable(native_context(),
- access_info.receiver_maps(), holder);
+ dependencies()->DependOnStablePrototypeChains(
+ js_heap_broker(), native_context().object<Context>(),
+ access_info.receiver_maps(), holder);
}
DCHECK(!access_info.IsNotFound());
@@ -2054,13 +2091,15 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
}
Handle<Map> receiver_map(map, isolate());
- if (!Map::TryUpdate(receiver_map).ToHandle(&receiver_map)) return NoChange();
+ if (!Map::TryUpdate(isolate(), receiver_map).ToHandle(&receiver_map))
+ return NoChange();
Handle<Name> cached_name = handle(
Name::cast(nexus.GetFeedbackExtra()->ToStrongHeapObject()), isolate());
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
+ native_context().object<Context>(),
graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, cached_name, AccessMode::kStoreInLiteral,
@@ -2073,7 +2112,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* control = NodeProperties::GetControlInput(node);
// Monomorphic property access.
- PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
+ dependencies());
receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
access_builder.BuildCheckMaps(receiver, &effect, control,
access_info.receiver_maps());
@@ -2234,8 +2274,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (isolate()->IsArrayBufferNeuteringIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
// gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
// Default to zero if the {receiver}s buffer was neutered.
Node* check = effect = graph()->NewNode(
@@ -2637,8 +2677,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
KeyedAccessLoadMode load_mode) {
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
isolate()->IsNoElementsProtectorIntact()) {
- // Add a code dependency on the "no elements" protector.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
// Ensure that the {index} is a valid String length.
index = *effect = graph()->NewNode(
@@ -2787,8 +2827,8 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
// Check if the array prototype chain is intact.
if (!isolate()->IsNoElementsProtectorIntact()) return false;
- // Install code dependency on the array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
return true;
}
@@ -2817,11 +2857,12 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
Handle<Map> receiver_map;
if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
DCHECK(!receiver_map->is_abandoned_prototype_map());
+ Isolate* isolate = this->isolate();
receiver_maps->erase(
std::remove_if(receiver_maps->begin(), receiver_maps->end(),
- [receiver_map](const Handle<Map>& map) {
+ [receiver_map, isolate](const Handle<Map>& map) {
return map->is_abandoned_prototype_map() ||
- map->FindRootMap() != *receiver_map;
+ map->FindRootMap(isolate) != *receiver_map;
}),
receiver_maps->end());
}
@@ -2835,7 +2876,7 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
@@ -2859,7 +2900,7 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
Node* receiver) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- return handle(m.Value()->map()->FindRootMap(), isolate());
+ return handle(m.Value()->map()->FindRootMap(isolate()), isolate());
} else if (m.IsJSCreate()) {
HeapObjectMatcher mtarget(m.InputAt(0));
HeapObjectMatcher mnewtarget(m.InputAt(1));
@@ -2869,7 +2910,7 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
if (constructor->has_initial_map()) {
Handle<Map> initial_map(constructor->initial_map(), isolate());
if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
- DCHECK_EQ(*initial_map, initial_map->FindRootMap());
+ DCHECK_EQ(*initial_map, initial_map->FindRootMap(isolate()));
return initial_map;
}
}
@@ -2878,24 +2919,6 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
return MaybeHandle<Map>();
}
-bool JSNativeContextSpecialization::LookupInScriptContextTable(
- Handle<Name> name, ScriptContextTableLookupResult* result) {
- if (!name->IsString()) return false;
- Handle<ScriptContextTable> script_context_table(
- global_object()->native_context()->script_context_table(), isolate());
- ScriptContextTable::LookupResult lookup_result;
- if (!ScriptContextTable::Lookup(script_context_table,
- Handle<String>::cast(name), &lookup_result)) {
- return false;
- }
- Handle<Context> script_context = ScriptContextTable::GetContext(
- script_context_table, lookup_result.context_index);
- result->context = script_context;
- result->immutable = lookup_result.mode == CONST;
- result->index = lookup_result.slot_index;
- return true;
-}
-
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 5f357a2924..53fe9e2c11 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -14,7 +14,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationDependencies;
class Factory;
class FeedbackNexus;
@@ -23,8 +22,10 @@ namespace compiler {
// Forward declarations.
enum class AccessMode;
class CommonOperatorBuilder;
+class CompilationDependencies;
class ElementAccessInfo;
class JSGraph;
+class JSHeapBroker;
class JSOperatorBuilder;
class MachineOperatorBuilder;
class PropertyAccessInfo;
@@ -45,7 +46,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
};
typedef base::Flags<Flag> Flags;
- JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone);
@@ -214,6 +216,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
CommonOperatorBuilder* common() const;
@@ -222,15 +226,16 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Flags flags() const { return flags_; }
Handle<JSGlobalObject> global_object() const { return global_object_; }
Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
- Handle<Context> native_context() const { return native_context_; }
+ const NativeContextRef& native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
+ const JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<JSGlobalObject> global_object_;
Handle<JSGlobalProxy> global_proxy_;
- Handle<Context> native_context_;
+ NativeContextRef native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 8fe10bb36a..5d45bb7f95 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -617,7 +617,8 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1) \
- V(ParseInt, Operator::kNoProperties, 2, 1)
+ V(ParseInt, Operator::kNoProperties, 2, 1) \
+ V(RegExpTest, Operator::kNoProperties, 2, 1)
#define BINARY_OP_LIST(V) V(Add)
@@ -950,7 +951,7 @@ const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
register_count); // parameter
}
-int GeneratorStoreRegisterCountOf(const Operator* op) {
+int GeneratorStoreValueCountOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSGeneratorStore, op->opcode());
return OpParameter<int>(op);
}
@@ -1141,10 +1142,10 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<ConstantElementsPair> constant_elements,
+ Handle<ArrayBoilerplateDescription> description,
VectorSlotPair const& feedback, int literal_flags, int number_of_elements) {
- CreateLiteralParameters parameters(constant_elements, feedback,
- number_of_elements, literal_flags);
+ CreateLiteralParameters parameters(description, feedback, number_of_elements,
+ literal_flags);
return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralArray, // opcode
Operator::kNoProperties, // properties
@@ -1165,7 +1166,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralArray(
}
const Operator* JSOperatorBuilder::CreateLiteralObject(
- Handle<BoilerplateDescription> constant_properties,
+ Handle<ObjectBoilerplateDescription> constant_properties,
VectorSlotPair const& feedback, int literal_flags,
int number_of_properties) {
CreateLiteralParameters parameters(constant_properties, feedback,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 6d89e5ac09..f73aca819f 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -16,8 +16,8 @@ namespace v8 {
namespace internal {
class AllocationSite;
-class BoilerplateDescription;
-class ConstantElementsPair;
+class ObjectBoilerplateDescription;
+class ArrayBoilerplateDescription;
class FeedbackCell;
class SharedFunctionInfo;
@@ -643,7 +643,7 @@ BinaryOperationHint BinaryOperationHintOf(const Operator* op);
CompareOperationHint CompareOperationHintOf(const Operator* op);
-int GeneratorStoreRegisterCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT;
Handle<ScopeInfo> ScopeInfoOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -705,16 +705,17 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateObject();
const Operator* CreatePromise();
const Operator* CreateTypedArray();
- const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
- VectorSlotPair const& feedback,
- int literal_flags, int number_of_elements);
+ const Operator* CreateLiteralArray(
+ Handle<ArrayBoilerplateDescription> constant,
+ VectorSlotPair const& feedback, int literal_flags,
+ int number_of_elements);
const Operator* CreateEmptyLiteralArray(VectorSlotPair const& feedback);
const Operator* CreateEmptyLiteralObject();
- const Operator* CreateLiteralObject(Handle<BoilerplateDescription> constant,
- VectorSlotPair const& feedback,
- int literal_flags,
- int number_of_properties);
+ const Operator* CreateLiteralObject(
+ Handle<ObjectBoilerplateDescription> constant,
+ VectorSlotPair const& feedback, int literal_flags,
+ int number_of_properties);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
VectorSlotPair const& feedback,
int literal_flags);
@@ -789,7 +790,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreMessage();
// Used to implement Ignition's SuspendGenerator bytecode.
- const Operator* GeneratorStore(int register_count);
+ const Operator* GeneratorStore(int value_count);
// Used to implement Ignition's SwitchOnGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
@@ -816,6 +817,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ObjectIsArray();
const Operator* ParseInt();
+ const Operator* RegExpTest();
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index d97e1dcbc7..fc7fab4b54 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -189,6 +189,7 @@ class JSSpeculativeBinopBuilder final {
}
JSGraph* jsgraph() const { return lowering_->jsgraph(); }
+ Isolate* isolate() const { return jsgraph()->isolate(); }
Graph* graph() const { return jsgraph()->graph(); }
JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
@@ -212,6 +213,8 @@ JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
Flags flags)
: jsgraph_(jsgraph), flags_(flags), feedback_vector_(feedback_vector) {}
+Isolate* JSTypeHintLowering::isolate() const { return jsgraph()->isolate(); }
+
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
const Operator* op, Node* operand, Node* effect, Node* control,
FeedbackSlot slot) const {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 90686e5248..5b6da84bcf 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -157,6 +157,7 @@ class JSTypeHintLowering {
DeoptimizeReason reson) const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
Flags flags() const { return flags_; }
const Handle<FeedbackVector>& feedback_vector() const {
return feedback_vector_;
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 877b4b5646..4fc1f84538 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -17,6 +17,7 @@
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/objects-inl.h"
+#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
@@ -92,19 +93,20 @@ class JSBinopReduction final {
if (BothInputsAre(Type::String()) ||
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
- if (m.right().HasValue() && m.right().Value()->IsString()) {
- Handle<String> right_string = Handle<String>::cast(m.right().Value());
- if (right_string->length() >= ConsString::kMinLength) return true;
+ const JSHeapBroker* broker = lowering_->js_heap_broker();
+ if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
+ StringRef right_string = m.right().Ref(broker).AsString();
+ if (right_string.length() >= ConsString::kMinLength) return true;
}
- if (m.left().HasValue() && m.left().Value()->IsString()) {
- Handle<String> left_string = Handle<String>::cast(m.left().Value());
- if (left_string->length() >= ConsString::kMinLength) {
+ if (m.left().HasValue() && m.left().Ref(broker).IsString()) {
+ StringRef left_string = m.left().Ref(broker).AsString();
+ if (left_string.length() >= ConsString::kMinLength) {
// The invariant for ConsString requires the left hand side to be
// a sequential or external string if the right hand side is the
// empty string. Since we don't know anything about the right hand
// side here, we must ensure that the left hand side satisfy the
// constraints independent of the right hand side.
- return left_string->IsSeqString() || left_string->IsExternalString();
+ return left_string.IsSeqString() || left_string.IsExternalString();
}
}
}
@@ -359,6 +361,7 @@ class JSBinopReduction final {
SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
Graph* graph() const { return lowering_->graph(); }
JSGraph* jsgraph() { return lowering_->jsgraph(); }
+ Isolate* isolate() { return jsgraph()->isolate(); }
JSOperatorBuilder* javascript() { return lowering_->javascript(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
Zone* zone() const { return graph()->zone(); }
@@ -404,12 +407,13 @@ class JSBinopReduction final {
// - immediately put in type bounds for all new nodes
// - relax effects from generic but not-side-effecting operations
-JSTypedLowering::JSTypedLowering(Editor* editor,
- JSGraph* jsgraph, Zone* zone)
+JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- empty_string_type_(
- Type::HeapConstant(factory()->empty_string(), graph()->zone())),
+ js_heap_broker_(js_heap_broker),
+ empty_string_type_(Type::HeapConstant(
+ js_heap_broker, factory()->empty_string(), graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
@@ -525,22 +529,6 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
}
}
- // We might be able to constant-fold the String concatenation now.
- if (r.BothInputsAre(Type::String())) {
- HeapObjectBinopMatcher m(node);
- if (m.IsFoldable()) {
- Handle<String> left = Handle<String>::cast(m.left().Value());
- Handle<String> right = Handle<String>::cast(m.right().Value());
- if (left->length() + right->length() > String::kMaxLength) {
- // No point in trying to optimize this, as it will just throw.
- return NoChange();
- }
- Node* value = jsgraph()->HeapConstant(
- factory()->NewConsString(left, right).ToHandleChecked());
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- }
// We might know for sure that we're creating a ConsString here.
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
@@ -581,7 +569,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
@@ -731,11 +719,11 @@ Node* JSTypedLowering::BuildGetStringLength(Node* value) {
// TODO(bmeurer): Get rid of this hack and instead have a way to
// express the string length in the types.
HeapObjectMatcher m(value);
- Node* length =
- (m.HasValue() && m.Value()->IsString())
- ? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
- : graph()->NewNode(simplified()->StringLength(), value);
- return length;
+ if (!m.HasValue() || !m.Ref(js_heap_broker()).IsString()) {
+ return graph()->NewNode(simplified()->StringLength(), value);
+ }
+
+ return jsgraph()->Constant(m.Ref(js_heap_broker()).AsString().length());
}
Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
@@ -969,19 +957,18 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
// Try constant-folding of JSToNumber/JSToNumeric with constant inputs. Here
// we only cover cases where ToNumber and ToNumeric coincide.
Type input_type = NodeProperties::GetType(input);
+
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Value()->IsString()) {
- Handle<Object> input_value = m.Value();
- return Replace(jsgraph()->Constant(
- String::ToNumber(Handle<String>::cast(input_value))));
+ if (m.HasValue() && m.Ref(js_heap_broker()).IsString()) {
+ StringRef input_value = m.Ref(js_heap_broker()).AsString();
+ return Replace(jsgraph()->Constant(input_value.ToNumber()));
}
}
if (input_type.IsHeapConstant()) {
- Handle<Object> input_value = input_type.AsHeapConstant()->Value();
- if (input_value->IsOddball()) {
- return Replace(jsgraph()->Constant(
- Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
+ ObjectRef input_value = input_type.AsHeapConstant()->Ref();
+ if (input_value.oddball_type() != OddballType::kNone) {
+ return Replace(jsgraph()->Constant(input_value.OddballToNumber()));
}
}
if (input_type.Is(Type::Number())) {
@@ -1048,13 +1035,6 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type.Is(Type::NaN())) {
return Replace(jsgraph()->HeapConstant(factory()->NaN_string()));
}
- if (input_type.Is(Type::OrderedNumber()) &&
- input_type.Min() == input_type.Max()) {
- // Note that we can use Type::OrderedNumber(), since
- // both 0 and -0 map to the String "0" in JavaScript.
- return Replace(jsgraph()->HeapConstant(
- factory()->NumberToString(factory()->NewNumber(input_type.Min()))));
- }
if (input_type.Is(Type::Number())) {
return Replace(graph()->NewNode(simplified()->NumberToString(), input));
}
@@ -1102,7 +1082,7 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
// Convert {receiver} using the ToObjectStub.
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
rfalse = efalse = if_false =
graph()->NewNode(common()->Call(call_descriptor),
@@ -1140,10 +1120,10 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type receiver_type = NodeProperties::GetType(receiver);
- Handle<Name> name = NamedAccessOf(node->op()).name();
+ NameRef name(js_heap_broker(), NamedAccessOf(node->op()).name());
+ NameRef length_str(js_heap_broker(), factory()->length_string());
// Optimize "length" property of strings.
- if (name.is_identical_to(factory()->length_string()) &&
- receiver_type.Is(Type::String())) {
+ if (name.equals(length_str) && receiver_type.Is(Type::String())) {
Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
ReplaceWithValue(node, value);
return Replace(value);
@@ -1375,10 +1355,9 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
Type module_type = NodeProperties::GetType(module);
if (module_type.IsHeapConstant()) {
- Handle<Module> module_constant =
- Handle<Module>::cast(module_type.AsHeapConstant()->Value());
- Handle<Cell> cell_constant(module_constant->GetCell(cell_index), isolate());
- return jsgraph()->HeapConstant(cell_constant);
+ ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
+ CellRef cell_constant(module_constant.GetCell(cell_index));
+ return jsgraph()->Constant(cell_constant);
}
FieldAccess field_access;
@@ -1508,9 +1487,9 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
-bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
+bool NeedsArgumentAdaptorFrame(SharedFunctionInfoRef shared, int arity) {
static const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- const int num_decl_parms = shared->internal_formal_parameter_count();
+ const int num_decl_parms = shared.internal_formal_parameter_count();
return (num_decl_parms != arity && num_decl_parms != sentinel);
}
@@ -1529,11 +1508,10 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
// Check if {target} is a JSFunction.
if (target_type.IsHeapConstant() &&
- target_type.AsHeapConstant()->Value()->IsJSFunction()) {
+ target_type.AsHeapConstant()->Ref().IsJSFunction()) {
// Only optimize [[Construct]] here if {function} is a Constructor.
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(target_type.AsHeapConstant()->Value());
- if (!function->IsConstructor()) return NoChange();
+ JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
+ if (!function.IsConstructor()) return NoChange();
// Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->RemoveInput(arity + 1);
@@ -1545,7 +1523,7 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), arity + 1,
+ graph()->zone(), callable.descriptor(), arity + 1,
CallDescriptor::kNeedsFrameState)));
return Changed(node);
}
@@ -1564,33 +1542,33 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
// Check if {target} is a known JSFunction.
if (target_type.IsHeapConstant() &&
- target_type.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(target_type.AsHeapConstant()->Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ target_type.AsHeapConstant()->Ref().IsJSFunction()) {
+ JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
+ SharedFunctionInfoRef shared = function.shared();
// Only optimize [[Construct]] here if {function} is a Constructor.
- if (!function->IsConstructor()) return NoChange();
+ if (!function.IsConstructor()) return NoChange();
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
// Patch {node} to an indirect call via the {function}s construct stub.
- bool use_builtin_construct_stub = shared->construct_as_builtin();
+ bool use_builtin_construct_stub = shared.construct_as_builtin();
- Handle<Code> code = use_builtin_construct_stub
- ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
- : BUILTIN_CODE(isolate(), JSConstructStubGeneric);
+ CodeRef code(js_heap_broker(),
+ use_builtin_construct_stub
+ ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
+ : BUILTIN_CODE(isolate(), JSConstructStubGeneric));
node->RemoveInput(arity + 1);
- node->InsertInput(graph()->zone(), 0, jsgraph()->HeapConstant(code));
+ node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
node->InsertInput(graph()->zone(), 2, new_target);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(),
- ConstructStubDescriptor(isolate()), 1 + arity, flags)));
+ node,
+ common()->Call(Linkage::GetStubCallDescriptor(
+ graph()->zone(), ConstructStubDescriptor{}, 1 + arity, flags)));
return Changed(node);
}
@@ -1619,8 +1597,7 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), arity + 1,
- flags)));
+ graph()->zone(), callable.descriptor(), arity + 1, flags)));
return Changed(node);
}
@@ -1648,19 +1625,18 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Check if {target} is a known JSFunction.
if (target_type.IsHeapConstant() &&
- target_type.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(target_type.AsHeapConstant()->Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ target_type.AsHeapConstant()->Ref().IsJSFunction()) {
+ JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
+ SharedFunctionInfoRef shared = function.shared();
- if (function->shared()->HasBreakInfo()) {
+ if (shared.HasBreakInfo()) {
// Do not inline the call if we need to check whether to break at entry.
return NoChange();
}
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
- if (IsClassConstructor(shared->kind())) return NoChange();
+ if (IsClassConstructor(shared.kind())) return NoChange();
// Load the context from the {target}.
Node* context = effect = graph()->NewNode(
@@ -1669,10 +1645,9 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
NodeProperties::ReplaceContextInput(node, context);
// Check if we need to convert the {receiver}.
- if (is_sloppy(shared->language_mode()) && !shared->native() &&
+ if (is_sloppy(shared.language_mode()) && !shared.native() &&
!receiver_type.Is(Type::Receiver())) {
- Node* global_proxy =
- jsgraph()->HeapConstant(handle(function->global_proxy()));
+ Node* global_proxy = jsgraph()->Constant(function.global_proxy());
receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(convert_mode),
receiver, global_proxy, effect, control);
@@ -1696,16 +1671,30 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->InsertInput(graph()->zone(), 3, argument_count);
node->InsertInput(
graph()->zone(), 4,
- jsgraph()->Constant(shared->internal_formal_parameter_count()));
+ jsgraph()->Constant(shared.internal_formal_parameter_count()));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(),
- 1 + arity, flags)));
- } else if (shared->HasBuiltinId() &&
- Builtins::HasCppImplementation(shared->builtin_id())) {
+ graph()->zone(), callable.descriptor(), 1 + arity, flags)));
+ } else if (shared.HasBuiltinId() &&
+ Builtins::HasCppImplementation(shared.builtin_id())) {
// Patch {node} to a direct CEntry call.
- ReduceBuiltin(isolate(), jsgraph(), node, shared->builtin_id(), arity,
+ ReduceBuiltin(isolate(), jsgraph(), node, shared.builtin_id(), arity,
flags);
+ } else if (shared.HasBuiltinId() &&
+ Builtins::KindOf(shared.builtin_id()) == Builtins::TFJ) {
+ // Patch {node} to a direct code object call.
+ Callable callable = Builtins::CallableFor(
+ isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, 1 + arity, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, argument_count);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), arity + 2, new_target);
@@ -1728,8 +1717,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
- flags)));
+ graph()->zone(), callable.descriptor(), 1 + arity, flags)));
return Changed(node);
}
@@ -1818,7 +1806,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kForInFilter);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
vfalse = efalse = if_false =
graph()->NewNode(common()->Call(call_descriptor),
@@ -2013,9 +2001,10 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- int register_count = GeneratorStoreRegisterCountOf(node->op());
+ int value_count = GeneratorStoreValueCountOf(node->op());
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
+ FieldAccess array_field =
+ AccessBuilder::ForJSGeneratorObjectParametersAndRegisters();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
@@ -2025,7 +2014,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
generator, effect, control);
- for (int i = 0; i < register_count; ++i) {
+ for (int i = 0; i < value_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
if (value != jsgraph()->OptimizedOutConstant()) {
effect = graph()->NewNode(
@@ -2086,7 +2075,8 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
int index = RestoreRegisterIndexOf(node->op());
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
+ FieldAccess array_field =
+ AccessBuilder::ForJSGeneratorObjectParametersAndRegisters();
FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
@@ -2240,6 +2230,8 @@ Reduction JSTypedLowering::ReduceJSParseInt(Node* node) {
}
Reduction JSTypedLowering::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
+
switch (node->opcode()) {
case IrOpcode::kJSEqual:
return ReduceJSEqual(node);
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 64bc059106..c8fcac5ff6 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -31,7 +31,8 @@ enum Signedness { kSigned, kUnsigned };
class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ JSTypedLowering(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker, Zone* zone);
~JSTypedLowering() final {}
const char* reducer_name() const override { return "JSTypedLowering"; }
@@ -97,12 +98,14 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
+ const JSHeapBroker* js_heap_broker_;
Type empty_string_type_;
Type pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index 933ccc0a9c..d10f06e4f4 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -155,13 +155,12 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
return state.forwarded;
}
-
-void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
+void JumpThreading::ApplyForwarding(Zone* local_zone,
+ ZoneVector<RpoNumber>& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
- Zone local_zone(code->isolate()->allocator(), ZONE_NAME);
- ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
+ ZoneVector<bool> skip(static_cast<int>(result.size()), false, local_zone);
// Skip empty blocks when the previous block doesn't fall through.
bool prev_fallthru = true;
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
index 3a378d0499..4d57f281c5 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/jump-threading.h
@@ -22,7 +22,8 @@ class JumpThreading {
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(ZoneVector<RpoNumber>& forwarding,
+ static void ApplyForwarding(Zone* local_zone,
+ ZoneVector<RpoNumber>& forwarding,
InstructionSequence* code);
};
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 91019f3163..99f192acdf 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -158,7 +158,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
- case Runtime::kGeneratorGetContinuation:
case Runtime::kIncBlockCounter:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
@@ -185,8 +184,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
- case Runtime::kInlineIsJSMap:
- case Runtime::kInlineIsJSSet:
case Runtime::kInlineIsJSWeakMap:
case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
@@ -343,32 +340,30 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
"js-call");
}
-// TODO(all): Add support for return representations/locations to
-// CallInterfaceDescriptor.
// TODO(turbofan): cache call descriptors for code stub calls.
CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type,
- size_t return_count, Linkage::ContextSpecification context_spec) {
+ Operator::Properties properties, StubCallMode stub_mode) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
- const int context_count = context_spec == kPassContext ? 1 : 0;
+ const int context_count = descriptor.HasContextParameter() ? 1 : 0;
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
+ size_t return_count = descriptor.GetReturnCount();
LocationSignature::Builder locations(zone, return_count, parameter_count);
// Add returns.
if (locations.return_count_ > 0) {
- locations.AddReturn(regloc(kReturnRegister0, return_type));
+ locations.AddReturn(regloc(kReturnRegister0, descriptor.GetReturnType(0)));
}
if (locations.return_count_ > 1) {
- locations.AddReturn(regloc(kReturnRegister1, return_type));
+ locations.AddReturn(regloc(kReturnRegister1, descriptor.GetReturnType(1)));
}
if (locations.return_count_ > 2) {
- locations.AddReturn(regloc(kReturnRegister2, return_type));
+ locations.AddReturn(regloc(kReturnRegister2, descriptor.GetReturnType(2)));
}
// Add parameters in registers and on the stack.
@@ -390,57 +385,39 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
}
- // The target for stub calls is a code object.
- MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc =
- LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- stack_parameter_count, // stack_parameter_count
- properties, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots | // flags
- flags, // flags
- descriptor.DebugName(isolate), descriptor.allocatable_registers());
-}
-
-// static
-CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
- LocationSignature::Builder locations(zone, 1, 1);
-
- locations.AddParam(regloc(kAllocateSizeRegister, MachineType::Int32()));
-
- locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
-
- // The target for allocate calls is a code object.
- MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc =
- LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- 0, // stack_parameter_count
- Operator::kNoThrow, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots, // flags
- "Allocate");
+ // The target for stub calls depends on the requested mode.
+ CallDescriptor::Kind kind = stub_mode == StubCallMode::kCallWasmRuntimeStub
+ ? CallDescriptor::kCallWasmFunction
+ : CallDescriptor::kCallCodeObject;
+ MachineType target_type = stub_mode == StubCallMode::kCallWasmRuntimeStub
+ ? MachineType::Pointer()
+ : MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots | flags, // flags
+ descriptor.DebugName(), // debug name
+ descriptor.allocatable_registers());
}
// static
CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int parameter_count = register_parameter_count + stack_parameter_count;
- LocationSignature::Builder locations(zone, 0, parameter_count);
+ DCHECK_EQ(descriptor.GetReturnCount(), 1);
+ LocationSignature::Builder locations(zone, 1, parameter_count);
+
+ locations.AddReturn(regloc(kReturnRegister0, descriptor.GetReturnType(0)));
// Add parameters in registers and on the stack.
for (int i = 0; i < parameter_count; i++) {
@@ -472,7 +449,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
kFlags, // flags
- descriptor.DebugName(isolate));
+ descriptor.DebugName());
}
LinkageLocation Linkage::GetOsrValueLocation(int index) const {
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index ae3a6bfe2d..e8c15123d4 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -366,8 +366,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
- enum ContextSpecification { kNoContext, kPassContext };
-
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
static CallDescriptor* ComputeIncoming(Zone* zone,
@@ -390,16 +388,13 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
CallDescriptor::Flags flags);
static CallDescriptor* GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
- MachineType return_type = MachineType::AnyTagged(),
- size_t return_count = 1,
- ContextSpecification context_spec = kPassContext);
+ StubCallMode stub_mode = StubCallMode::kCallOnHeapBuiltin);
- static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count);
// Creates a call descriptor for simplified C calls that is appropriate
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 53d5d794d9..c1d8570353 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -445,12 +445,13 @@ LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Extend(
}
void LoadElimination::AbstractMaps::Print() const {
+ StdoutStream os;
for (auto pair : info_for_node_) {
- PrintF(" #%d:%s\n", pair.first->id(), pair.first->op()->mnemonic());
- OFStream os(stdout);
+ os << " #" << pair.first->id() << ":" << pair.first->op()->mnemonic()
+ << std::endl;
ZoneHandleSet<Map> const& maps = pair.second;
for (size_t i = 0; i < maps.size(); ++i) {
- os << " - " << Brief(*maps[i]) << "\n";
+ os << " - " << Brief(*maps[i]) << std::endl;
}
}
}
@@ -1368,6 +1369,8 @@ CommonOperatorBuilder* LoadElimination::common() const {
Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
+Isolate* LoadElimination::isolate() const { return jsgraph()->isolate(); }
+
Factory* LoadElimination::factory() const { return jsgraph()->factory(); }
} // namespace compiler
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index f853c8f953..d3b1b5c14a 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -302,6 +302,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
CommonOperatorBuilder* common() const;
AbstractState const* empty_state() const { return &empty_state_; }
+ Isolate* isolate() const;
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 6473c3ee1a..fe5b8c7889 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -77,10 +77,9 @@ void LoopVariableOptimizer::Run() {
void InductionVariable::AddUpperBound(Node* bound,
InductionVariable::ConstraintKind kind) {
if (FLAG_trace_turbo_loop) {
- OFStream os(stdout);
- os << "New upper bound for " << phi()->id() << " (loop "
- << NodeProperties::GetControlInput(phi())->id() << "): " << *bound
- << std::endl;
+ StdoutStream{} << "New upper bound for " << phi()->id() << " (loop "
+ << NodeProperties::GetControlInput(phi())->id()
+ << "): " << *bound << std::endl;
}
upper_bounds_.push_back(Bound(bound, kind));
}
@@ -88,9 +87,9 @@ void InductionVariable::AddUpperBound(Node* bound,
void InductionVariable::AddLowerBound(Node* bound,
InductionVariable::ConstraintKind kind) {
if (FLAG_trace_turbo_loop) {
- OFStream os(stdout);
- os << "New lower bound for " << phi()->id() << " (loop "
- << NodeProperties::GetControlInput(phi())->id() << "): " << *bound;
+ StdoutStream{} << "New lower bound for " << phi()->id() << " (loop "
+ << NodeProperties::GetControlInput(phi())->id()
+ << "): " << *bound;
}
lower_bounds_.push_back(Bound(bound, kind));
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index b3f5ec2964..55ef35d231 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -120,7 +120,6 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
- case IrOpcode::kLoadRootsPointer:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -253,6 +252,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kRoundUint32ToFloat32:
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundUint64ToFloat32:
+ case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kFloat32Constant:
case IrOpcode::kTruncateFloat64ToFloat32:
MACHINE_FLOAT32_BINOP_LIST(LABEL)
@@ -266,6 +266,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeInt32ToFloat64:
case IrOpcode::kChangeUint32ToFloat64:
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat64SilenceNaN:
MACHINE_FLOAT64_BINOP_LIST(LABEL)
@@ -407,6 +409,7 @@ class MachineRepresentationChecker {
case IrOpcode::kChangeUint32ToFloat64:
case IrOpcode::kRoundInt32ToFloat32:
case IrOpcode::kRoundUint32ToFloat32:
+ case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
@@ -470,6 +473,11 @@ class MachineRepresentationChecker {
}
break;
#undef LABEL
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
+ CheckValueInputForFloat64Op(node, 0);
+ CheckValueInputForInt32Op(node, 1);
+ break;
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 61a22489f7..8eac3ed18c 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -226,7 +226,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadRootsPointer, Operator::kNoProperties, 0, 0, 1) \
V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index f07c89f70a..65217cf2a6 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -10,6 +10,7 @@
#include "src/globals.h"
#include "src/machine-type.h"
#include "src/utils.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -614,9 +615,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
- // Access to the root register.
- const Operator* LoadRootsPointer();
-
// atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 0298f01251..e7ec150985 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -236,8 +236,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- auto call_descriptor =
- Linkage::GetAllocateCallDescriptor(graph()->zone());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), AllocateDescriptor{}, 0,
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ Call(allocate_operator_.get(), target, size);
@@ -291,8 +292,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- auto call_descriptor =
- Linkage::GetAllocateCallDescriptor(graph()->zone());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), AllocateDescriptor{}, 0,
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), target, size));
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 7f1868e12a..00575fe117 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -307,9 +307,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-#define UNSUPPORTED_COND(opcode, condition) \
- OFStream out(stdout); \
- out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
UNIMPLEMENTED();
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
@@ -605,8 +605,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ Call(kScratchReg, i.InputRegister(0),
- Code::kHeaderSize - kHeapObjectTag);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -614,10 +617,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
- : RelocInfo::JS_TO_WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Call(wasm_code, constant.rmode());
} else {
__ Call(i.InputRegister(0));
}
@@ -635,8 +637,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ Jump(kScratchReg, i.InputRegister(0),
- Code::kHeaderSize - kHeapObjectTag);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -644,10 +650,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallWasm: {
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
- : RelocInfo::JS_TO_WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+ __ Jump(wasm_code, constant.rmode());
} else {
__ Jump(i.InputRegister(0));
}
@@ -657,7 +662,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -745,6 +754,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -799,12 +811,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ mov(i.OutputRegister(), kRootRegister);
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -2939,31 +2948,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
MipsOperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2979,8 +2976,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2991,12 +2989,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
@@ -3151,6 +3147,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
@@ -3363,7 +3369,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
- __ li(dst, Operand(src.ToExternalReference()));
+ __ li(dst, src.ToExternalReference());
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
diff --git a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
index af86a87ad7..a0fe188430 100644
--- a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
@@ -2,23 +2,1728 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/code-generator.h"
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
-bool InstructionScheduler::SchedulerSupported() { return false; }
-
+bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
- UNIMPLEMENTED();
+ switch (instr->arch_opcode()) {
+ case kMipsAbsD:
+ case kMipsAbsS:
+ case kMipsAdd:
+ case kMipsAddD:
+ case kMipsAddOvf:
+ case kMipsAddPair:
+ case kMipsAddS:
+ case kMipsAnd:
+ case kMipsByteSwap32:
+ case kMipsCeilWD:
+ case kMipsCeilWS:
+ case kMipsClz:
+ case kMipsCmp:
+ case kMipsCmpD:
+ case kMipsCmpS:
+ case kMipsCtz:
+ case kMipsCvtDS:
+ case kMipsCvtDUw:
+ case kMipsCvtDW:
+ case kMipsCvtSD:
+ case kMipsCvtSUw:
+ case kMipsCvtSW:
+ case kMipsDiv:
+ case kMipsDivD:
+ case kMipsDivS:
+ case kMipsDivU:
+ case kMipsExt:
+ case kMipsF32x4Abs:
+ case kMipsF32x4Add:
+ case kMipsF32x4AddHoriz:
+ case kMipsF32x4Eq:
+ case kMipsF32x4ExtractLane:
+ case kMipsF32x4Le:
+ case kMipsF32x4Lt:
+ case kMipsF32x4Max:
+ case kMipsF32x4Min:
+ case kMipsF32x4Mul:
+ case kMipsF32x4Ne:
+ case kMipsF32x4Neg:
+ case kMipsF32x4RecipApprox:
+ case kMipsF32x4RecipSqrtApprox:
+ case kMipsF32x4ReplaceLane:
+ case kMipsF32x4SConvertI32x4:
+ case kMipsF32x4Splat:
+ case kMipsF32x4Sub:
+ case kMipsF32x4UConvertI32x4:
+ case kMipsFloat32Max:
+ case kMipsFloat32Min:
+ case kMipsFloat32RoundDown:
+ case kMipsFloat32RoundTiesEven:
+ case kMipsFloat32RoundTruncate:
+ case kMipsFloat32RoundUp:
+ case kMipsFloat64ExtractHighWord32:
+ case kMipsFloat64ExtractLowWord32:
+ case kMipsFloat64InsertHighWord32:
+ case kMipsFloat64InsertLowWord32:
+ case kMipsFloat64Max:
+ case kMipsFloat64Min:
+ case kMipsFloat64RoundDown:
+ case kMipsFloat64RoundTiesEven:
+ case kMipsFloat64RoundTruncate:
+ case kMipsFloat64RoundUp:
+ case kMipsFloat64SilenceNaN:
+ case kMipsFloorWD:
+ case kMipsFloorWS:
+ case kMipsI16x8Add:
+ case kMipsI16x8AddHoriz:
+ case kMipsI16x8AddSaturateS:
+ case kMipsI16x8AddSaturateU:
+ case kMipsI16x8Eq:
+ case kMipsI16x8ExtractLane:
+ case kMipsI16x8GeS:
+ case kMipsI16x8GeU:
+ case kMipsI16x8GtS:
+ case kMipsI16x8GtU:
+ case kMipsI16x8MaxS:
+ case kMipsI16x8MaxU:
+ case kMipsI16x8MinS:
+ case kMipsI16x8MinU:
+ case kMipsI16x8Mul:
+ case kMipsI16x8Ne:
+ case kMipsI16x8Neg:
+ case kMipsI16x8ReplaceLane:
+ case kMipsI16x8SConvertI32x4:
+ case kMipsI16x8SConvertI8x16High:
+ case kMipsI16x8SConvertI8x16Low:
+ case kMipsI16x8Shl:
+ case kMipsI16x8ShrS:
+ case kMipsI16x8ShrU:
+ case kMipsI16x8Splat:
+ case kMipsI16x8Sub:
+ case kMipsI16x8SubSaturateS:
+ case kMipsI16x8SubSaturateU:
+ case kMipsI16x8UConvertI32x4:
+ case kMipsI16x8UConvertI8x16High:
+ case kMipsI16x8UConvertI8x16Low:
+ case kMipsI32x4Add:
+ case kMipsI32x4AddHoriz:
+ case kMipsI32x4Eq:
+ case kMipsI32x4ExtractLane:
+ case kMipsI32x4GeS:
+ case kMipsI32x4GeU:
+ case kMipsI32x4GtS:
+ case kMipsI32x4GtU:
+ case kMipsI32x4MaxS:
+ case kMipsI32x4MaxU:
+ case kMipsI32x4MinS:
+ case kMipsI32x4MinU:
+ case kMipsI32x4Mul:
+ case kMipsI32x4Ne:
+ case kMipsI32x4Neg:
+ case kMipsI32x4ReplaceLane:
+ case kMipsI32x4SConvertF32x4:
+ case kMipsI32x4SConvertI16x8High:
+ case kMipsI32x4SConvertI16x8Low:
+ case kMipsI32x4Shl:
+ case kMipsI32x4ShrS:
+ case kMipsI32x4ShrU:
+ case kMipsI32x4Splat:
+ case kMipsI32x4Sub:
+ case kMipsI32x4UConvertF32x4:
+ case kMipsI32x4UConvertI16x8High:
+ case kMipsI32x4UConvertI16x8Low:
+ case kMipsI8x16Add:
+ case kMipsI8x16AddSaturateS:
+ case kMipsI8x16AddSaturateU:
+ case kMipsI8x16Eq:
+ case kMipsI8x16ExtractLane:
+ case kMipsI8x16GeS:
+ case kMipsI8x16GeU:
+ case kMipsI8x16GtS:
+ case kMipsI8x16GtU:
+ case kMipsI8x16MaxS:
+ case kMipsI8x16MaxU:
+ case kMipsI8x16MinS:
+ case kMipsI8x16MinU:
+ case kMipsI8x16Mul:
+ case kMipsI8x16Ne:
+ case kMipsI8x16Neg:
+ case kMipsI8x16ReplaceLane:
+ case kMipsI8x16SConvertI16x8:
+ case kMipsI8x16Shl:
+ case kMipsI8x16ShrS:
+ case kMipsI8x16ShrU:
+ case kMipsI8x16Splat:
+ case kMipsI8x16Sub:
+ case kMipsI8x16SubSaturateS:
+ case kMipsI8x16SubSaturateU:
+ case kMipsI8x16UConvertI16x8:
+ case kMipsIns:
+ case kMipsLsa:
+ case kMipsMaddD:
+ case kMipsMaddS:
+ case kMipsMaxD:
+ case kMipsMaxS:
+ case kMipsMinD:
+ case kMipsMinS:
+ case kMipsMod:
+ case kMipsModU:
+ case kMipsMov:
+ case kMipsMsubD:
+ case kMipsMsubS:
+ case kMipsMul:
+ case kMipsMulD:
+ case kMipsMulHigh:
+ case kMipsMulHighU:
+ case kMipsMulOvf:
+ case kMipsMulPair:
+ case kMipsMulS:
+ case kMipsNegD:
+ case kMipsNegS:
+ case kMipsNor:
+ case kMipsOr:
+ case kMipsPopcnt:
+ case kMipsRor:
+ case kMipsRoundWD:
+ case kMipsRoundWS:
+ case kMipsS128And:
+ case kMipsS128Not:
+ case kMipsS128Or:
+ case kMipsS128Select:
+ case kMipsS128Xor:
+ case kMipsS128Zero:
+ case kMipsS16x2Reverse:
+ case kMipsS16x4Reverse:
+ case kMipsS16x8InterleaveEven:
+ case kMipsS16x8InterleaveLeft:
+ case kMipsS16x8InterleaveOdd:
+ case kMipsS16x8InterleaveRight:
+ case kMipsS16x8PackEven:
+ case kMipsS16x8PackOdd:
+ case kMipsS1x16AllTrue:
+ case kMipsS1x16AnyTrue:
+ case kMipsS1x4AllTrue:
+ case kMipsS1x4AnyTrue:
+ case kMipsS1x8AllTrue:
+ case kMipsS1x8AnyTrue:
+ case kMipsS32x4InterleaveEven:
+ case kMipsS32x4InterleaveLeft:
+ case kMipsS32x4InterleaveOdd:
+ case kMipsS32x4InterleaveRight:
+ case kMipsS32x4PackEven:
+ case kMipsS32x4PackOdd:
+ case kMipsS32x4Shuffle:
+ case kMipsS8x16Concat:
+ case kMipsS8x16InterleaveEven:
+ case kMipsS8x16InterleaveLeft:
+ case kMipsS8x16InterleaveOdd:
+ case kMipsS8x16InterleaveRight:
+ case kMipsS8x16PackEven:
+ case kMipsS8x16PackOdd:
+ case kMipsS8x16Shuffle:
+ case kMipsS8x2Reverse:
+ case kMipsS8x4Reverse:
+ case kMipsS8x8Reverse:
+ case kMipsSar:
+ case kMipsSarPair:
+ case kMipsSeb:
+ case kMipsSeh:
+ case kMipsShl:
+ case kMipsShlPair:
+ case kMipsShr:
+ case kMipsShrPair:
+ case kMipsSqrtD:
+ case kMipsSqrtS:
+ case kMipsSub:
+ case kMipsSubD:
+ case kMipsSubOvf:
+ case kMipsSubPair:
+ case kMipsSubS:
+ case kMipsTruncUwD:
+ case kMipsTruncUwS:
+ case kMipsTruncWD:
+ case kMipsTruncWS:
+ case kMipsTst:
+ case kMipsXor:
+ return kNoOpcodeFlags;
+
+ case kMipsLb:
+ case kMipsLbu:
+ case kMipsLdc1:
+ case kMipsLh:
+ case kMipsLhu:
+ case kMipsLw:
+ case kMipsLwc1:
+ case kMipsMsaLd:
+ case kMipsPeek:
+ case kMipsUldc1:
+ case kMipsUlh:
+ case kMipsUlhu:
+ case kMipsUlw:
+ case kMipsUlwc1:
+ return kIsLoadOperation;
+
+ case kMipsModD:
+ case kMipsModS:
+ case kMipsMsaSt:
+ case kMipsPush:
+ case kMipsSb:
+ case kMipsSdc1:
+ case kMipsSh:
+ case kMipsStackClaim:
+ case kMipsStoreToStackSlot:
+ case kMipsSw:
+ case kMipsSwc1:
+ case kMipsUsdc1:
+ case kMipsUsh:
+ case kMipsUsw:
+ case kMipsUswc1:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+enum Latency {
+ BRANCH = 4, // Estimated max.
+ RINT_S = 4, // Estimated.
+ RINT_D = 4, // Estimated.
+
+ MULT = 4,
+ MULTU = 4,
+ MADD = 4,
+ MADDU = 4,
+ MSUB = 4,
+ MSUBU = 4,
+
+ MUL = 7,
+ MULU = 7,
+ MUH = 7,
+ MUHU = 7,
+
+ DIV = 50, // Min:11 Max:50
+ DIVU = 50,
+
+ ABS_S = 4,
+ ABS_D = 4,
+ NEG_S = 4,
+ NEG_D = 4,
+ ADD_S = 4,
+ ADD_D = 4,
+ SUB_S = 4,
+ SUB_D = 4,
+ MAX_S = 4, // Estimated.
+ MAX_D = 4, // Estimated.
+ C_cond_S = 4,
+ C_cond_D = 4,
+ MUL_S = 4,
+
+ MADD_S = 4,
+ MSUB_S = 4,
+ NMADD_S = 4,
+ NMSUB_S = 4,
+
+ CABS_cond_S = 4,
+ CABS_cond_D = 4,
+
+ CVT_D_S = 4,
+ CVT_PS_PW = 4,
+
+ CVT_S_W = 4,
+ CVT_S_L = 4,
+ CVT_D_W = 4,
+ CVT_D_L = 4,
+
+ CVT_S_D = 4,
+
+ CVT_W_S = 4,
+ CVT_W_D = 4,
+ CVT_L_S = 4,
+ CVT_L_D = 4,
+
+ CEIL_W_S = 4,
+ CEIL_W_D = 4,
+ CEIL_L_S = 4,
+ CEIL_L_D = 4,
+
+ FLOOR_W_S = 4,
+ FLOOR_W_D = 4,
+ FLOOR_L_S = 4,
+ FLOOR_L_D = 4,
+
+ ROUND_W_S = 4,
+ ROUND_W_D = 4,
+ ROUND_L_S = 4,
+ ROUND_L_D = 4,
+
+ TRUNC_W_S = 4,
+ TRUNC_W_D = 4,
+ TRUNC_L_S = 4,
+ TRUNC_L_D = 4,
+
+ MOV_S = 4,
+ MOV_D = 4,
+
+ MOVF_S = 4,
+ MOVF_D = 4,
+
+ MOVN_S = 4,
+ MOVN_D = 4,
+
+ MOVT_S = 4,
+ MOVT_D = 4,
+
+ MOVZ_S = 4,
+ MOVZ_D = 4,
+
+ MUL_D = 5,
+ MADD_D = 5,
+ MSUB_D = 5,
+ NMADD_D = 5,
+ NMSUB_D = 5,
+
+ RECIP_S = 13,
+ RECIP_D = 26,
+
+ RSQRT_S = 17,
+ RSQRT_D = 36,
+
+ DIV_S = 17,
+ SQRT_S = 17,
+
+ DIV_D = 32,
+ SQRT_D = 32,
+
+ MTC1 = 4,
+ MTHC1 = 4,
+ DMTC1 = 4,
+ LWC1 = 4,
+ LDC1 = 4,
+ LDXC1 = 4,
+ LUXC1 = 4,
+ LWXC1 = 4,
+
+ MFC1 = 1,
+ MFHC1 = 1,
+ MFHI = 1,
+ MFLO = 1,
+ DMFC1 = 1,
+ SWC1 = 1,
+ SDC1 = 1,
+ SDXC1 = 1,
+ SUXC1 = 1,
+ SWXC1 = 1,
+};
+
+int ClzLatency() {
+ if (IsMipsArchVariant(kLoongson)) {
+ return (6 + 2 * Latency::BRANCH);
+ } else {
+ return 1;
+ }
+}
+
+int RorLatency(bool is_operand_register = true) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ if (is_operand_register) {
+ return 4;
+ } else {
+ return 3; // Estimated max.
+ }
+ }
+}
+
+int AdduLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int XorLatency(bool is_operand_register = true) {
+ return AdduLatency(is_operand_register);
+}
+
+int AndLatency(bool is_operand_register = true) {
+ return AdduLatency(is_operand_register);
+}
+
+int OrLatency(bool is_operand_register = true) {
+ return AdduLatency(is_operand_register);
+}
+
+int SubuLatency(bool is_operand_register = true) {
+ return AdduLatency(is_operand_register);
+}
+
+int MulLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Latency::MULT + 1;
+ } else {
+ return Latency::MUL + 1;
+ }
+ } else {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Latency::MULT + 2;
+ } else {
+ return Latency::MUL + 2;
+ }
+ }
+}
+
+int NorLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+int InsLatency() {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return SubuLatency(false) + 7;
+ }
+}
+
+int ShlPairLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ int latency =
+ AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4;
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ return latency + Latency::BRANCH + 2;
+ } else {
+ return latency + 2;
+ }
+ } else {
+ return 2;
+ }
+}
+
+int ShrPairLatency(bool is_operand_register = true, uint32_t shift = 0) {
+ if (is_operand_register) {
+ int latency =
+ AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4;
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ return latency + Latency::BRANCH + 2;
+ } else {
+ return latency + 2;
+ }
+ } else {
+ // Estimated max.
+ return (InsLatency() + 2 > OrLatency() + 3) ? InsLatency() + 2
+ : OrLatency() + 3;
+ }
+}
+
+int SarPairLatency(bool is_operand_register = true, uint32_t shift = 0) {
+ if (is_operand_register) {
+ return AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) +
+ Latency::BRANCH + 6;
+ } else {
+ shift = shift & 0x3F;
+ if (shift == 0) {
+ return 2;
+ } else if (shift < 32) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return InsLatency() + 2;
+ } else {
+ return OrLatency() + 3;
+ }
+ } else if (shift == 32) {
+ return 2;
+ } else {
+ return 2;
+ }
+ }
+}
+
+int ExtLatency() {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ // Estimated max.
+ return 2;
+ }
+}
+
+int LsaLatency() {
+ // Estimated max.
+ return AdduLatency() + 1;
+}
+
+int SltLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int SltuLatency(bool is_operand_register = true) {
+ return SltLatency(is_operand_register);
+}
+
+int AddPairLatency() { return 3 * AdduLatency() + SltLatency(); }
+
+int SubPairLatency() { return SltuLatency() + 3 * SubuLatency(); }
+
+int MuluLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (!is_operand_register) latency++;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return latency + Latency::MULTU + 2;
+ } else {
+ return latency + Latency::MULU + Latency::MUHU;
+ }
+}
+
+int MulPairLatency() {
+ return MuluLatency() + 2 * MulLatency() + 2 * AdduLatency();
+}
+
+int MaddSLatency() {
+ if (IsMipsArchVariant(kMips32r2)) {
+ return Latency::MADD_D;
+ } else {
+ return Latency::MUL_D + Latency::ADD_D;
+ }
+}
+
+int MaddDLatency() {
+ if (IsMipsArchVariant(kMips32r2)) {
+ return Latency::MADD_D;
+ } else {
+ return Latency::MUL_D + Latency::ADD_D;
+ }
+}
+
+int MsubSLatency() {
+ if (IsMipsArchVariant(kMips32r2)) {
+ return Latency::MSUB_S;
+ } else {
+ return Latency::MUL_S + Latency::SUB_S;
+ }
+}
+
+int MsubDLatency() {
+ if (IsMipsArchVariant(kMips32r2)) {
+ return Latency::MSUB_D;
+ } else {
+ return Latency::MUL_D + Latency::SUB_D;
+ }
+}
+
+int Mfhc1Latency() {
+ if (IsFp32Mode()) {
+ return Latency::MFC1;
+ } else {
+ return 1;
+ }
+}
+
+int Mthc1Latency() {
+ if (IsFp32Mode()) {
+ return Latency::MTC1;
+ } else {
+ return 1;
+ }
+}
+
+int MoveLatency(bool is_double_register = true) {
+ if (!is_double_register) {
+ return Latency::MTC1 + 1;
+ } else {
+ return Mthc1Latency() + 1; // Estimated.
+ }
+}
+
+int Float64RoundLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::RINT_D + 4;
+ } else {
+ // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4.
+ return Mfhc1Latency() + ExtLatency() + Latency::BRANCH + Latency::MOV_D +
+ 4 + MoveLatency() + 1 + Latency::BRANCH + Latency::CVT_D_L;
+ }
+}
+
+int Float32RoundLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::RINT_S + 4;
+ } else {
+ // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4.
+ return Latency::MFC1 + ExtLatency() + Latency::BRANCH + Latency::MOV_S + 4 +
+ Latency::MFC1 + Latency::BRANCH + Latency::CVT_S_W;
+ }
+}
+
+int CvtDUwLatency() {
+ if (IsFp64Mode()) {
+ return Latency::MTC1 + Mthc1Latency() + Latency::CVT_D_L;
+ } else {
+ return Latency::BRANCH + Latency::MTC1 + 1 + Latency::MTC1 +
+ Mthc1Latency() + Latency::CVT_D_W + Latency::BRANCH +
+ Latency::ADD_D + Latency::CVT_D_W;
+ }
+}
+
+int CvtSUwLatency() { return CvtDUwLatency() + Latency::CVT_S_D; }
+
+int Floor_w_dLatency() {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Mfhc1Latency() + Latency::FLOOR_W_D + Mthc1Latency();
+ } else {
+ return Latency::FLOOR_W_D;
+ }
+}
+
+int FloorWDLatency() { return Floor_w_dLatency() + Latency::MFC1; }
+
+int Ceil_w_dLatency() {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Mfhc1Latency() + Latency::CEIL_W_D + Mthc1Latency();
+ } else {
+ return Latency::CEIL_W_D;
+ }
+}
+
+int CeilWDLatency() { return Ceil_w_dLatency() + Latency::MFC1; }
+
+int Round_w_dLatency() {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Mfhc1Latency() + Latency::ROUND_W_D + Mthc1Latency();
+ } else {
+ return Latency::ROUND_W_D;
+ }
+}
+
+int RoundWDLatency() { return Round_w_dLatency() + Latency::MFC1; }
+
+int Trunc_w_dLatency() {
+ if (IsMipsArchVariant(kLoongson)) {
+ return Mfhc1Latency() + Latency::TRUNC_W_D + Mthc1Latency();
+ } else {
+ return Latency::TRUNC_W_D;
+ }
+}
+
+int MovnLatency() {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ return Latency::BRANCH + 1;
+ } else {
+ return 1;
+ }
+}
+
+int Trunc_uw_dLatency() {
+ return 1 + Latency::MTC1 + Mthc1Latency() + Latency::BRANCH + Latency::SUB_D +
+ Latency::TRUNC_W_D + Latency::MFC1 + OrLatency(false) +
+ Latency::BRANCH + Latency::TRUNC_W_D + Latency::MFC1;
+}
+
+int Trunc_uw_sLatency() {
+ return 1 + Latency::MTC1 + Latency::BRANCH + Latency::SUB_S +
+ Latency::TRUNC_W_S + Latency::MFC1 + OrLatency(false) +
+ Latency::TRUNC_W_S + Latency::MFC1;
+}
+
+int MovzLatency() {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ return Latency::BRANCH + 1;
+ } else {
+ return 1;
+ }
+}
+
+int FmoveLowLatency() {
+ if (IsFp32Mode()) {
+ return Latency::MTC1;
+ } else {
+ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1;
+ }
+}
+
+int SebLatency() {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+int SehLatency() {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+int UlhuLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return 4;
+ }
+}
+
+int UlhLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return 4;
+ }
+}
+
+int AdjustBaseAndOffsetLatency() {
+ return 3; // Estimated max.
+}
+
+int UshLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return AdjustBaseAndOffsetLatency() + 4; // Estimated max.
+ }
+}
+
+int UlwLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return AdjustBaseAndOffsetLatency() + 3; // Estimated max.
+ }
}
+int UswLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return 1;
+ } else {
+ return AdjustBaseAndOffsetLatency() + 2;
+ }
+}
+
+int Ulwc1Latency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::LWC1;
+ } else {
+ return UlwLatency() + Latency::MTC1;
+ }
+}
+
+int Uswc1Latency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::SWC1;
+ } else {
+ return Latency::MFC1 + UswLatency();
+ }
+}
+
+int Ldc1Latency() {
+ int latency = AdjustBaseAndOffsetLatency() + Latency::LWC1;
+ if (IsFp32Mode()) {
+ return latency + Latency::LWC1;
+ } else {
+ return latency + 1 + Mthc1Latency();
+ }
+}
+
+int Uldc1Latency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Ldc1Latency();
+ } else {
+ return 2 * UlwLatency() + Latency::MTC1 + Mthc1Latency();
+ }
+}
+
+int Sdc1Latency() {
+ int latency = AdjustBaseAndOffsetLatency() + Latency::SWC1;
+ if (IsFp32Mode()) {
+ return latency + Latency::SWC1;
+ } else {
+ return latency + Mfhc1Latency() + 1;
+ }
+}
+
+int Usdc1Latency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Sdc1Latency();
+ } else {
+ return Latency::MFC1 + 2 * UswLatency() + Mfhc1Latency();
+ }
+}
+
+int PushRegisterLatency() { return AdduLatency(false) + 1; }
+
+int ByteSwapSignedLatency() {
+ // operand_size == 4
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return 2;
+ } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
+ return 10;
+ }
+}
+
+int LlLatency(int offset) {
+ bool is_one_instruction =
+ IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int ExtractBitsLatency(int size, bool sign_extend) {
+ int latency = 1 + ExtLatency();
+ if (size == 8) {
+ if (sign_extend) {
+ return latency + SebLatency();
+ } else {
+ return 0;
+ }
+ } else if (size == 16) {
+ if (sign_extend) {
+ return latency + SehLatency();
+ } else {
+ return 0;
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+int NegLatency() { return 1; }
+
+int InsertBitsLatency() {
+ return RorLatency() + InsLatency() + SubuLatency(false) + NegLatency() +
+ RorLatency();
+}
+
+int ScLatency(int offset) {
+ bool is_one_instruction =
+ IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int BranchShortHelperR6Latency() {
+ return 2; // Estimated max.
+}
+
+int BranchShortHelperLatency() {
+ return SltLatency() + 2; // Estimated max.
+}
+
+int BranchShortLatency(BranchDelaySlot bdslot = PROTECT) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ return BranchShortHelperR6Latency();
+ } else {
+ return BranchShortHelperLatency();
+ }
+}
+
+int Word32AtomicExchangeLatency(bool sign_extend, int size) {
+ return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(size, sign_extend) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
+ return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(size, sign_extend) + BranchShortLatency() + 1;
+}
+
+int AddOverflowLatency() {
+ return 6; // Estimated max.
+}
+
+int SubOverflowLatency() {
+ return 6; // Estimated max.
+}
+
+int MulhLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return Latency::MULT + Latency::MFHI;
+ } else {
+ return Latency::MUH;
+ }
+ } else {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return 1 + Latency::MULT + Latency::MFHI;
+ } else {
+ return 1 + Latency::MUH;
+ }
+ }
+}
+
+int MulhuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return Latency::MULTU + Latency::MFHI;
+ } else {
+ return Latency::MUHU;
+ }
+ } else {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return 1 + Latency::MULTU + Latency::MFHI;
+ } else {
+ return 1 + Latency::MUHU;
+ }
+ }
+}
+
+int MulOverflowLatency() {
+ return MulLatency() + 4; // Estimated max.
+}
+
+int ModLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return Latency::DIV + Latency::MFHI;
+ } else {
+ return 1;
+ }
+ } else {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return 1 + Latency::DIV + Latency::MFHI;
+ } else {
+ return 2;
+ }
+ }
+}
+
+int ModuLatency(bool is_operand_register = true) {
+ return ModLatency(is_operand_register);
+}
+
+int DivLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return Latency::DIV + Latency::MFLO;
+ } else {
+ return Latency::DIV;
+ }
+ } else {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return 1 + Latency::DIV + Latency::MFLO;
+ } else {
+ return 1 + Latency::DIV;
+ }
+ }
+}
+
+int DivuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return Latency::DIVU + Latency::MFLO;
+ } else {
+ return Latency::DIVU;
+ }
+ } else {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return 1 + Latency::DIVU + Latency::MFLO;
+ } else {
+ return 1 + Latency::DIVU;
+ }
+ }
+}
+
+int CtzLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return RorLatency(false) + 2 + ClzLatency();
+ } else {
+ return AdduLatency(false) + XorLatency() + AndLatency() + ClzLatency() + 1 +
+ SubuLatency();
+ }
+}
+
+int PopcntLatency() {
+ return 4 * AndLatency() + SubuLatency() + 2 * AdduLatency() + MulLatency() +
+ 8;
+}
+
+int CompareFLatency() { return Latency::C_cond_S; }
+
+int CompareIsNanFLatency() { return CompareFLatency(); }
+
+int CompareIsNanF32Latency() { return CompareIsNanFLatency(); }
+
+int Neg_sLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::NEG_S;
+ } else {
+ // Estimated.
+ return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S +
+ Latency::MFC1 + 1 + XorLatency() + Latency::MTC1;
+ }
+}
+
+int CompareIsNanF64Latency() { return CompareIsNanFLatency(); }
+
+int Neg_dLatency() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return Latency::NEG_D;
+ } else {
+ // Estimated.
+ return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D +
+ Mfhc1Latency() + 1 + XorLatency() + Mthc1Latency();
+ }
+}
+
+int CompareF32Latency() { return CompareFLatency(); }
+
+int Move_sLatency() {
+ return Latency::MOV_S; // Estimated max.
+}
+
+int Float32MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ if (IsMipsArchVariant(kMips32r6)) {
+ return latency + Latency::MAX_S;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MFC1 + Move_sLatency();
+ }
+}
+
+int CompareF64Latency() { return CompareF32Latency(); }
+
+int Move_dLatency() {
+ return Latency::MOV_D; // Estimated max.
+}
+
+int Float64MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ if (IsMipsArchVariant(kMips32r6)) {
+ return latency + Latency::MAX_D;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::MFHC1 + 2 * Move_dLatency();
+ }
+}
+
+int PrepareCallCFunctionLatency() {
+ int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ return 1 + SubuLatency(false) + AndLatency(false) + 1;
+ } else {
+ return SubuLatency(false);
+ }
+}
+
+int MovToFloatParametersLatency() { return 2 * MoveLatency(); }
+
+int CallLatency() {
+ // Estimated.
+ return AdduLatency(false) + Latency::BRANCH + 3;
+}
+
+int CallCFunctionHelperLatency() {
+ // Estimated.
+ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ latency++;
+ } else {
+ latency += AdduLatency(false);
+ }
+ return latency;
+}
+
+int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); }
+
+int MovFromFloatResultLatency() { return MoveLatency(); }
+
+int Float32MinLatency() {
+ // Estimated max.
+ return CompareIsNanF32Latency() + Latency::BRANCH +
+ 2 * (CompareF32Latency() + Latency::BRANCH) + Latency::MFC1 +
+ 2 * Latency::BRANCH + Move_sLatency();
+}
+
+int Float64MinLatency() {
+ // Estimated max.
+ return CompareIsNanF64Latency() + Latency::BRANCH +
+ 2 * (CompareF64Latency() + Latency::BRANCH) + Mfhc1Latency() +
+ 2 * Latency::BRANCH + Move_dLatency();
+}
+
+int SmiUntagLatency() { return 1; }
+
+int PrepareForTailCallLatency() {
+ // Estimated max.
+ return 2 * (LsaLatency() + AdduLatency(false)) + 2 + Latency::BRANCH +
+ Latency::BRANCH + 2 * SubuLatency(false) + 2 + Latency::BRANCH + 1;
+}
+
+int AssemblePopArgumentsAdaptorFrameLatency() {
+ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
+ PrepareForTailCallLatency();
+}
+
+int JumpLatency() {
+ // Estimated max.
+ return 1 + AdduLatency(false) + Latency::BRANCH + 2;
+}
+
+int AssertLatency() { return 1; }
+
+int MultiPushLatency() {
+ int latency = SubuLatency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPushFPULatency() {
+ int latency = SubuLatency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency += Sdc1Latency();
+ }
+ return latency;
+}
+
+int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPushLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPushFPULatency();
+ }
+ return latency;
+}
+
+int MultiPopFPULatency() {
+ int latency = 0;
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency += Ldc1Latency();
+ }
+ return latency++;
+}
+
+int MultiPopLatency() {
+ int latency = 0;
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency++;
+ }
+ return latency++;
+}
+
+int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = 0;
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPopFPULatency();
+ }
+ return latency + MultiPopLatency();
+}
+
+int AssembleArchJumpLatency() {
+ // Estimated max.
+ return Latency::BRANCH;
+}
+
+int AssembleArchLookupSwitchLatency(int cases) {
+ return cases * (1 + Latency::BRANCH) + AssembleArchJumpLatency();
+}
+
+int AssembleArchBinarySearchSwitchLatency(int cases) {
+ if (cases < CodeGenerator::kBinarySearchSwitchMinimalCases) {
+ return AssembleArchLookupSwitchLatency(cases);
+ }
+ return 1 + Latency::BRANCH + AssembleArchBinarySearchSwitchLatency(cases / 2);
+}
+
+int GenerateSwitchTableLatency() {
+ int latency = 0;
+ if (kArchVariant >= kMips32r6) {
+ latency = LsaLatency() + 2;
+ } else {
+ latency = 6;
+ }
+ latency += 2;
+ return latency;
+}
+
+int AssembleArchTableSwitchLatency() {
+ return Latency::BRANCH + GenerateSwitchTableLatency();
+}
+
+int AssembleReturnLatency() {
+ // Estimated max.
+ return AdduLatency(false) + MultiPopLatency() + MultiPopFPULatency() +
+ Latency::BRANCH + 1 + AdduLatency() + 8;
+}
+
+int TryInlineTruncateDoubleToILatency() {
+ return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int CallStubDelayedLatency() { return 1 + CallLatency(); }
+
+int TruncateDoubleToIDelayedLatency() {
+ return TryInlineTruncateDoubleToILatency() + 1 + SubuLatency(false) +
+ Sdc1Latency() + CallStubDelayedLatency() + AdduLatency(false) + 1;
+}
+
+int CheckPageFlagLatency() {
+ return 2 * AndLatency(false) + 1 + Latency::BRANCH;
+}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- UNIMPLEMENTED();
+ // Basic latency modeling for MIPS32 instructions. They have been determined
+ // in an empirical way.
+ switch (instr->arch_opcode()) {
+ case kArchCallCodeObject:
+ case kArchCallWasmFunction:
+ return CallLatency();
+ case kArchTailCallCodeObjectFromJSFunction:
+ case kArchTailCallCodeObject: {
+ int latency = 0;
+ if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
+ latency = AssemblePopArgumentsAdaptorFrameLatency();
+ }
+ return latency + JumpLatency();
+ }
+ case kArchTailCallWasm:
+ case kArchTailCallAddress:
+ return JumpLatency();
+ case kArchCallJSFunction: {
+ int latency = 0;
+ if (FLAG_debug_code) {
+ latency = 1 + AssertLatency();
+ }
+ return latency + 1 + AdduLatency(false) + CallLatency();
+ }
+ case kArchPrepareCallCFunction:
+ return PrepareCallCFunctionLatency();
+ case kArchSaveCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PushCallerSavedLatency(fp_mode);
+ }
+ case kArchRestoreCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PopCallerSavedLatency(fp_mode);
+ }
+ case kArchPrepareTailCall:
+ return 2; // Estimated max.
+ case kArchCallCFunction:
+ return CallCFunctionLatency();
+ case kArchJmp:
+ return AssembleArchJumpLatency();
+ case kArchBinarySearchSwitch:
+ return AssembleArchBinarySearchSwitchLatency((instr->InputCount() - 2) /
+ 2);
+ case kArchLookupSwitch:
+ return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
+ case kArchTableSwitch:
+ return AssembleArchTableSwitchLatency();
+ case kArchDebugAbort:
+ return CallLatency() + 1;
+ case kArchComment:
+ case kArchDeoptimize:
+ return 0;
+ case kArchRet:
+ return AssembleReturnLatency();
+ case kArchTruncateDoubleToI:
+ return TruncateDoubleToIDelayedLatency();
+ case kArchStoreWithWriteBarrier:
+ return AdduLatency() + 1 + CheckPageFlagLatency();
+ case kArchStackSlot: {
+ // Estimated max.
+ return AdduLatency(false) + AndLatency(false) + AssertLatency() +
+ AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
+ SubuLatency() + AdduLatency();
+ }
+ case kArchWordPoisonOnSpeculation:
+ return AndLatency();
+ case kIeee754Float64Acos:
+ case kIeee754Float64Acosh:
+ case kIeee754Float64Asin:
+ case kIeee754Float64Asinh:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Cosh:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Pow:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Sinh:
+ case kIeee754Float64Tan:
+ case kIeee754Float64Tanh:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kMipsAdd:
+ return AdduLatency(instr->InputAt(1)->IsRegister());
+ case kMipsAnd:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kMipsOr:
+ return OrLatency(instr->InputAt(1)->IsRegister());
+ case kMipsXor:
+ return XorLatency(instr->InputAt(1)->IsRegister());
+ case kMipsSub:
+ return SubuLatency(instr->InputAt(1)->IsRegister());
+ case kMipsNor:
+ return NorLatency(instr->InputAt(1)->IsRegister());
+ case kMipsAddOvf:
+ return AddOverflowLatency();
+ case kMipsSubOvf:
+ return SubOverflowLatency();
+ case kMipsMul:
+ return MulLatency(false);
+ case kMipsMulHigh:
+ return MulhLatency(instr->InputAt(1)->IsRegister());
+ case kMipsMulHighU:
+ return MulhuLatency(instr->InputAt(1)->IsRegister());
+ case kMipsMulOvf:
+ return MulOverflowLatency();
+ case kMipsMod:
+ return ModLatency(instr->InputAt(1)->IsRegister());
+ case kMipsModU:
+ return ModuLatency(instr->InputAt(1)->IsRegister());
+ case kMipsDiv: {
+ int latency = DivLatency(instr->InputAt(1)->IsRegister());
+ if (IsMipsArchVariant(kMips32r6)) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMipsDivU: {
+ int latency = DivuLatency(instr->InputAt(1)->IsRegister());
+ if (IsMipsArchVariant(kMips32r6)) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMipsClz:
+ return ClzLatency();
+ case kMipsCtz:
+ return CtzLatency();
+ case kMipsPopcnt:
+ return PopcntLatency();
+ case kMipsShlPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ return ShlPairLatency();
+ } else {
+ return ShlPairLatency(false);
+ }
+ }
+ case kMipsShrPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ return ShrPairLatency();
+ } else {
+ // auto immediate_operand = ImmediateOperand::cast(instr->InputAt(2));
+ // return ShrPairLatency(false, immediate_operand->inline_value());
+ return 1;
+ }
+ }
+ case kMipsSarPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ return SarPairLatency();
+ } else {
+ return SarPairLatency(false);
+ }
+ }
+ case kMipsExt:
+ return ExtLatency();
+ case kMipsIns:
+ return InsLatency();
+ case kMipsRor:
+ return RorLatency(instr->InputAt(1)->IsRegister());
+ case kMipsLsa:
+ return LsaLatency();
+ case kMipsModS:
+ case kMipsModD:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kMipsAddPair:
+ return AddPairLatency();
+ case kMipsSubPair:
+ return SubPairLatency();
+ case kMipsMulPair:
+ return MulPairLatency();
+ case kMipsMaddS:
+ return MaddSLatency();
+ case kMipsMaddD:
+ return MaddDLatency();
+ case kMipsMsubS:
+ return MsubSLatency();
+ case kMipsMsubD:
+ return MsubDLatency();
+ case kMipsNegS:
+ return Neg_sLatency();
+ case kMipsNegD:
+ return Neg_dLatency();
+ case kMipsFloat64RoundDown:
+ case kMipsFloat64RoundTruncate:
+ case kMipsFloat64RoundUp:
+ case kMipsFloat64RoundTiesEven:
+ return Float64RoundLatency();
+ case kMipsFloat32RoundDown:
+ case kMipsFloat32RoundTruncate:
+ case kMipsFloat32RoundUp:
+ case kMipsFloat32RoundTiesEven:
+ return Float32RoundLatency();
+ case kMipsFloat32Max:
+ return Float32MaxLatency();
+ case kMipsFloat64Max:
+ return Float64MaxLatency();
+ case kMipsFloat32Min:
+ return Float32MinLatency();
+ case kMipsFloat64Min:
+ return Float64MinLatency();
+ case kMipsCvtSUw:
+ return CvtSUwLatency();
+ case kMipsCvtDUw:
+ return CvtDUwLatency();
+ case kMipsFloorWD:
+ return FloorWDLatency();
+ case kMipsCeilWD:
+ return CeilWDLatency();
+ case kMipsRoundWD:
+ return RoundWDLatency();
+ case kMipsTruncWD:
+ return Trunc_w_dLatency() + Latency::MFC1;
+ case kMipsTruncWS:
+ return Latency::TRUNC_W_S + Latency::MFC1 + AdduLatency(false) +
+ SltLatency() + MovnLatency();
+ case kMipsTruncUwD:
+ return Trunc_uw_dLatency();
+ case kMipsTruncUwS:
+ return Trunc_uw_sLatency() + AdduLatency(false) + MovzLatency();
+ case kMipsFloat64ExtractLowWord32:
+ return Latency::MFC1;
+ case kMipsFloat64ExtractHighWord32:
+ return Mfhc1Latency();
+ case kMipsFloat64InsertLowWord32: {
+ if (IsFp32Mode()) {
+ return Latency::MTC1;
+ } else {
+ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1;
+ }
+ }
+ case kMipsFloat64InsertHighWord32:
+ return Mthc1Latency();
+ case kMipsFloat64SilenceNaN:
+ return Latency::SUB_D;
+ case kMipsSeb:
+ return SebLatency();
+ case kMipsSeh:
+ return SehLatency();
+ case kMipsUlhu:
+ return UlhuLatency();
+ case kMipsUlh:
+ return UlhLatency();
+ case kMipsUsh:
+ return UshLatency();
+ case kMipsUlw:
+ return UlwLatency();
+ case kMipsUsw:
+ return UswLatency();
+ case kMipsUlwc1:
+ return Ulwc1Latency();
+ case kMipsSwc1:
+ return MoveLatency(false) + Latency::SWC1; // Estimated max.
+ case kMipsUswc1:
+ return MoveLatency(false) + Uswc1Latency(); // Estimated max.
+ case kMipsLdc1:
+ return Ldc1Latency();
+ case kMipsUldc1:
+ return Uldc1Latency();
+ case kMipsSdc1:
+ return MoveLatency(false) + Sdc1Latency(); // Estimated max.
+ case kMipsUsdc1:
+ return MoveLatency(false) + Usdc1Latency(); // Estimated max.
+ case kMipsPush: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->InputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ return Latency::SWC1 + SubuLatency(false);
+ break;
+ case MachineRepresentation::kFloat64:
+ return Sdc1Latency() + SubuLatency(false);
+ break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ return PushRegisterLatency();
+ }
+ break;
+ }
+ case kMipsPeek: {
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ return Ldc1Latency();
+ } else {
+ return Latency::LWC1;
+ }
+ } else {
+ return 1;
+ }
+ break;
+ }
+ case kMipsStackClaim:
+ return SubuLatency(false);
+ case kMipsStoreToStackSlot: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ return Sdc1Latency();
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ return Latency::SWC1;
+ } else {
+ return 1; // Estimated value.
+ }
+ } else {
+ return 1;
+ }
+ break;
+ }
+ case kMipsByteSwap32:
+ return ByteSwapSignedLatency();
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ return 2;
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
+ return 3;
+ case kWord32AtomicExchangeInt8:
+ return Word32AtomicExchangeLatency(true, 8);
+ case kWord32AtomicExchangeUint8:
+ return Word32AtomicExchangeLatency(false, 8);
+ case kWord32AtomicExchangeInt16:
+ return Word32AtomicExchangeLatency(true, 16);
+ case kWord32AtomicExchangeUint16:
+ return Word32AtomicExchangeLatency(false, 16);
+ case kWord32AtomicExchangeWord32: {
+ return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) +
+ BranchShortLatency() + 1;
+ }
+ case kWord32AtomicCompareExchangeInt8:
+ return Word32AtomicCompareExchangeLatency(true, 8);
+ case kWord32AtomicCompareExchangeUint8:
+ return Word32AtomicCompareExchangeLatency(false, 8);
+ case kWord32AtomicCompareExchangeInt16:
+ return Word32AtomicCompareExchangeLatency(true, 16);
+ case kWord32AtomicCompareExchangeUint16:
+ return Word32AtomicCompareExchangeLatency(false, 16);
+ case kWord32AtomicCompareExchangeWord32:
+ return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1;
+ case kMipsTst:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kMipsCmpS:
+ return MoveLatency() + CompareF32Latency();
+ case kMipsCmpD:
+ return MoveLatency() + CompareF64Latency();
+ case kArchNop:
+ case kArchThrowTerminator:
+ case kMipsCmp:
+ return 0;
+ case kArchDebugBreak:
+ case kArchStackPointer:
+ case kArchFramePointer:
+ case kArchParentFramePointer:
+ case kMipsShl:
+ case kMipsShr:
+ case kMipsSar:
+ case kMipsMov:
+ case kMipsMaxS:
+ case kMipsMinS:
+ case kMipsMaxD:
+ case kMipsMinD:
+ case kMipsLbu:
+ case kMipsLb:
+ case kMipsSb:
+ case kMipsLhu:
+ case kMipsLh:
+ case kMipsSh:
+ case kMipsLw:
+ case kMipsSw:
+ case kMipsLwc1:
+ return 1;
+ case kMipsAddS:
+ return Latency::ADD_S;
+ case kMipsSubS:
+ return Latency::SUB_S;
+ case kMipsMulS:
+ return Latency::MUL_S;
+ case kMipsAbsS:
+ return Latency::ABS_S;
+ case kMipsAddD:
+ return Latency::ADD_D;
+ case kMipsSubD:
+ return Latency::SUB_D;
+ case kMipsAbsD:
+ return Latency::ABS_D;
+ case kMipsCvtSD:
+ return Latency::CVT_S_D;
+ case kMipsCvtDS:
+ return Latency::CVT_D_S;
+ case kMipsMulD:
+ return Latency::MUL_D;
+ case kMipsFloorWS:
+ return Latency::FLOOR_W_S;
+ case kMipsCeilWS:
+ return Latency::CEIL_W_S;
+ case kMipsRoundWS:
+ return Latency::ROUND_W_S;
+ case kMipsCvtDW:
+ return Latency::CVT_D_W;
+ case kMipsCvtSW:
+ return Latency::CVT_S_W;
+ case kMipsDivS:
+ return Latency::DIV_S;
+ case kMipsSqrtS:
+ return Latency::SQRT_S;
+ case kMipsDivD:
+ return Latency::DIV_D;
+ case kMipsSqrtD:
+ return Latency::SQRT_D;
+ default:
+ return 1;
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index a60c866353..cfe132338c 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -1555,11 +1555,10 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(std::move(sw), value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
@@ -2133,7 +2132,9 @@ static const ShuffleEntry arch_shuffles[] = {
{{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
for (size_t i = 0; i < num_entries; ++i) {
const ShuffleEntry& entry = table[i];
int j = 0;
@@ -2153,35 +2154,35 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
- uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
uint8_t shuffle32x4[4];
ArchOpcode opcode;
if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- mask, &opcode)) {
+ is_swizzle, &opcode)) {
VisitRRR(this, opcode, node);
return;
}
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
uint8_t offset;
MipsOperandGenerator g(this);
- if (TryMatchConcat(shuffle, mask, &offset)) {
- Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
- g.UseImmediate(offset));
+ if (TryMatchConcat(shuffle, &offset)) {
+ Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
return;
}
- Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+ Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index f76f3fc448..7beb887b53 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -10,6 +10,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
+#include "src/mips64/constants-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/optimized-compilation-info.h"
@@ -617,9 +618,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ daddiu(kScratchReg, i.InputRegister(0),
- Code::kHeaderSize - kHeapObjectTag);
- __ Call(kScratchReg);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -632,10 +636,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
- : RelocInfo::JS_TO_WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
} else {
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Call(kScratchReg);
@@ -654,9 +657,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ daddiu(kScratchReg, i.InputRegister(0),
- Code::kHeaderSize - kHeapObjectTag);
- __ Jump(kScratchReg);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -664,10 +670,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallWasm: {
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
- : RelocInfo::JS_TO_WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
} else {
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Jump(kScratchReg);
@@ -678,7 +683,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -765,6 +774,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -819,12 +831,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ mov(i.OutputRegister(), kRootRegister);
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -1786,7 +1795,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
} else {
DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
- __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ __ lwc1(
+ i.OutputSingleRegister(0),
+ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
}
} else {
__ Ld(i.OutputRegister(0), MemOperand(fp, offset));
@@ -1816,8 +1827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64ByteSwap32: {
- __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
- __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
case kWord32AtomicLoadInt8:
@@ -2978,13 +2988,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
-#define UNSUPPORTED_COND(opcode, condition) \
- OFStream out(stdout); \
- out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
UNIMPLEMENTED();
-
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -3179,29 +3187,18 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
MipsOperandConverter i(gen_, instr_);
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -3218,8 +3215,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -3229,12 +3227,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
@@ -3404,6 +3400,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
@@ -3613,7 +3619,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
- __ li(dst, Operand(src.ToExternalReference()));
+ __ li(dst, src.ToExternalReference());
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
@@ -3634,10 +3640,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ Sw(zero_reg, dst);
+ __ Sd(zero_reg, dst);
} else {
__ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ Sw(kScratchReg, dst);
+ __ Sd(kScratchReg, dst);
}
} else {
DCHECK(destination->IsFPRegister());
diff --git a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
index af86a87ad7..b0f6d65bfe 100644
--- a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -8,17 +8,1675 @@ namespace v8 {
namespace internal {
namespace compiler {
-bool InstructionScheduler::SchedulerSupported() { return false; }
-
+bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
- UNIMPLEMENTED();
+ switch (instr->arch_opcode()) {
+ case kMips64AbsD:
+ case kMips64AbsS:
+ case kMips64Add:
+ case kMips64AddD:
+ case kMips64AddS:
+ case kMips64And:
+ case kMips64And32:
+ case kMips64AssertEqual:
+ case kMips64BitcastDL:
+ case kMips64BitcastLD:
+ case kMips64ByteSwap32:
+ case kMips64ByteSwap64:
+ case kMips64CeilWD:
+ case kMips64CeilWS:
+ case kMips64Clz:
+ case kMips64Cmp:
+ case kMips64CmpD:
+ case kMips64CmpS:
+ case kMips64Ctz:
+ case kMips64CvtDL:
+ case kMips64CvtDS:
+ case kMips64CvtDUl:
+ case kMips64CvtDUw:
+ case kMips64CvtDW:
+ case kMips64CvtSD:
+ case kMips64CvtSL:
+ case kMips64CvtSUl:
+ case kMips64CvtSUw:
+ case kMips64CvtSW:
+ case kMips64DMulHigh:
+ case kMips64MulHighU:
+ case kMips64Dadd:
+ case kMips64DaddOvf:
+ case kMips64Dclz:
+ case kMips64Dctz:
+ case kMips64Ddiv:
+ case kMips64DdivU:
+ case kMips64Dext:
+ case kMips64Dins:
+ case kMips64Div:
+ case kMips64DivD:
+ case kMips64DivS:
+ case kMips64DivU:
+ case kMips64Dlsa:
+ case kMips64Dmod:
+ case kMips64DmodU:
+ case kMips64Dmul:
+ case kMips64Dpopcnt:
+ case kMips64Dror:
+ case kMips64Dsar:
+ case kMips64Dshl:
+ case kMips64Dshr:
+ case kMips64Dsub:
+ case kMips64DsubOvf:
+ case kMips64Ext:
+ case kMips64F32x4Abs:
+ case kMips64F32x4Add:
+ case kMips64F32x4AddHoriz:
+ case kMips64F32x4Eq:
+ case kMips64F32x4ExtractLane:
+ case kMips64F32x4Lt:
+ case kMips64F32x4Le:
+ case kMips64F32x4Max:
+ case kMips64F32x4Min:
+ case kMips64F32x4Mul:
+ case kMips64F32x4Ne:
+ case kMips64F32x4Neg:
+ case kMips64F32x4RecipApprox:
+ case kMips64F32x4RecipSqrtApprox:
+ case kMips64F32x4ReplaceLane:
+ case kMips64F32x4SConvertI32x4:
+ case kMips64F32x4Splat:
+ case kMips64F32x4Sub:
+ case kMips64F32x4UConvertI32x4:
+ case kMips64Float32Max:
+ case kMips64Float32Min:
+ case kMips64Float32RoundDown:
+ case kMips64Float32RoundTiesEven:
+ case kMips64Float32RoundTruncate:
+ case kMips64Float32RoundUp:
+ case kMips64Float64ExtractLowWord32:
+ case kMips64Float64ExtractHighWord32:
+ case kMips64Float64InsertLowWord32:
+ case kMips64Float64InsertHighWord32:
+ case kMips64Float64Max:
+ case kMips64Float64Min:
+ case kMips64Float64RoundDown:
+ case kMips64Float64RoundTiesEven:
+ case kMips64Float64RoundTruncate:
+ case kMips64Float64RoundUp:
+ case kMips64Float64SilenceNaN:
+ case kMips64FloorWD:
+ case kMips64FloorWS:
+ case kMips64I16x8Add:
+ case kMips64I16x8AddHoriz:
+ case kMips64I16x8AddSaturateS:
+ case kMips64I16x8AddSaturateU:
+ case kMips64I16x8Eq:
+ case kMips64I16x8ExtractLane:
+ case kMips64I16x8GeS:
+ case kMips64I16x8GeU:
+ case kMips64I16x8GtS:
+ case kMips64I16x8GtU:
+ case kMips64I16x8MaxS:
+ case kMips64I16x8MaxU:
+ case kMips64I16x8MinS:
+ case kMips64I16x8MinU:
+ case kMips64I16x8Mul:
+ case kMips64I16x8Ne:
+ case kMips64I16x8Neg:
+ case kMips64I16x8ReplaceLane:
+ case kMips64I8x16SConvertI16x8:
+ case kMips64I16x8SConvertI32x4:
+ case kMips64I16x8SConvertI8x16High:
+ case kMips64I16x8SConvertI8x16Low:
+ case kMips64I16x8Shl:
+ case kMips64I16x8ShrS:
+ case kMips64I16x8ShrU:
+ case kMips64I16x8Splat:
+ case kMips64I16x8Sub:
+ case kMips64I16x8SubSaturateS:
+ case kMips64I16x8SubSaturateU:
+ case kMips64I8x16UConvertI16x8:
+ case kMips64I16x8UConvertI32x4:
+ case kMips64I16x8UConvertI8x16High:
+ case kMips64I16x8UConvertI8x16Low:
+ case kMips64I32x4Add:
+ case kMips64I32x4AddHoriz:
+ case kMips64I32x4Eq:
+ case kMips64I32x4ExtractLane:
+ case kMips64I32x4GeS:
+ case kMips64I32x4GeU:
+ case kMips64I32x4GtS:
+ case kMips64I32x4GtU:
+ case kMips64I32x4MaxS:
+ case kMips64I32x4MaxU:
+ case kMips64I32x4MinS:
+ case kMips64I32x4MinU:
+ case kMips64I32x4Mul:
+ case kMips64I32x4Ne:
+ case kMips64I32x4Neg:
+ case kMips64I32x4ReplaceLane:
+ case kMips64I32x4SConvertF32x4:
+ case kMips64I32x4SConvertI16x8High:
+ case kMips64I32x4SConvertI16x8Low:
+ case kMips64I32x4Shl:
+ case kMips64I32x4ShrS:
+ case kMips64I32x4ShrU:
+ case kMips64I32x4Splat:
+ case kMips64I32x4Sub:
+ case kMips64I32x4UConvertF32x4:
+ case kMips64I32x4UConvertI16x8High:
+ case kMips64I32x4UConvertI16x8Low:
+ case kMips64I8x16Add:
+ case kMips64I8x16AddSaturateS:
+ case kMips64I8x16AddSaturateU:
+ case kMips64I8x16Eq:
+ case kMips64I8x16ExtractLane:
+ case kMips64I8x16GeS:
+ case kMips64I8x16GeU:
+ case kMips64I8x16GtS:
+ case kMips64I8x16GtU:
+ case kMips64I8x16MaxS:
+ case kMips64I8x16MaxU:
+ case kMips64I8x16MinS:
+ case kMips64I8x16MinU:
+ case kMips64I8x16Mul:
+ case kMips64I8x16Ne:
+ case kMips64I8x16Neg:
+ case kMips64I8x16ReplaceLane:
+ case kMips64I8x16Shl:
+ case kMips64I8x16ShrS:
+ case kMips64I8x16ShrU:
+ case kMips64I8x16Splat:
+ case kMips64I8x16Sub:
+ case kMips64I8x16SubSaturateS:
+ case kMips64I8x16SubSaturateU:
+ case kMips64Ins:
+ case kMips64Lsa:
+ case kMips64MaxD:
+ case kMips64MaxS:
+ case kMips64MinD:
+ case kMips64MinS:
+ case kMips64Mod:
+ case kMips64ModU:
+ case kMips64Mov:
+ case kMips64Mul:
+ case kMips64MulD:
+ case kMips64MulHigh:
+ case kMips64MulOvf:
+ case kMips64MulS:
+ case kMips64NegD:
+ case kMips64NegS:
+ case kMips64Nor:
+ case kMips64Nor32:
+ case kMips64Or:
+ case kMips64Or32:
+ case kMips64Popcnt:
+ case kMips64Ror:
+ case kMips64RoundWD:
+ case kMips64RoundWS:
+ case kMips64S128And:
+ case kMips64S128Or:
+ case kMips64S128Not:
+ case kMips64S128Select:
+ case kMips64S128Xor:
+ case kMips64S128Zero:
+ case kMips64S16x8InterleaveEven:
+ case kMips64S16x8InterleaveOdd:
+ case kMips64S16x8InterleaveLeft:
+ case kMips64S16x8InterleaveRight:
+ case kMips64S16x8PackEven:
+ case kMips64S16x8PackOdd:
+ case kMips64S16x2Reverse:
+ case kMips64S16x4Reverse:
+ case kMips64S1x16AllTrue:
+ case kMips64S1x16AnyTrue:
+ case kMips64S1x4AllTrue:
+ case kMips64S1x4AnyTrue:
+ case kMips64S1x8AllTrue:
+ case kMips64S1x8AnyTrue:
+ case kMips64S32x4InterleaveEven:
+ case kMips64S32x4InterleaveOdd:
+ case kMips64S32x4InterleaveLeft:
+ case kMips64S32x4InterleaveRight:
+ case kMips64S32x4PackEven:
+ case kMips64S32x4PackOdd:
+ case kMips64S32x4Shuffle:
+ case kMips64S8x16Concat:
+ case kMips64S8x16InterleaveEven:
+ case kMips64S8x16InterleaveOdd:
+ case kMips64S8x16InterleaveLeft:
+ case kMips64S8x16InterleaveRight:
+ case kMips64S8x16PackEven:
+ case kMips64S8x16PackOdd:
+ case kMips64S8x2Reverse:
+ case kMips64S8x4Reverse:
+ case kMips64S8x8Reverse:
+ case kMips64S8x16Shuffle:
+ case kMips64Sar:
+ case kMips64Seb:
+ case kMips64Seh:
+ case kMips64Shl:
+ case kMips64Shr:
+ case kMips64SqrtD:
+ case kMips64SqrtS:
+ case kMips64Sub:
+ case kMips64SubD:
+ case kMips64SubS:
+ case kMips64TruncLD:
+ case kMips64TruncLS:
+ case kMips64TruncUlD:
+ case kMips64TruncUlS:
+ case kMips64TruncUwD:
+ case kMips64TruncUwS:
+ case kMips64TruncWD:
+ case kMips64TruncWS:
+ case kMips64Tst:
+ case kMips64Xor:
+ case kMips64Xor32:
+ return kNoOpcodeFlags;
+
+ case kMips64Lb:
+ case kMips64Lbu:
+ case kMips64Ld:
+ case kMips64Ldc1:
+ case kMips64Lh:
+ case kMips64Lhu:
+ case kMips64Lw:
+ case kMips64Lwc1:
+ case kMips64Lwu:
+ case kMips64MsaLd:
+ case kMips64Peek:
+ case kMips64Uld:
+ case kMips64Uldc1:
+ case kMips64Ulh:
+ case kMips64Ulhu:
+ case kMips64Ulw:
+ case kMips64Ulwu:
+ case kMips64Ulwc1:
+ return kIsLoadOperation;
+
+ case kMips64ModD:
+ case kMips64ModS:
+ case kMips64MsaSt:
+ case kMips64Push:
+ case kMips64Sb:
+ case kMips64Sd:
+ case kMips64Sdc1:
+ case kMips64Sh:
+ case kMips64StackClaim:
+ case kMips64StoreToStackSlot:
+ case kMips64Sw:
+ case kMips64Swc1:
+ case kMips64Usd:
+ case kMips64Usdc1:
+ case kMips64Ush:
+ case kMips64Usw:
+ case kMips64Uswc1:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+enum Latency {
+ BRANCH = 4, // Estimated max.
+ RINT_S = 4, // Estimated.
+ RINT_D = 4, // Estimated.
+
+ MULT = 4,
+ MULTU = 4,
+ DMULT = 4,
+ DMULTU = 4,
+
+ MUL = 7,
+ DMUL = 7,
+ MUH = 7,
+ MUHU = 7,
+ DMUH = 7,
+ DMUHU = 7,
+
+ DIV = 50, // Min:11 Max:50
+ DDIV = 50,
+ DIVU = 50,
+ DDIVU = 50,
+
+ ABS_S = 4,
+ ABS_D = 4,
+ NEG_S = 4,
+ NEG_D = 4,
+ ADD_S = 4,
+ ADD_D = 4,
+ SUB_S = 4,
+ SUB_D = 4,
+ MAX_S = 4, // Estimated.
+ MIN_S = 4,
+ MAX_D = 4, // Estimated.
+ MIN_D = 4,
+ C_cond_S = 4,
+ C_cond_D = 4,
+ MUL_S = 4,
+
+ MADD_S = 4,
+ MSUB_S = 4,
+ NMADD_S = 4,
+ NMSUB_S = 4,
+
+ CABS_cond_S = 4,
+ CABS_cond_D = 4,
+
+ CVT_D_S = 4,
+ CVT_PS_PW = 4,
+
+ CVT_S_W = 4,
+ CVT_S_L = 4,
+ CVT_D_W = 4,
+ CVT_D_L = 4,
+
+ CVT_S_D = 4,
+
+ CVT_W_S = 4,
+ CVT_W_D = 4,
+ CVT_L_S = 4,
+ CVT_L_D = 4,
+
+ CEIL_W_S = 4,
+ CEIL_W_D = 4,
+ CEIL_L_S = 4,
+ CEIL_L_D = 4,
+
+ FLOOR_W_S = 4,
+ FLOOR_W_D = 4,
+ FLOOR_L_S = 4,
+ FLOOR_L_D = 4,
+
+ ROUND_W_S = 4,
+ ROUND_W_D = 4,
+ ROUND_L_S = 4,
+ ROUND_L_D = 4,
+
+ TRUNC_W_S = 4,
+ TRUNC_W_D = 4,
+ TRUNC_L_S = 4,
+ TRUNC_L_D = 4,
+
+ MOV_S = 4,
+ MOV_D = 4,
+
+ MOVF_S = 4,
+ MOVF_D = 4,
+
+ MOVN_S = 4,
+ MOVN_D = 4,
+
+ MOVT_S = 4,
+ MOVT_D = 4,
+
+ MOVZ_S = 4,
+ MOVZ_D = 4,
+
+ MUL_D = 5,
+ MADD_D = 5,
+ MSUB_D = 5,
+ NMADD_D = 5,
+ NMSUB_D = 5,
+
+ RECIP_S = 13,
+ RECIP_D = 26,
+
+ RSQRT_S = 17,
+ RSQRT_D = 36,
+
+ DIV_S = 17,
+ SQRT_S = 17,
+
+ DIV_D = 32,
+ SQRT_D = 32,
+
+ MTC1 = 4,
+ MTHC1 = 4,
+ DMTC1 = 4,
+ LWC1 = 4,
+ LDC1 = 4,
+
+ MFC1 = 1,
+ MFHC1 = 1,
+ DMFC1 = 1,
+ MFHI = 1,
+ MFLO = 1,
+ SWC1 = 1,
+ SDC1 = 1,
+};
+
+int DadduLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int DsubuLatency(bool is_operand_register = true) {
+ return DadduLatency(is_operand_register);
+}
+
+int AndLatency(bool is_operand_register = true) {
+ return DadduLatency(is_operand_register);
+}
+
+int OrLatency(bool is_operand_register = true) {
+ return DadduLatency(is_operand_register);
+}
+
+int NorLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int XorLatency(bool is_operand_register = true) {
+ return DadduLatency(is_operand_register);
+}
+
+int MulLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::MUL;
+ } else {
+ return Latency::MUL + 1;
+ }
+}
+
+int DmulLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::DMUL;
+ } else {
+ latency = Latency::DMULT + Latency::MFLO;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int MulhLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::MUH;
+ } else {
+ latency = Latency::MULT + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int MulhuLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::MUH;
+ } else {
+ latency = Latency::MULTU + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int DMulhLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::DMUH;
+ } else {
+ latency = Latency::DMULT + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int DivLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIV;
+ } else {
+ return Latency::DIV + 1;
+ }
+}
+
+int DivuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIVU;
+ } else {
+ return Latency::DIVU + 1;
+ }
+}
+
+int DdivLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::DDIV;
+ } else {
+ latency = Latency::DDIV + Latency::MFLO;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int DdivuLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = Latency::DDIVU;
+ } else {
+ latency = Latency::DDIVU + Latency::MFLO;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int ModLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = 1;
+ } else {
+ latency = Latency::DIV + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int ModuLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = 1;
+ } else {
+ latency = Latency::DIVU + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int DmodLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = 1;
+ } else {
+ latency = Latency::DDIV + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int DmoduLatency(bool is_operand_register = true) {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = 1;
+ } else {
+ latency = Latency::DDIV + Latency::MFHI;
+ }
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int MovzLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::BRANCH + 1;
+ } else {
+ return 1;
+ }
+}
+
+int MovnLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::BRANCH + 1;
+ } else {
+ return 1;
+ }
+}
+
+int DlsaLatency() {
+ // Estimated max.
+ return DadduLatency() + 1;
+}
+
+int CallLatency() {
+ // Estimated.
+ return DadduLatency(false) + Latency::BRANCH + 5;
+}
+
+int JumpLatency() {
+ // Estimated max.
+ return 1 + DadduLatency() + Latency::BRANCH + 2;
+}
+
+int SmiUntagLatency() { return 1; }
+
+int PrepareForTailCallLatency() {
+ // Estimated max.
+ return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH +
+ Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1;
}
+int AssemblePopArgumentsAdoptFrameLatency() {
+ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
+ PrepareForTailCallLatency();
+}
+
+int AssertLatency() { return 1; }
+
+int PrepareCallCFunctionLatency() {
+ int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ return 1 + DsubuLatency(false) + AndLatency(false) + 1;
+ } else {
+ return DsubuLatency(false);
+ }
+}
+
+int AdjustBaseAndOffsetLatency() {
+ return 3; // Estimated max.
+}
+
+int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; }
+
+int UlhuLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return AdjustBaseAndOffsetLatency() + 2 * AlignedMemoryLatency() + 2;
+ }
+}
+
+int UlwLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+ }
+}
+
+int UlwuLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return UlwLatency() + 1;
+ }
+}
+
+int UldLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+ }
+}
+
+int Ulwc1Latency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return UlwLatency() + Latency::MTC1;
+ }
+}
+
+int Uldc1Latency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return UldLatency() + Latency::DMTC1;
+ }
+}
+
+int UshLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 2 + 2 * AlignedMemoryLatency();
+ }
+}
+
+int UswLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return AdjustBaseAndOffsetLatency() + 2;
+ }
+}
+
+int UsdLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return AdjustBaseAndOffsetLatency() + 2;
+ }
+}
+
+int Uswc1Latency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return Latency::MFC1 + UswLatency();
+ }
+}
+
+int Usdc1Latency() {
+ if (kArchVariant >= kMips64r6) {
+ return AlignedMemoryLatency();
+ } else {
+ return Latency::DMFC1 + UsdLatency();
+ }
+}
+
+int Lwc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LWC1; }
+
+int Swc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SWC1; }
+
+int Sdc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SDC1; }
+
+int Ldc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LDC1; }
+
+int MultiPushLatency() {
+ int latency = DsubuLatency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPushFPULatency() {
+ int latency = DsubuLatency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency += Sdc1Latency();
+ }
+ return latency;
+}
+
+int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPushLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPushFPULatency();
+ }
+ return latency;
+}
+
+int MultiPopLatency() {
+ int latency = DadduLatency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPopFPULatency() {
+ int latency = DadduLatency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency += Ldc1Latency();
+ }
+ return latency;
+}
+
+int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPopLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPopFPULatency();
+ }
+ return latency;
+}
+
+int CallCFunctionHelperLatency() {
+ // Estimated.
+ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ latency++;
+ } else {
+ latency += DadduLatency(false);
+ }
+ return latency;
+}
+
+int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); }
+
+int AssembleArchJumpLatency() {
+ // Estimated max.
+ return Latency::BRANCH;
+}
+
+int AssembleArchLookupSwitchLatency(const Instruction* instr) {
+ int latency = 0;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ latency += 1 + Latency::BRANCH;
+ }
+ return latency + AssembleArchJumpLatency();
+}
+
+int GenerateSwitchTableLatency() {
+ int latency = 0;
+ if (kArchVariant >= kMips64r6) {
+ latency = DlsaLatency() + 2;
+ } else {
+ latency = 6;
+ }
+ latency += 2;
+ return latency;
+}
+
+int AssembleArchTableSwitchLatency() {
+ return Latency::BRANCH + GenerateSwitchTableLatency();
+}
+
+int DropAndRetLatency() {
+ // Estimated max.
+ return DadduLatency(false) + JumpLatency();
+}
+
+int AssemblerReturnLatency() {
+ // Estimated max.
+ return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() +
+ Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency();
+}
+
+int TryInlineTruncateDoubleToILatency() {
+ return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int CallStubDelayedLatency() { return 1 + CallLatency(); }
+
+int TruncateDoubleToIDelayedLatency() {
+ return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) +
+ Sdc1Latency() + CallStubDelayedLatency() + DadduLatency(false) + 1;
+}
+
+int CheckPageFlagLatency() {
+ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int SltuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int BranchShortHelperR6Latency() {
+ return 2; // Estimated max.
+}
+
+int BranchShortHelperLatency() {
+ return SltuLatency() + 2; // Estimated max.
+}
+
+int BranchShortLatency(BranchDelaySlot bdslot = PROTECT) {
+ if (kArchVariant >= kMips64r6 && bdslot == PROTECT) {
+ return BranchShortHelperR6Latency();
+ } else {
+ return BranchShortHelperLatency();
+ }
+}
+
+int MoveLatency() { return 1; }
+
+int MovToFloatParametersLatency() { return 2 * MoveLatency(); }
+
+int MovFromFloatResultLatency() { return MoveLatency(); }
+
+int DaddOverflowLatency() {
+ // Estimated max.
+ return 6;
+}
+
+int DsubOverflowLatency() {
+ // Estimated max.
+ return 6;
+}
+
+int MulOverflowLatency() {
+ // Estimated max.
+ return MulLatency() + MulhLatency() + 2;
+}
+
+int DclzLatency() { return 1; }
+
+int CtzLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return 3 + DclzLatency();
+ } else {
+ return DadduLatency(false) + XorLatency() + AndLatency() + DclzLatency() +
+ 1 + DsubuLatency();
+ }
+}
+
+int DctzLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return 4;
+ } else {
+ return DadduLatency(false) + XorLatency() + AndLatency() + 1 +
+ DsubuLatency();
+ }
+}
+
+int PopcntLatency() {
+ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 +
+ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() +
+ 1 + MulLatency() + 1;
+}
+
+int DpopcntLatency() {
+ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 +
+ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() +
+ 1 + DmulLatency() + 1;
+}
+
+int CompareFLatency() { return Latency::C_cond_S; }
+
+int CompareF32Latency() { return CompareFLatency(); }
+
+int CompareF64Latency() { return CompareFLatency(); }
+
+int CompareIsNanFLatency() { return CompareFLatency(); }
+
+int CompareIsNanF32Latency() { return CompareIsNanFLatency(); }
+
+int CompareIsNanF64Latency() { return CompareIsNanFLatency(); }
+
+int NegsLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::NEG_S;
+ } else {
+ // Estimated.
+ return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S +
+ Latency::MFC1 + 1 + XorLatency() + Latency::MTC1;
+ }
+}
+
+int NegdLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::NEG_D;
+ } else {
+ // Estimated.
+ return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D +
+ Latency::DMFC1 + 1 + XorLatency() + Latency::DMTC1;
+ }
+}
+
+int Float64RoundLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::RINT_D + 4;
+ } else {
+ // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4.
+ return Latency::DMFC1 + 1 + Latency::BRANCH + Latency::MOV_D + 4 +
+ Latency::DMFC1 + Latency::BRANCH + Latency::CVT_D_L + 2 +
+ Latency::MTHC1;
+ }
+}
+
+int Float32RoundLatency() {
+ if (kArchVariant >= kMips64r6) {
+ return Latency::RINT_S + 4;
+ } else {
+ // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4.
+ return Latency::MFC1 + 1 + Latency::BRANCH + Latency::MOV_S + 4 +
+ Latency::MFC1 + Latency::BRANCH + Latency::CVT_S_W + 2 +
+ Latency::MTC1;
+ }
+}
+
+int Float32MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ if (kArchVariant >= kMips64r6) {
+ return latency + Latency::MAX_S;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MFC1 + 1 + Latency::MOV_S;
+ }
+}
+
+int Float64MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ if (kArchVariant >= kMips64r6) {
+ return latency + Latency::MAX_D;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::DMFC1 + Latency::MOV_D;
+ }
+}
+
+int Float32MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ if (kArchVariant >= kMips64r6) {
+ return latency + Latency::MIN_S;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MFC1 + 1 + Latency::MOV_S;
+ }
+}
+
+int Float64MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ if (kArchVariant >= kMips64r6) {
+ return latency + Latency::MIN_D;
+ } else {
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::DMFC1 + Latency::MOV_D;
+ }
+}
+
+int TruncLSLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_S + Latency::DMFC1;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncLDLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_D + Latency::DMFC1;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncUlSLatency() {
+ // Estimated max.
+ return 2 * CompareF32Latency() + CompareIsNanF32Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S +
+ 3 * Latency::DMFC1 + OrLatency() + Latency::MTC1 + Latency::MOV_S +
+ SltuLatency() + 4;
+}
+
+int TruncUlDLatency() {
+ // Estimated max.
+ return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
+ 3 * Latency::DMFC1 + OrLatency() + Latency::DMTC1 + Latency::MOV_D +
+ SltuLatency() + 4;
+}
+
+int PushLatency() { return DadduLatency() + AlignedMemoryLatency(); }
+
+int ByteSwapSignedLatency() { return 2; }
+
+int LlLatency(int offset) {
+ bool is_one_instruction =
+ (kArchVariant == kMips64r6) ? is_int9(offset) : is_int16(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int ExtractBitsLatency(bool sign_extend, int size) {
+ int latency = 2;
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ case 16:
+ case 32:
+ latency += 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return latency;
+}
+
+int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; }
+
+int ScLatency(int offset) {
+ bool is_one_instruction =
+ (kArchVariant == kMips64r6) ? is_int9(offset) : is_int16(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int Word32AtomicExchangeLatency(bool sign_extend, int size) {
+ return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
+ return 2 + DsubuLatency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- UNIMPLEMENTED();
+ // Basic latency modeling for MIPS64 instructions. They have been determined
+ // in empirical way.
+ switch (instr->arch_opcode()) {
+ case kArchCallCodeObject:
+ case kArchCallWasmFunction:
+ return CallLatency();
+ case kArchTailCallCodeObjectFromJSFunction:
+ case kArchTailCallCodeObject: {
+ int latency = 0;
+ if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
+ latency = AssemblePopArgumentsAdoptFrameLatency();
+ }
+ return latency + JumpLatency();
+ }
+ case kArchTailCallWasm:
+ case kArchTailCallAddress:
+ return JumpLatency();
+ case kArchCallJSFunction: {
+ int latency = 0;
+ if (FLAG_debug_code) {
+ latency = 1 + AssertLatency();
+ }
+ return latency + 1 + DadduLatency(false) + CallLatency();
+ }
+ case kArchPrepareCallCFunction:
+ return PrepareCallCFunctionLatency();
+ case kArchSaveCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PushCallerSavedLatency(fp_mode);
+ }
+ case kArchRestoreCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PopCallerSavedLatency(fp_mode);
+ }
+ case kArchPrepareTailCall:
+ return 2;
+ case kArchCallCFunction:
+ return CallCFunctionLatency();
+ case kArchJmp:
+ return AssembleArchJumpLatency();
+ case kArchLookupSwitch:
+ return AssembleArchLookupSwitchLatency(instr);
+ case kArchTableSwitch:
+ return AssembleArchTableSwitchLatency();
+ case kArchDebugAbort:
+ return CallLatency() + 1;
+ case kArchDebugBreak:
+ return 1;
+ case kArchComment:
+ case kArchNop:
+ case kArchThrowTerminator:
+ case kArchDeoptimize:
+ return 0;
+ case kArchRet:
+ return AssemblerReturnLatency();
+ case kArchStackPointer:
+ case kArchFramePointer:
+ return 1;
+ case kArchParentFramePointer:
+ // Estimated max.
+ return AlignedMemoryLatency();
+ case kArchTruncateDoubleToI:
+ return TruncateDoubleToIDelayedLatency();
+ case kArchStoreWithWriteBarrier:
+ return DadduLatency() + 1 + CheckPageFlagLatency();
+ case kArchStackSlot:
+ // Estimated max.
+ return DadduLatency(false) + AndLatency(false) + AssertLatency() +
+ DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
+ 1 + DsubuLatency() + DadduLatency();
+ case kArchWordPoisonOnSpeculation:
+ return AndLatency();
+ case kIeee754Float64Acos:
+ case kIeee754Float64Acosh:
+ case kIeee754Float64Asin:
+ case kIeee754Float64Asinh:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Cosh:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Pow:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Sinh:
+ case kIeee754Float64Tan:
+ case kIeee754Float64Tanh:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kMips64Add:
+ case kMips64Dadd:
+ return DadduLatency(instr->InputAt(1)->IsRegister());
+ case kMips64DaddOvf:
+ return DaddOverflowLatency();
+ case kMips64Sub:
+ case kMips64Dsub:
+ return DsubuLatency(instr->InputAt(1)->IsRegister());
+ case kMips64DsubOvf:
+ return DsubOverflowLatency();
+ case kMips64Mul:
+ return MulLatency();
+ case kMips64MulOvf:
+ return MulOverflowLatency();
+ case kMips64MulHigh:
+ return MulhLatency();
+ case kMips64MulHighU:
+ return MulhuLatency();
+ case kMips64DMulHigh:
+ return DMulhLatency();
+ case kMips64Div: {
+ int latency = DivLatency(instr->InputAt(1)->IsRegister());
+ if (kArchVariant >= kMips64r6) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMips64DivU: {
+ int latency = DivuLatency(instr->InputAt(1)->IsRegister());
+ if (kArchVariant >= kMips64r6) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMips64Mod:
+ return ModLatency();
+ case kMips64ModU:
+ return ModuLatency();
+ case kMips64Dmul:
+ return DmulLatency();
+ case kMips64Ddiv: {
+ int latency = DdivLatency();
+ if (kArchVariant >= kMips64r6) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMips64DdivU: {
+ int latency = DdivuLatency();
+ if (kArchVariant >= kMips64r6) {
+ return latency++;
+ } else {
+ return latency + MovzLatency();
+ }
+ }
+ case kMips64Dmod:
+ return DmodLatency();
+ case kMips64DmodU:
+ return DmoduLatency();
+ case kMips64Dlsa:
+ case kMips64Lsa:
+ return DlsaLatency();
+ case kMips64And:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kMips64And32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = AndLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kMips64Or:
+ return OrLatency(instr->InputAt(1)->IsRegister());
+ case kMips64Or32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = OrLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kMips64Nor:
+ return NorLatency(instr->InputAt(1)->IsRegister());
+ case kMips64Nor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = NorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kMips64Xor:
+ return XorLatency(instr->InputAt(1)->IsRegister());
+ case kMips64Xor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = XorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kMips64Clz:
+ case kMips64Dclz:
+ return DclzLatency();
+ case kMips64Ctz:
+ return CtzLatency();
+ case kMips64Dctz:
+ return DctzLatency();
+ case kMips64Popcnt:
+ return PopcntLatency();
+ case kMips64Dpopcnt:
+ return DpopcntLatency();
+ case kMips64Shl:
+ return 1;
+ case kMips64Shr:
+ case kMips64Sar:
+ return 2;
+ case kMips64Ext:
+ case kMips64Ins:
+ case kMips64Dext:
+ case kMips64Dins:
+ case kMips64Dshl:
+ case kMips64Dshr:
+ case kMips64Dsar:
+ case kMips64Ror:
+ case kMips64Dror:
+ return 1;
+ case kMips64Tst:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kMips64Mov:
+ return 1;
+ case kMips64CmpS:
+ return MoveLatency() + CompareF32Latency();
+ case kMips64AddS:
+ return Latency::ADD_S;
+ case kMips64SubS:
+ return Latency::SUB_S;
+ case kMips64MulS:
+ return Latency::MUL_S;
+ case kMips64DivS:
+ return Latency::DIV_S;
+ case kMips64ModS:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kMips64AbsS:
+ return Latency::ABS_S;
+ case kMips64NegS:
+ return NegdLatency();
+ case kMips64SqrtS:
+ return Latency::SQRT_S;
+ case kMips64MaxS:
+ return Latency::MAX_S;
+ case kMips64MinS:
+ return Latency::MIN_S;
+ case kMips64CmpD:
+ return MoveLatency() + CompareF64Latency();
+ case kMips64AddD:
+ return Latency::ADD_D;
+ case kMips64SubD:
+ return Latency::SUB_D;
+ case kMips64MulD:
+ return Latency::MUL_D;
+ case kMips64DivD:
+ return Latency::DIV_D;
+ case kMips64ModD:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kMips64AbsD:
+ return Latency::ABS_D;
+ case kMips64NegD:
+ return NegdLatency();
+ case kMips64SqrtD:
+ return Latency::SQRT_D;
+ case kMips64MaxD:
+ return Latency::MAX_D;
+ case kMips64MinD:
+ return Latency::MIN_D;
+ case kMips64Float64RoundDown:
+ case kMips64Float64RoundTruncate:
+ case kMips64Float64RoundUp:
+ case kMips64Float64RoundTiesEven:
+ return Float64RoundLatency();
+ case kMips64Float32RoundDown:
+ case kMips64Float32RoundTruncate:
+ case kMips64Float32RoundUp:
+ case kMips64Float32RoundTiesEven:
+ return Float32RoundLatency();
+ case kMips64Float32Max:
+ return Float32MaxLatency();
+ case kMips64Float64Max:
+ return Float64MaxLatency();
+ case kMips64Float32Min:
+ return Float32MinLatency();
+ case kMips64Float64Min:
+ return Float64MinLatency();
+ case kMips64Float64SilenceNaN:
+ return Latency::SUB_D;
+ case kMips64CvtSD:
+ return Latency::CVT_S_D;
+ case kMips64CvtDS:
+ return Latency::CVT_D_S;
+ case kMips64CvtDW:
+ return Latency::MTC1 + Latency::CVT_D_W;
+ case kMips64CvtSW:
+ return Latency::MTC1 + Latency::CVT_S_W;
+ case kMips64CvtSUw:
+ return 1 + Latency::DMTC1 + Latency::CVT_S_L;
+ case kMips64CvtSL:
+ return Latency::DMTC1 + Latency::CVT_S_L;
+ case kMips64CvtDL:
+ return Latency::DMTC1 + Latency::CVT_D_L;
+ case kMips64CvtDUw:
+ return 1 + Latency::DMTC1 + Latency::CVT_D_L;
+ case kMips64CvtDUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 +
+ 2 * Latency::CVT_D_L + Latency::ADD_D;
+ case kMips64CvtSUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 +
+ 2 * Latency::CVT_S_L + Latency::ADD_S;
+ case kMips64FloorWD:
+ return Latency::FLOOR_W_D + Latency::MFC1;
+ case kMips64CeilWD:
+ return Latency::CEIL_W_D + Latency::MFC1;
+ case kMips64RoundWD:
+ return Latency::ROUND_W_D + Latency::MFC1;
+ case kMips64TruncWD:
+ return Latency::TRUNC_W_D + Latency::MFC1;
+ case kMips64FloorWS:
+ return Latency::FLOOR_W_S + Latency::MFC1;
+ case kMips64CeilWS:
+ return Latency::CEIL_W_S + Latency::MFC1;
+ case kMips64RoundWS:
+ return Latency::ROUND_W_S + Latency::MFC1;
+ case kMips64TruncWS:
+ return Latency::TRUNC_W_S + Latency::MFC1 + 2 + MovnLatency();
+ case kMips64TruncLS:
+ return TruncLSLatency(instr->OutputCount() > 1);
+ case kMips64TruncLD:
+ return TruncLDLatency(instr->OutputCount() > 1);
+ case kMips64TruncUwD:
+ // Estimated max.
+ return CompareF64Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() +
+ Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1;
+ case kMips64TruncUwS:
+ // Estimated max.
+ return CompareF32Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() +
+ Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency();
+ case kMips64TruncUlS:
+ return TruncUlSLatency();
+ case kMips64TruncUlD:
+ return TruncUlDLatency();
+ case kMips64BitcastDL:
+ return Latency::DMFC1;
+ case kMips64BitcastLD:
+ return Latency::DMTC1;
+ case kMips64Float64ExtractLowWord32:
+ return Latency::MFC1;
+ case kMips64Float64InsertLowWord32:
+ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1;
+ case kMips64Float64ExtractHighWord32:
+ return Latency::MFHC1;
+ case kMips64Float64InsertHighWord32:
+ return Latency::MTHC1;
+ case kMips64Seb:
+ case kMips64Seh:
+ return 1;
+ case kMips64Lbu:
+ case kMips64Lb:
+ case kMips64Lhu:
+ case kMips64Lh:
+ case kMips64Lwu:
+ case kMips64Lw:
+ case kMips64Ld:
+ case kMips64Sb:
+ case kMips64Sh:
+ case kMips64Sw:
+ case kMips64Sd:
+ return AlignedMemoryLatency();
+ case kMips64Lwc1:
+ return Lwc1Latency();
+ case kMips64Ldc1:
+ return Ldc1Latency();
+ case kMips64Swc1:
+ return Swc1Latency();
+ case kMips64Sdc1:
+ return Sdc1Latency();
+ case kMips64Ulhu:
+ case kMips64Ulh:
+ return UlhuLatency();
+ case kMips64Ulwu:
+ return UlwuLatency();
+ case kMips64Ulw:
+ return UlwLatency();
+ case kMips64Uld:
+ return UldLatency();
+ case kMips64Ulwc1:
+ return Ulwc1Latency();
+ case kMips64Uldc1:
+ return Uldc1Latency();
+ case kMips64Ush:
+ return UshLatency();
+ case kMips64Usw:
+ return UswLatency();
+ case kMips64Usd:
+ return UsdLatency();
+ case kMips64Uswc1:
+ return Uswc1Latency();
+ case kMips64Usdc1:
+ return Usdc1Latency();
+ case kMips64Push: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ latency = Sdc1Latency() + DsubuLatency(false);
+ } else {
+ latency = PushLatency();
+ }
+ return latency;
+ }
+ case kMips64Peek: {
+ int latency = 0;
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->OutputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat64:
+ latency = Ldc1Latency();
+ break;
+ case MachineRepresentation::kFloat32:
+ latency = Latency::LWC1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kMips64StackClaim:
+ return DsubuLatency(false);
+ case kMips64StoreToStackSlot: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ latency = 1; // Estimated value.
+ } else {
+ latency = Sdc1Latency();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kMips64ByteSwap64:
+ return ByteSwapSignedLatency();
+ case kMips64ByteSwap32:
+ return ByteSwapSignedLatency();
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ return 2;
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
+ return 3;
+ case kWord32AtomicExchangeInt8:
+ return Word32AtomicExchangeLatency(true, 8);
+ case kWord32AtomicExchangeUint8:
+ return Word32AtomicExchangeLatency(false, 8);
+ case kWord32AtomicExchangeInt16:
+ return Word32AtomicExchangeLatency(true, 16);
+ case kWord32AtomicExchangeUint16:
+ return Word32AtomicExchangeLatency(false, 16);
+ case kWord32AtomicExchangeWord32:
+ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
+ case kWord32AtomicCompareExchangeInt8:
+ return Word32AtomicCompareExchangeLatency(true, 8);
+ case kWord32AtomicCompareExchangeUint8:
+ return Word32AtomicCompareExchangeLatency(false, 8);
+ case kWord32AtomicCompareExchangeInt16:
+ return Word32AtomicCompareExchangeLatency(true, 16);
+ case kWord32AtomicCompareExchangeUint16:
+ return Word32AtomicCompareExchangeLatency(false, 16);
+ case kWord32AtomicCompareExchangeWord32:
+ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
+ BranchShortLatency() + 1;
+ case kMips64AssertEqual:
+ return AssertLatency();
+ default:
+ return 1;
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 9302b2bca3..ee02d30244 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -2171,8 +2171,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
@@ -2802,7 +2802,9 @@ static const ShuffleEntry arch_shuffles[] = {
kMips64S8x2Reverse}};
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
for (size_t i = 0; i < num_entries; ++i) {
const ShuffleEntry& entry = table[i];
int j = 0;
@@ -2822,35 +2824,35 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
- uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
uint8_t shuffle32x4[4];
ArchOpcode opcode;
if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- mask, &opcode)) {
+ is_swizzle, &opcode)) {
VisitRRR(this, opcode, node);
return;
}
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
uint8_t offset;
Mips64OperandGenerator g(this);
- if (TryMatchConcat(shuffle, mask, &offset)) {
- Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
- g.UseImmediate(offset));
+ if (TryMatchConcat(shuffle, &offset)) {
+ Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
return;
}
- Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+ Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index c31f9691f2..4b02cad9b9 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -7,18 +7,19 @@
#include <cmath>
-// TODO(turbofan): Move ExternalReference out of assembler.h
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/double.h"
+#include "src/external-reference.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
namespace compiler {
+class JSHeapBroker;
+
// A pattern matcher for nodes.
struct NodeMatcher {
explicit NodeMatcher(Node* node) : node_(node) {}
@@ -193,6 +194,10 @@ struct HeapObjectMatcher final
bool Is(Handle<HeapObject> const& value) const {
return this->HasValue() && this->Value().address() == value.address();
}
+
+ ObjectRef Ref(const JSHeapBroker* broker) const {
+ return ObjectRef(broker, this->Value());
+ }
};
@@ -391,11 +396,10 @@ struct AddMatcher : public BinopMatcher {
return;
}
- if (this->right().opcode() == kAddOpcode &&
- this->left().opcode() != kAddOpcode) {
- this->SwapInputs();
- } else if (this->right().opcode() == kSubOpcode &&
- this->left().opcode() != kSubOpcode) {
+ if ((this->left().opcode() != kSubOpcode &&
+ this->left().opcode() != kAddOpcode) &&
+ (this->right().opcode() == kAddOpcode ||
+ this->right().opcode() == kSubOpcode)) {
this->SwapInputs();
}
}
@@ -491,14 +495,14 @@ struct BaseWithIndexAndDisplacementMatcher {
bool power_of_two_plus_one = false;
DisplacementMode displacement_mode = kPositiveDisplacement;
int scale = 0;
- if (m.HasIndexInput() && left->OwnedByAddressingOperand()) {
+ if (m.HasIndexInput() && OwnedByAddressingOperand(left)) {
index = m.IndexInput();
scale = m.scale();
scale_expression = left;
power_of_two_plus_one = m.power_of_two_plus_one();
bool match_found = false;
if (right->opcode() == AddMatcher::kSubOpcode &&
- right->OwnedByAddressingOperand()) {
+ OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
// (S + (B - D))
@@ -510,7 +514,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
if (!match_found) {
if (right->opcode() == AddMatcher::kAddOpcode &&
- right->OwnedByAddressingOperand()) {
+ OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
// (S + (B + D))
@@ -531,7 +535,7 @@ struct BaseWithIndexAndDisplacementMatcher {
} else {
bool match_found = false;
if (left->opcode() == AddMatcher::kSubOpcode &&
- left->OwnedByAddressingOperand()) {
+ OwnedByAddressingOperand(left)) {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
@@ -557,7 +561,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
if (!match_found) {
if (left->opcode() == AddMatcher::kAddOpcode &&
- left->OwnedByAddressingOperand()) {
+ OwnedByAddressingOperand(left)) {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
@@ -667,6 +671,29 @@ struct BaseWithIndexAndDisplacementMatcher {
scale_ = scale;
matches_ = true;
}
+
+ static bool OwnedByAddressingOperand(Node* node) {
+ for (auto use : node->use_edges()) {
+ Node* from = use.from();
+ switch (from->opcode()) {
+ case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad:
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt64Add:
+ // Skip addressing uses.
+ break;
+ case IrOpcode::kStore:
+ // If the stored value is this node, it is not an addressing use.
+ if (from->InputAt(2) == node) return false;
+ // Otherwise it is used as an address and skipped.
+ break;
+ default:
+ // Non-addressing use found.
+ return false;
+ }
+ }
+ return true;
+ }
};
typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
@@ -722,6 +749,64 @@ struct V8_EXPORT_PRIVATE DiamondMatcher
Node* if_false_;
};
+template <class BinopMatcher, IrOpcode::Value expected_opcode>
+struct WasmStackCheckMatcher {
+ explicit WasmStackCheckMatcher(Node* compare) : compare_(compare) {}
+
+ bool Matched() {
+ if (compare_->opcode() != expected_opcode) return false;
+ BinopMatcher m(compare_);
+ return MatchedInternal(m.left(), m.right());
+ }
+
+ private:
+ bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l,
+ const typename BinopMatcher::RightMatcher& r) {
+ // In wasm, the stack check is performed by loading the value given by
+ // the address of a field stored in the instance object. That object is
+ // passed as a parameter.
+ if (l.IsLoad() && r.IsLoadStackPointer()) {
+ LoadMatcher<LoadMatcher<NodeMatcher>> mleft(l.node());
+ if (mleft.object().IsLoad() && mleft.index().Is(0) &&
+ mleft.object().object().IsParameter()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ Node* compare_;
+};
+
+template <class BinopMatcher, IrOpcode::Value expected_opcode>
+struct StackCheckMatcher {
+ StackCheckMatcher(Isolate* isolate, Node* compare)
+ : isolate_(isolate), compare_(compare) {}
+ bool Matched() {
+ // TODO(jgruber): Ideally, we could be more flexible here and also match the
+ // same pattern with switched operands (i.e.: left is LoadStackPointer and
+ // right is the js_stack_limit load). But to be correct in all cases, we'd
+ // then have to invert the outcome of the stack check comparison.
+ if (compare_->opcode() != expected_opcode) return false;
+ BinopMatcher m(compare_);
+ return MatchedInternal(m.left(), m.right());
+ }
+
+ private:
+ bool MatchedInternal(const typename BinopMatcher::LeftMatcher& l,
+ const typename BinopMatcher::RightMatcher& r) {
+ if (l.IsLoad() && r.IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(l.node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(isolate_);
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) return true;
+ }
+ return false;
+ }
+
+ Isolate* isolate_;
+ Node* compare_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/node-origin-table.cc b/deps/v8/src/compiler/node-origin-table.cc
index 406d634812..fc1f403939 100644
--- a/deps/v8/src/compiler/node-origin-table.cc
+++ b/deps/v8/src/compiler/node-origin-table.cc
@@ -12,7 +12,15 @@ namespace compiler {
void NodeOrigin::PrintJson(std::ostream& out) const {
out << "{ ";
- out << "\"nodeId\" : " << created_from();
+ switch (origin_kind_) {
+ case kGraphNode:
+ out << "\"nodeId\" : ";
+ break;
+ case kWasmBytecode:
+ out << "\"bytecodePosition\" : ";
+ break;
+ }
+ out << created_from();
out << ", \"reducer\" : \"" << reducer_name() << "\"";
out << ", \"phase\" : \"" << phase_name() << "\"";
out << "}";
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index fae53c5788..8429b2f692 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -18,11 +18,21 @@ namespace compiler {
class NodeOrigin {
public:
+ typedef enum { kWasmBytecode, kGraphNode } OriginKind;
NodeOrigin(const char* phase_name, const char* reducer_name,
NodeId created_from)
: phase_name_(phase_name),
reducer_name_(reducer_name),
+ origin_kind_(kGraphNode),
created_from_(created_from) {}
+
+ NodeOrigin(const char* phase_name, const char* reducer_name,
+ OriginKind origin_kind, uint64_t created_from)
+ : phase_name_(phase_name),
+ reducer_name_(reducer_name),
+ origin_kind_(origin_kind),
+ created_from_(created_from) {}
+
NodeOrigin(const NodeOrigin& other) = default;
static NodeOrigin Unknown() { return NodeOrigin(); }
@@ -31,6 +41,8 @@ class NodeOrigin {
const char* reducer_name() const { return reducer_name_; }
const char* phase_name() const { return phase_name_; }
+ OriginKind origin_kind() const { return origin_kind_; }
+
bool operator==(const NodeOrigin& o) const {
return reducer_name_ == o.reducer_name_ && created_from_ == o.created_from_;
}
@@ -44,6 +56,7 @@ class NodeOrigin {
created_from_(std::numeric_limits<int64_t>::min()) {}
const char* phase_name_;
const char* reducer_name_;
+ OriginKind origin_kind_;
int64_t created_from_;
};
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 2ccabb18eb..0d0e4f3c97 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -363,11 +363,11 @@ bool NodeProperties::IsSame(Node* a, Node* b) {
// static
NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
- Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) {
+ Isolate* isolate, Node* receiver, Node* effect,
+ ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
Handle<HeapObject> receiver = m.Value();
- Isolate* const isolate = m.Value()->GetIsolate();
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
// we better make sure that TurboFan doesn't outsmart the system here
@@ -416,7 +416,8 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
Handle<JSFunction> original_constructor =
Handle<JSFunction>::cast(mnewtarget.Value());
if (original_constructor->has_initial_map()) {
- Handle<Map> initial_map(original_constructor->initial_map());
+ Handle<Map> initial_map(original_constructor->initial_map(),
+ isolate);
if (initial_map->constructor_or_backpointer() ==
*mtarget.Value()) {
*maps_return = ZoneHandleSet<Map>(initial_map);
@@ -503,12 +504,12 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
-MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
+MaybeHandle<Map> NodeProperties::GetMapWitness(Isolate* isolate, Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ NodeProperties::InferReceiverMaps(isolate, receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
return maps[0];
}
@@ -516,11 +517,13 @@ MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
}
// static
-bool NodeProperties::HasInstanceTypeWitness(Node* receiver, Node* effect,
+bool NodeProperties::HasInstanceTypeWitness(Isolate* isolate, Node* receiver,
+ Node* effect,
InstanceType instance_type) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(isolate, receiver, effect,
+ &receiver_maps);
switch (result) {
case NodeProperties::kUnreliableReceiverMaps:
case NodeProperties::kReliableReceiverMaps:
@@ -551,7 +554,8 @@ bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
}
// static
-bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
+bool NodeProperties::CanBePrimitive(Isolate* isolate, Node* receiver,
+ Node* effect) {
switch (receiver->opcode()) {
#define CASE(Opcode) case IrOpcode::k##Opcode:
JS_CONSTRUCT_OP_LIST(CASE)
@@ -571,7 +575,8 @@ bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
// just the instance types, which don't change
// across potential side-effecting operations.
ZoneHandleSet<Map> maps;
- if (InferReceiverMaps(receiver, effect, &maps) != kNoReceiverMaps) {
+ if (InferReceiverMaps(isolate, receiver, effect, &maps) !=
+ kNoReceiverMaps) {
// Check if all {maps} are actually JSReceiver maps.
for (size_t i = 0; i < maps.size(); ++i) {
if (!maps[i]->IsJSReceiverMap()) return true;
@@ -584,8 +589,9 @@ bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
}
// static
-bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
- if (CanBePrimitive(receiver, effect)) {
+bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
+ Node* effect) {
+ if (CanBePrimitive(isolate, receiver, effect)) {
switch (receiver->opcode()) {
case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
@@ -602,7 +608,6 @@ bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
return false;
case IrOpcode::kHeapConstant: {
Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
- Isolate* const isolate = value->GetIsolate();
return value->IsNullOrUndefined(isolate);
}
default:
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 10c19b6e35..abcee4eaf9 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -152,11 +152,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// but instance type is reliable.
};
static InferReceiverMapsResult InferReceiverMaps(
- Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+ Isolate* isolate, Node* receiver, Node* effect,
+ ZoneHandleSet<Map>* maps_return);
- static MaybeHandle<Map> GetMapWitness(Node* node);
- static bool HasInstanceTypeWitness(Node* receiver, Node* effect,
- InstanceType instance_type);
+ static MaybeHandle<Map> GetMapWitness(Isolate* isolate, Node* node);
+ static bool HasInstanceTypeWitness(Isolate* isolate, Node* receiver,
+ Node* effect, InstanceType instance_type);
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and it's {dominator}. Aborts the walk if there's join
@@ -166,11 +167,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Returns true if the {receiver} can be a primitive value (i.e. is not
// definitely a JavaScript object); might walk up the {effect} chain to
// find map checks on {receiver}.
- static bool CanBePrimitive(Node* receiver, Node* effect);
+ static bool CanBePrimitive(Isolate* isolate, Node* receiver, Node* effect);
// Returns true if the {receiver} can be null or undefined. Might walk
// up the {effect} chain to find map checks for {receiver}.
- static bool CanBeNullOrUndefined(Node* receiver, Node* effect);
+ static bool CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
+ Node* effect);
// ---------------------------------------------------------------------------
// Context.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index f53468c216..0e1ebce9bc 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -296,22 +296,8 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
return mask == 3;
}
-bool Node::OwnedByAddressingOperand() const {
- for (Use* use = first_use_; use; use = use->next) {
- Node* from = use->from();
- if (from->opcode() != IrOpcode::kLoad &&
- // If {from} is store, make sure it does not use {this} as value
- (from->opcode() != IrOpcode::kStore || from->InputAt(2) == this) &&
- from->opcode() != IrOpcode::kInt32Add &&
- from->opcode() != IrOpcode::kInt64Add) {
- return false;
- }
- }
- return true;
-}
-
void Node::Print() const {
- OFStream os(stdout);
+ StdoutStream os;
os << *this << std::endl;
for (Node* input : this->inputs()) {
os << " " << *input << std::endl;
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 506b38d53f..fc5a17c19d 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -159,9 +159,6 @@ class V8_EXPORT_PRIVATE Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
- // Returns true if addressing related operands (such as load, store, lea)
- // are the only users of {this} node.
- bool OwnedByAddressingOperand() const;
void Print() const;
private:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index f84598e3fd..7a6b19cb35 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -209,6 +209,7 @@
V(JSResolvePromise) \
V(JSStackCheck) \
V(JSObjectIsArray) \
+ V(JSRegExpTest) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -379,9 +380,11 @@
V(LoadField) \
V(LoadElement) \
V(LoadTypedElement) \
+ V(LoadDataViewElement) \
V(StoreField) \
V(StoreElement) \
V(StoreTypedElement) \
+ V(StoreDataViewElement) \
V(StoreSignedSmallElement) \
V(TransitionAndStoreElement) \
V(TransitionAndStoreNumberElement) \
@@ -623,7 +626,6 @@
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
- V(LoadRootsPointer) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 4512a52742..a9ae8c322a 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -16,20 +16,29 @@ namespace v8 {
namespace internal {
namespace compiler {
-OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
+OperationTyper::OperationTyper(Isolate* isolate,
+ const JSHeapBroker* js_heap_broker, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
Factory* factory = isolate->factory();
- infinity_ = Type::NewConstant(factory->infinity_value(), zone);
- minus_infinity_ = Type::NewConstant(factory->minus_infinity_value(), zone);
+ infinity_ =
+ Type::NewConstant(js_heap_broker, factory->infinity_value(), zone);
+ minus_infinity_ =
+ Type::NewConstant(js_heap_broker, factory->minus_infinity_value(), zone);
Type truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero.Maybe(Type::Integral32()));
- singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
- singleton_NaN_string_ = Type::HeapConstant(factory->NaN_string(), zone);
- singleton_zero_string_ = Type::HeapConstant(factory->zero_string(), zone);
- singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
- singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
- singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
+ singleton_empty_string_ =
+ Type::HeapConstant(js_heap_broker, factory->empty_string(), zone);
+ singleton_NaN_string_ =
+ Type::HeapConstant(js_heap_broker, factory->NaN_string(), zone);
+ singleton_zero_string_ =
+ Type::HeapConstant(js_heap_broker, factory->zero_string(), zone);
+ singleton_false_ =
+ Type::HeapConstant(js_heap_broker, factory->false_value(), zone);
+ singleton_true_ =
+ Type::HeapConstant(js_heap_broker, factory->true_value(), zone);
+ singleton_the_hole_ =
+ Type::HeapConstant(js_heap_broker, factory->the_hole_value(), zone);
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 719d381ab8..81f20bcda4 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -27,7 +27,8 @@ class TypeCache;
class V8_EXPORT_PRIVATE OperationTyper {
public:
- OperationTyper(Isolate* isolate, Zone* zone);
+ OperationTyper(Isolate* isolate, const JSHeapBroker* js_heap_broker,
+ Zone* zone);
// Typing Phi.
Type Merge(Type left, Type right);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index ba9b400b92..689561059c 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -123,6 +123,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSResolvePromise:
case IrOpcode::kJSPerformPromiseThen:
case IrOpcode::kJSObjectIsArray:
+ case IrOpcode::kJSRegExpTest:
return true;
default:
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index af8fa50140..099e5599b8 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -6,7 +6,6 @@
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-stats.h"
-#include "src/isolate.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
#include "src/optimized-compilation-info.h"
@@ -46,11 +45,11 @@ void PipelineStatistics::CommonStats::End(
}
PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
- Isolate* isolate, ZoneStats* zone_stats)
- : isolate_(isolate),
- outer_zone_(info->zone()),
+ CompilationStatistics* compilation_stats,
+ ZoneStats* zone_stats)
+ : outer_zone_(info->zone()),
zone_stats_(zone_stats),
- compilation_stats_(isolate_->GetTurboStatistics()),
+ compilation_stats_(compilation_stats),
source_size_(0),
phase_kind_name_(nullptr),
phase_name_(nullptr) {
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 56467f496a..21ef2b02aa 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -20,8 +20,8 @@ class PhaseScope;
class PipelineStatistics : public Malloced {
public:
- PipelineStatistics(OptimizedCompilationInfo* info, Isolate* isolate,
- ZoneStats* zone_stats);
+ PipelineStatistics(OptimizedCompilationInfo* info,
+ CompilationStatistics* turbo_stats, ZoneStats* zone_stats);
~PipelineStatistics();
void BeginPhaseKind(const char* phase_kind_name);
@@ -56,7 +56,6 @@ class PipelineStatistics : public Malloced {
void BeginPhase(const char* name);
void EndPhase();
- Isolate* isolate_;
Zone* outer_zone_;
ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 779457bcf7..3366d1db94 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -14,6 +14,7 @@
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/bootstrapper.h"
+#include "src/code-tracer.h"
#include "src/compiler.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
@@ -21,6 +22,7 @@
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
@@ -37,6 +39,7 @@
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
@@ -69,7 +72,9 @@
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
+#include "src/disassembler.h"
#include "src/isolate-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/optimized-compilation-info.h"
@@ -77,6 +82,7 @@
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/utils.h"
+#include "src/wasm/function-body-decoder.h"
namespace v8 {
namespace internal {
@@ -99,6 +105,7 @@ class PipelineData {
OptimizedCompilationInfo* info,
PipelineStatistics* pipeline_statistics)
: isolate_(isolate),
+ allocator_(isolate->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
may_have_unverifiable_graph_(false),
@@ -111,7 +118,8 @@ class PipelineData {
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ assembler_options_(AssemblerOptions::Default(isolate)) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
@@ -127,21 +135,30 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+ js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_);
+ dependencies_ =
+ new (codegen_zone_) CompilationDependencies(isolate_, codegen_zone_);
}
// For WebAssembly compile entry point.
- PipelineData(ZoneStats* zone_stats, Isolate* isolate,
+ PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
OptimizedCompilationInfo* info, MachineGraph* mcgraph,
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data)
- : isolate_(isolate),
+ WasmCompilationData* wasm_compilation_data,
+ int wasm_function_index,
+ const AssemblerOptions& assembler_options)
+ : isolate_(nullptr),
+ wasm_engine_(wasm_engine),
+ allocator_(wasm_engine->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
+ wasm_function_index_(wasm_function_index),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_(graph_zone_scope_.zone()),
graph_(mcgraph->graph()),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -154,14 +171,17 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- wasm_compilation_data_(wasm_compilation_data) {}
+ wasm_compilation_data_(wasm_compilation_data),
+ assembler_options_(assembler_options) {}
// For machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
Isolate* isolate, Graph* graph, Schedule* schedule,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt)
+ NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& assembler_options)
: isolate_(isolate),
+ allocator_(isolate->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@@ -176,12 +196,14 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- jump_optimization_info_(jump_opt) {}
+ jump_optimization_info_(jump_opt),
+ assembler_options_(assembler_options) {}
// For register allocation testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
Isolate* isolate, InstructionSequence* sequence)
: isolate_(isolate),
+ allocator_(isolate->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@@ -192,7 +214,8 @@ class PipelineData {
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ assembler_options_(AssemblerOptions::Default(isolate)) {}
~PipelineData() {
delete code_generator_; // Must happen before zones are destroyed.
@@ -204,8 +227,10 @@ class PipelineData {
}
Isolate* isolate() const { return isolate_; }
+ AccountingAllocator* allocator() const { return allocator_; }
OptimizedCompilationInfo* info() const { return info_; }
ZoneStats* zone_stats() const { return zone_stats_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }
bool compilation_failed() const { return compilation_failed_; }
@@ -214,8 +239,8 @@ class PipelineData {
bool verify_graph() const { return verify_graph_; }
void set_verify_graph(bool value) { verify_graph_ = value; }
- Handle<Code> code() { return code_; }
- void set_code(Handle<Code> code) {
+ MaybeHandle<Code> code() { return code_; }
+ void set_code(MaybeHandle<Code> code) {
DCHECK(code_.is_null());
code_ = code;
}
@@ -241,6 +266,8 @@ class PipelineData {
return handle(info()->global_object(), isolate());
}
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
DCHECK(!schedule_);
@@ -274,6 +301,11 @@ class PipelineData {
return jump_optimization_info_;
}
+ CodeTracer* GetCodeTracer() const {
+ return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
+ : wasm_engine_->GetCodeTracer();
+ }
+
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
@@ -301,6 +333,8 @@ class PipelineData {
if (codegen_zone_ == nullptr) return;
codegen_zone_scope_.Destroy();
codegen_zone_ = nullptr;
+ dependencies_ = nullptr;
+ js_heap_broker_ = nullptr;
frame_ = nullptr;
}
@@ -359,7 +393,8 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- wasm_compilation_data_, info()->GetPoisoningMitigationLevel());
+ wasm_compilation_data_, info()->GetPoisoningMitigationLevel(),
+ assembler_options_, info_->builtin_index());
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -376,10 +411,19 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
+ WasmCompilationData* wasm_compilation_data() const {
+ return wasm_compilation_data_;
+ }
+
+ int wasm_function_index() const { return wasm_function_index_; }
+
private:
Isolate* const isolate_;
+ wasm::WasmEngine* const wasm_engine_ = nullptr;
+ AccountingAllocator* const allocator_;
OptimizedCompilationInfo* const info_;
std::unique_ptr<char[]> debug_name_;
+ int wasm_function_index_ = -1;
bool may_have_unverifiable_graph_ = true;
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
@@ -387,7 +431,7 @@ class PipelineData {
bool verify_graph_ = false;
int start_source_position_ = kNoSourcePosition;
base::Optional<OsrHelper> osr_helper_;
- Handle<Code> code_ = Handle<Code>::null();
+ MaybeHandle<Code> code_;
CodeGenerator* code_generator_ = nullptr;
// All objects in the following group of fields are allocated in graph_zone_.
@@ -417,6 +461,8 @@ class PipelineData {
// is destroyed.
ZoneStats::Scope codegen_zone_scope_;
Zone* codegen_zone_;
+ CompilationDependencies* dependencies_ = nullptr;
+ JSHeapBroker* js_heap_broker_ = nullptr;
Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
@@ -435,6 +481,7 @@ class PipelineData {
WasmCompilationData* wasm_compilation_data_ = nullptr;
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
+ AssemblerOptions assembler_options_;
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
@@ -467,10 +514,14 @@ class PipelineImpl final {
void AssembleCode(Linkage* linkage);
// Step D. Run the code finalization pass.
- Handle<Code> FinalizeCode();
+ MaybeHandle<Code> FinalizeCode();
+ // Step E. Install any code dependencies.
+ bool CommitDependencies(Handle<Code> code);
+
+ void VerifyGeneratedCodeIsIdempotent();
void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
+ MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor, bool run_verifier);
@@ -550,8 +601,8 @@ void PrintParticipatingSource(OptimizedCompilationInfo* info,
}
// Print the code after compiling it.
-void PrintCode(Handle<Code> code, OptimizedCompilationInfo* info) {
- Isolate* isolate = code->GetIsolate();
+void PrintCode(Isolate* isolate, Handle<Code> code,
+ OptimizedCompilationInfo* info) {
if (FLAG_print_opt_source && info->IsOptimizing()) {
PrintParticipatingSource(info, isolate);
}
@@ -560,11 +611,11 @@ void PrintCode(Handle<Code> code, OptimizedCompilationInfo* info) {
AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code =
isolate->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code
+ ? FLAG_print_builtin_code && info->shared_info()->PassesFilter(
+ FLAG_print_builtin_code_filter)
: (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
(info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
- (info->IsWasm() && FLAG_print_wasm_code));
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
@@ -613,7 +664,7 @@ struct TurboCfgFile : public std::ofstream {
std::ios_base::app) {}
};
-void TraceSchedule(OptimizedCompilationInfo* info, Isolate* isolate,
+void TraceSchedule(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
if (info->trace_turbo_json_enabled()) {
AllowHandleDereference allow_deref;
@@ -630,7 +681,7 @@ void TraceSchedule(OptimizedCompilationInfo* info, Isolate* isolate,
}
if (info->trace_turbo_graph_enabled() || FLAG_trace_turbo_scheduler) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Schedule --------------------------------------\n" << *schedule;
}
@@ -724,7 +775,8 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
PipelineStatistics* pipeline_statistics = nullptr;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics = new PipelineStatistics(info, isolate, zone_stats);
+ pipeline_statistics =
+ new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
pipeline_statistics->BeginPhaseKind("initializing");
}
@@ -739,11 +791,52 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
return pipeline_statistics;
}
+PipelineStatistics* CreatePipelineStatistics(wasm::WasmEngine* wasm_engine,
+ wasm::FunctionBody function_body,
+ wasm::WasmModule* wasm_module,
+ OptimizedCompilationInfo* info,
+ ZoneStats* zone_stats) {
+ PipelineStatistics* pipeline_statistics = nullptr;
+
+ if (FLAG_turbo_stats_wasm) {
+ pipeline_statistics = new PipelineStatistics(
+ info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
+ pipeline_statistics->BeginPhaseKind("initializing");
+ }
+
+ if (info->trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(info, std::ios_base::trunc);
+ std::unique_ptr<char[]> function_name = info->GetDebugName();
+ json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
+ AccountingAllocator allocator;
+ std::ostringstream disassembly;
+ std::vector<int> source_positions;
+ wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
+ wasm::kPrintLocals, disassembly, &source_positions);
+ for (const auto& c : disassembly.str()) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
+ bool insert_comma = false;
+ for (auto val : source_positions) {
+ if (insert_comma) {
+ json_of << ", ";
+ }
+ json_of << val;
+ insert_comma = true;
+ }
+ json_of << "],\n\"phases\":[";
+ }
+
+ return pipeline_statistics;
+}
+
} // namespace
class PipelineCompilationJob final : public OptimizedCompilationJob {
public:
- PipelineCompilationJob(Handle<SharedFunctionInfo> shared_info,
+ PipelineCompilationJob(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function)
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
@@ -756,8 +849,8 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
compilation_info_(&zone_, function->GetIsolate(), shared_info,
function),
pipeline_statistics_(CreatePipelineStatistics(
- handle(Script::cast(shared_info->script())), compilation_info(),
- function->GetIsolate(), &zone_stats_)),
+ handle(Script::cast(shared_info->script()), isolate),
+ compilation_info(), function->GetIsolate(), &zone_stats_)),
data_(&zone_stats_, function->GetIsolate(), compilation_info(),
pipeline_statistics_.get()),
pipeline_(&data_),
@@ -818,7 +911,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
if (compilation_info()->closure()->feedback_cell()->map() ==
- isolate->heap()->one_closure_cell_map()) {
+ ReadOnlyRoots(isolate).one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -851,16 +944,19 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
- Handle<Code> code = pipeline_.FinalizeCode();
- if (code.is_null()) {
+ MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
return AbortOptimization(BailoutReason::kCodeGenerationFailed);
}
return FAILED;
}
- compilation_info()->dependencies()->Commit(code);
- compilation_info()->SetCode(code);
+ if (!pipeline_.CommitDependencies(code)) {
+ return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
+ }
+ compilation_info()->SetCode(code);
compilation_info()->context()->native_context()->AddOptimizedCode(*code);
RegisterWeakObjectsInOptimizedCode(code, isolate);
return SUCCEEDED;
@@ -891,22 +987,30 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
+// The stack limit used during compilation is used to limit the recursion
+// depth in, e.g. AST walking. No such recursion happens in WASM compilations.
+constexpr uintptr_t kNoStackLimit = 0;
+
class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
public:
explicit PipelineWasmCompilationJob(
- OptimizedCompilationInfo* info, Isolate* isolate, MachineGraph* mcgraph,
- CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, WasmCompilationData* wasm_compilation_data,
- bool asmjs_origin)
- : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), info,
- "TurboFan", State::kReadyToExecute),
- zone_stats_(isolate->allocator()),
+ OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
+ MachineGraph* mcgraph, CallDescriptor* call_descriptor,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ WasmCompilationData* wasm_compilation_data,
+ wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
+ wasm::NativeModule* native_module, int function_index, bool asmjs_origin)
+ : OptimizedCompilationJob(kNoStackLimit, info, "TurboFan",
+ State::kReadyToExecute),
+ zone_stats_(wasm_engine->allocator()),
pipeline_statistics_(CreatePipelineStatistics(
- Handle<Script>::null(), info, isolate, &zone_stats_)),
- data_(&zone_stats_, isolate, info, mcgraph, pipeline_statistics_.get(),
- source_positions, node_origins, wasm_compilation_data),
+ wasm_engine, function_body, wasm_module, info, &zone_stats_)),
+ data_(&zone_stats_, wasm_engine, info, mcgraph,
+ pipeline_statistics_.get(), source_positions, node_origins,
+ wasm_compilation_data, function_index, WasmAssemblerOptions()),
pipeline_(&data_),
linkage_(call_descriptor),
+ native_module_(native_module),
asmjs_origin_(asmjs_origin) {}
protected:
@@ -915,19 +1019,12 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
Status FinalizeJobImpl(Isolate* isolate) final;
private:
- size_t AllocatedMemory() const override;
-
- // Temporary regression check while we get the wasm code off the GC heap, and
- // until we decontextualize wasm code.
- // We expect the only embedded objects to be: CEntry, undefined, and
- // the various builtins for throwing exceptions like OOB.
- void ValidateImmovableEmbeddedObjects() const;
-
ZoneStats zone_stats_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
+ wasm::NativeModule* native_module_;
bool asmjs_origin_;
};
@@ -939,16 +1036,12 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::PrepareJobImpl(
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::ExecuteJobImpl() {
- if (compilation_info()->trace_turbo_json_enabled()) {
- TurboJsonFile json_of(compilation_info(), std::ios_base::trunc);
- json_of << "{\"function\":\"" << compilation_info()->GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
+ pipeline_.RunPrintAndVerify("Machine", true);
- pipeline_.RunPrintAndVerify("machine", true);
+ PipelineData* data = &data_;
+ data->BeginPhaseKind("wasm optimization");
if (FLAG_wasm_opt || asmjs_origin_) {
- PipelineData* data = &data_;
- PipelineRunScope scope(data, "wasm optimization");
+ PipelineRunScope scope(data, "wasm full optimization");
GraphReducer graph_reducer(scope.zone(), data->graph(),
data->mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -956,80 +1049,71 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
MachineOperatorReducer machine_reducer(data->mcgraph(), asmjs_origin_);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- scope.zone());
+ data->js_heap_broker(), data->common(),
+ data->machine(), scope.zone());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
- pipeline_.RunPrintAndVerify("wasm optimization", true);
+ } else {
+ PipelineRunScope scope(data, "wasm base optimization");
+ GraphReducer graph_reducer(scope.zone(), data->graph(),
+ data->mcgraph()->Dead());
+ ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
+ AddReducer(data, &graph_reducer, &value_numbering);
+ graph_reducer.ReduceGraph();
+ }
+ pipeline_.RunPrintAndVerify("wasm optimization", true);
+
+ if (data_.node_origins()) {
+ data_.node_origins()->RemoveDecorator();
}
pipeline_.ComputeScheduledGraph();
if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
- return SUCCEEDED;
-}
-size_t PipelineWasmCompilationJob::AllocatedMemory() const {
- return pipeline_.data_->zone_stats()->GetCurrentAllocatedBytes();
+ CodeGenerator* code_generator = pipeline_.data_->code_generator();
+ CodeDesc code_desc;
+ code_generator->tasm()->GetCode(nullptr, &code_desc);
+
+ wasm::WasmCode* code = native_module_->AddCode(
+ data_.wasm_function_index(), code_desc,
+ code_generator->frame()->GetTotalFrameSlotCount(),
+ code_generator->GetSafepointTableOffset(),
+ code_generator->GetHandlerTableOffset(),
+ data_.wasm_compilation_data()->GetProtectedInstructions(),
+ code_generator->GetSourcePositionTable(), wasm::WasmCode::kTurbofan);
+
+ if (data_.info()->trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(data_.info(), std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+#ifdef ENABLE_DISASSEMBLER
+ std::stringstream disassembler_stream;
+ Disassembler::Decode(
+ nullptr, &disassembler_stream, code->instructions().start(),
+ code->instructions().start() + code->safepoint_table_offset(),
+ CodeReference(code));
+ for (auto const c : disassembler_stream.str()) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n]";
+ json_of << "\n}";
+ }
+
+ compilation_info()->SetCode(code);
+
+ return SUCCEEDED;
}
PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
- CodeGenerator* code_generator = pipeline_.data_->code_generator();
- OptimizedCompilationInfo::WasmCodeDesc* wasm_code_desc =
- compilation_info()->wasm_code_desc();
- code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
- wasm_code_desc->safepoint_table_offset =
- code_generator->GetSafepointTableOffset();
- wasm_code_desc->handler_table_offset =
- code_generator->GetHandlerTableOffset();
- wasm_code_desc->frame_slot_count =
- code_generator->frame()->GetTotalFrameSlotCount();
- wasm_code_desc->source_positions_table =
- code_generator->GetSourcePositionTable();
+ UNREACHABLE(); // Finalize should always be skipped for WasmCompilationJob.
return SUCCEEDED;
}
-void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
-#if DEBUG
- // We expect the only embedded objects to be those originating from
- // a snapshot, which are immovable.
- DisallowHeapAllocation no_gc;
- Handle<Code> result = pipeline_.data_->code();
- if (result.is_null()) return;
- // TODO(aseemgarg): remove this restriction when
- // wasm-to-js is also internally immovable to include WASM_TO_JS
- if (result->kind() != Code::WASM_FUNCTION) return;
- static const int kAllGCRefs = (1 << (RelocInfo::LAST_GCED_ENUM + 1)) - 1;
- for (RelocIterator it(*result, kAllGCRefs); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- Object* target = nullptr;
- switch (mode) {
- case RelocInfo::CODE_TARGET:
- // this would be either one of the stubs or builtins, because
- // we didn't link yet.
- target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- break;
- case RelocInfo::EMBEDDED_OBJECT:
- target = it.rinfo()->target_object();
- break;
- default:
- UNREACHABLE();
- }
- CHECK_NOT_NULL(target);
- bool is_immovable =
- target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
- bool is_wasm = target->IsCode() &&
- (Code::cast(target)->kind() == Code::WASM_FUNCTION ||
- Code::cast(target)->kind() == Code::WASM_TO_JS_FUNCTION);
- CHECK(is_immovable || is_wasm);
- }
-#endif
-}
-
template <typename Phase>
void PipelineImpl::Run() {
PipelineRunScope scope(this->data_, Phase::phase_name());
@@ -1061,7 +1145,7 @@ struct GraphBuilderPhase {
}
BytecodeGraphBuilder graph_builder(
temp_zone, data->info()->shared_info(),
- handle(data->info()->closure()->feedback_vector()),
+ handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), data->native_context(),
SourcePosition::kNotInlined, flags, true,
@@ -1077,7 +1161,8 @@ Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
size_t distance = 0;
while (!current->IsNativeContext()) {
if (current->IsModuleContext()) {
- return Just(OuterContext(handle(current), distance));
+ return Just(
+ OuterContext(handle(current, current->GetIsolate()), distance));
}
current = current->previous();
distance++;
@@ -1086,10 +1171,10 @@ Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
}
Maybe<OuterContext> ChooseSpecializationContext(
- OptimizedCompilationInfo* info) {
+ Isolate* isolate, OptimizedCompilationInfo* info) {
if (info->is_function_context_specializing()) {
DCHECK(info->has_context());
- return Just(OuterContext(handle(info->context()), 0));
+ return Just(OuterContext(handle(info->context(), isolate), 0));
}
return GetModuleContext(info->closure());
}
@@ -1100,23 +1185,24 @@ struct InliningPhase {
static const char* phase_name() { return "inlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
+ Isolate* isolate = data->isolate();
GraphReducer graph_reducer(temp_zone, data->graph(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
+ data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+ data->js_heap_broker(),
data->info()->is_bailout_on_uninitialized()
? JSCallReducer::kBailoutOnUninitialized
: JSCallReducer::kNoFlags,
- data->native_context(),
- data->info()->dependencies());
+ data->native_context(), data->dependencies());
JSContextSpecialization context_specialization(
- &graph_reducer, data->jsgraph(),
- ChooseSpecializationContext(data->info()),
+ &graph_reducer, data->jsgraph(), data->js_heap_broker(),
+ ChooseSpecializationContext(isolate, data->info()),
data->info()->is_function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
@@ -1129,8 +1215,8 @@ struct InliningPhase {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
JSNativeContextSpecialization native_context_specialization(
- &graph_reducer, data->jsgraph(), flags, data->native_context(),
- data->info()->dependencies(), temp_zone);
+ &graph_reducer, data->jsgraph(), data->js_heap_broker(), flags,
+ data->native_context(), data->dependencies(), temp_zone);
JSInliningHeuristic inlining(
&graph_reducer, data->info()->is_inlining_enabled()
? JSInliningHeuristic::kGeneralInlining
@@ -1201,19 +1287,22 @@ struct TypedLoweringPhase {
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- JSCreateLowering create_lowering(
- &graph_reducer, data->info()->dependencies(), data->jsgraph(),
- data->native_context(), temp_zone);
- JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
- ConstantFoldingReducer constant_folding_reducer(&graph_reducer,
- data->jsgraph());
- TypedOptimization typed_optimization(
- &graph_reducer, data->info()->dependencies(), data->jsgraph());
- SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
+ data->jsgraph(), data->js_heap_broker(),
+ data->native_context(), temp_zone);
+ JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
+ data->js_heap_broker(), temp_zone);
+ ConstantFoldingReducer constant_folding_reducer(
+ &graph_reducer, data->jsgraph(), data->js_heap_broker());
+ TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
+ data->jsgraph(),
+ data->js_heap_broker());
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
+ data->js_heap_broker());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
+ data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &create_lowering);
AddReducer(data, &graph_reducer, &constant_folding_reducer);
@@ -1248,8 +1337,9 @@ struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SimplifiedLowering lowering(data->jsgraph(), temp_zone,
- data->source_positions(), data->node_origins(),
+ SimplifiedLowering lowering(data->jsgraph(), data->js_heap_broker(),
+ temp_zone, data->source_positions(),
+ data->node_origins(),
data->info()->GetPoisoningMitigationLevel());
lowering.LowerAllNodes();
}
@@ -1295,11 +1385,13 @@ struct ConcurrentOptimizationPrepPhase {
// This is needed for escape analysis.
NodeProperties::SetType(
data->jsgraph()->FalseConstant(),
- Type::HeapConstant(data->isolate()->factory()->false_value(),
+ Type::HeapConstant(data->js_heap_broker(),
+ data->isolate()->factory()->false_value(),
data->jsgraph()->zone()));
NodeProperties::SetType(
data->jsgraph()->TrueConstant(),
- Type::HeapConstant(data->isolate()->factory()->true_value(),
+ Type::HeapConstant(data->js_heap_broker(),
+ data->isolate()->factory()->true_value(),
data->jsgraph()->zone()));
}
};
@@ -1324,13 +1416,14 @@ struct EarlyOptimizationPhase {
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
+ data->js_heap_broker());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
+ data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1371,7 +1464,7 @@ struct EffectControlLinearizationPhase {
Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
Scheduler::kTempSchedule);
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
- TraceSchedule(data->info(), data->isolate(), schedule,
+ TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
@@ -1399,9 +1492,9 @@ struct EffectControlLinearizationPhase {
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -1438,12 +1531,12 @@ struct LoadEliminationPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
- ConstantFoldingReducer constant_folding_reducer(&graph_reducer,
- data->jsgraph());
- TypeNarrowingReducer type_narrowing_reducer(&graph_reducer,
- data->jsgraph());
+ data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
+ ConstantFoldingReducer constant_folding_reducer(
+ &graph_reducer, data->jsgraph(), data->js_heap_broker());
+ TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
+ data->js_heap_broker());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1490,8 +1583,8 @@ struct LateOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine(),
- temp_zone);
+ data->js_heap_broker(), data->common(),
+ data->machine(), temp_zone);
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
@@ -1541,6 +1634,37 @@ struct ComputeSchedulePhase {
}
};
+struct InstructionRangesAsJSON {
+ const InstructionSequence* sequence;
+ const ZoneVector<std::pair<int, int>>* instr_origins;
+};
+
+std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
+ const int max = static_cast<int>(s.sequence->LastInstructionIndex());
+
+ out << ", \"nodeIdToInstructionRange\": {";
+ bool need_comma = false;
+ for (size_t i = 0; i < s.instr_origins->size(); ++i) {
+ std::pair<int, int> offset = (*s.instr_origins)[i];
+ if (offset.first == -1) continue;
+ const int first = max - offset.first + 1;
+ const int second = max - offset.second + 1;
+ if (need_comma) out << ", ";
+ out << "\"" << i << "\": [" << first << ", " << second << "]";
+ need_comma = true;
+ }
+ out << "}";
+ out << ", \"blockIdtoInstructionRange\": {";
+ need_comma = false;
+ for (auto block : s.sequence->instruction_blocks()) {
+ if (need_comma) out << ", ";
+ out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
+ << block->code_end() << "]";
+ need_comma = true;
+ }
+ out << "}";
+ return out;
+}
struct InstructionSelectionPhase {
static const char* phase_name() { return "select instructions"; }
@@ -1559,13 +1683,24 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
- data->isolate()->serializer_enabled()
- ? InstructionSelector::kEnableSerialization
- : InstructionSelector::kDisableSerialization,
- data->info()->GetPoisoningMitigationLevel());
+ !data->isolate() || data->isolate()->serializer_enabled()
+ ? InstructionSelector::kDisableRootsRelativeAddressing
+ : InstructionSelector::kEnableRootsRelativeAddressing,
+ data->info()->GetPoisoningMitigationLevel(),
+ data->info()->trace_turbo_json_enabled()
+ ? InstructionSelector::kEnableTraceTurboJson
+ : InstructionSelector::kDisableTraceTurboJson);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
+ if (data->info()->trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(data->info(), std::ios_base::app);
+ json_of << "{\"name\":\"" << phase_name()
+ << "\",\"type\":\"instructions\""
+ << InstructionRangesAsJSON{data->sequence(),
+ &selector.instr_origins()}
+ << "},\n";
+ }
}
};
@@ -1730,7 +1865,7 @@ struct JumpThreadingPhase {
ZoneVector<RpoNumber> result(temp_zone);
if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
frame_at_start)) {
- JumpThreading::ApplyForwarding(result, data->sequence());
+ JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
}
}
};
@@ -1777,13 +1912,13 @@ struct PrintGraphPhase {
}
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsScheduledGraph(schedule);
} else if (info->trace_turbo_graph_enabled()) { // Simple textual RPO.
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
@@ -1832,7 +1967,7 @@ bool PipelineImpl::CreateGraph() {
if (info()->trace_turbo_json_enabled() ||
info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "---------------------------------------------------\n"
<< "Begin compiling method " << info()->GetDebugName().get()
@@ -1876,7 +2011,7 @@ bool PipelineImpl::CreateGraph() {
// Type the graph and keep the Typer running on newly created nodes within
// this scope; the Typer is automatically unlinked from the Graph once we
// leave this scope below.
- Typer typer(isolate(), flags, data->graph());
+ Typer typer(isolate(), data->js_heap_broker(), flags, data->graph());
Run<TyperPhase>(&typer);
RunPrintAndVerify(TyperPhase::phase_name());
@@ -1985,11 +2120,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
return SelectInstructions(linkage);
}
-Handle<Code> Pipeline::GenerateCodeForCodeStub(
+MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level) {
+ PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options) {
OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
info.set_builtin_index(builtin_index);
info.set_stub_key(stub_key);
@@ -2003,12 +2138,12 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
SourcePositionTable source_positions(graph);
NodeOriginTable node_origins(graph);
PipelineData data(&zone_stats, &info, isolate, graph, schedule,
- &source_positions, &node_origins, jump_opt);
+ &source_positions, &node_origins, jump_opt, options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(
- new PipelineStatistics(&info, isolate, &zone_stats));
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info, isolate->GetTurboStatistics(), &zone_stats));
pipeline_statistics->BeginPhaseKind("stub codegen");
}
@@ -2016,7 +2151,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
DCHECK_NOT_NULL(data.schedule());
if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
OFStream os(tracing_scope.file());
os << "---------------------------------------------------\n"
<< "Begin compiling " << debug_name << " using Turbofan" << std::endl;
@@ -2031,15 +2166,15 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("Machine");
}
- TraceSchedule(data.info(), data.isolate(), data.schedule(), "schedule");
+ TraceSchedule(data.info(), &data, data.schedule(), "schedule");
pipeline.Run<VerifyGraphPhase>(false, true);
return pipeline.GenerateCode(call_descriptor);
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(OptimizedCompilationInfo* info,
- Isolate* isolate) {
+MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
+ OptimizedCompilationInfo* info, Isolate* isolate) {
ZoneStats zone_stats(isolate->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
@@ -2050,25 +2185,22 @@ Handle<Code> Pipeline::GenerateCodeForTesting(OptimizedCompilationInfo* info,
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
- if (!pipeline.CreateGraph()) return Handle<Code>::null();
- if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
+ if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
+ if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
pipeline.AssembleCode(&linkage);
- return pipeline.FinalizeCode();
-}
-
-// static
-Handle<Code> Pipeline::GenerateCodeForTesting(OptimizedCompilationInfo* info,
- Isolate* isolate, Graph* graph,
- Schedule* schedule) {
- auto call_descriptor = Linkage::ComputeIncoming(info->zone(), info);
- return GenerateCodeForTesting(info, isolate, call_descriptor, graph,
- schedule);
+ Handle<Code> code;
+ if (pipeline.FinalizeCode().ToHandle(&code) &&
+ pipeline.CommitDependencies(code)) {
+ return code;
+ }
+ return MaybeHandle<Code>();
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(
+MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
- CallDescriptor* call_descriptor, Graph* graph, Schedule* schedule,
+ CallDescriptor* call_descriptor, Graph* graph,
+ const AssemblerOptions& options, Schedule* schedule,
SourcePositionTable* source_positions) {
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
@@ -2078,11 +2210,11 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
source_positions = new (info->zone()) SourcePositionTable(graph);
NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
PipelineData data(&zone_stats, info, isolate, graph, schedule,
- source_positions, node_positions, nullptr);
+ source_positions, node_positions, nullptr, options);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(
- new PipelineStatistics(info, isolate, &zone_stats));
+ pipeline_statistics.reset(new PipelineStatistics(
+ info, isolate->GetTurboStatistics(), &zone_stats));
pipeline_statistics->BeginPhaseKind("test codegen");
}
@@ -2101,25 +2233,35 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
pipeline.ComputeScheduledGraph();
}
- return pipeline.GenerateCode(call_descriptor);
+ Handle<Code> code;
+ if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
+ pipeline.CommitDependencies(code)) {
+ return code;
+ }
+ return MaybeHandle<Code>();
}
// static
OptimizedCompilationJob* Pipeline::NewCompilationJob(
- Handle<JSFunction> function, bool has_script) {
- Handle<SharedFunctionInfo> shared = handle(function->shared());
- return new PipelineCompilationJob(shared, function);
+ Isolate* isolate, Handle<JSFunction> function, bool has_script) {
+ Handle<SharedFunctionInfo> shared =
+ handle(function->shared(), function->GetIsolate());
+ return new PipelineCompilationJob(isolate, shared, function);
}
// static
OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
- OptimizedCompilationInfo* info, Isolate* isolate, MachineGraph* mcgraph,
- CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, WasmCompilationData* wasm_compilation_data,
+ OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
+ MachineGraph* mcgraph, CallDescriptor* call_descriptor,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ WasmCompilationData* wasm_compilation_data,
+ wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
+ wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin asmjs_origin) {
- return new PipelineWasmCompilationJob(info, isolate, mcgraph, call_descriptor,
- source_positions, node_origins,
- wasm_compilation_data, asmjs_origin);
+ return new PipelineWasmCompilationJob(
+ info, wasm_engine, mcgraph, call_descriptor, source_positions,
+ node_origins, wasm_compilation_data, function_body, wasm_module,
+ native_module, function_index, asmjs_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -2145,7 +2287,7 @@ void PipelineImpl::ComputeScheduledGraph() {
RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
Run<ComputeSchedulePhase>();
- TraceSchedule(data->info(), data->isolate(), data->schedule(), "schedule");
+ TraceSchedule(data->info(), data, data->schedule(), "schedule");
}
bool PipelineImpl::SelectInstructions(Linkage* linkage) {
@@ -2176,7 +2318,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
!strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
if (FLAG_trace_verify_csa) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "--------------------------------------------------\n"
<< "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
@@ -2186,7 +2328,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
<< "--- End of " << data->debug_name() << " generated by TurboFan\n"
<< "--------------------------------------------------\n";
}
- Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
+ Zone temp_zone(data->allocator(), ZONE_NAME);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
data->info()->IsStub(), data->debug_name(),
&temp_zone);
@@ -2241,6 +2383,9 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
run_verifier);
}
+ // Verify the instruction sequence has the same hash in two stages.
+ VerifyGeneratedCodeIsIdempotent();
+
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(
@@ -2262,36 +2407,106 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
return true;
}
+void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
+ PipelineData* data = this->data_;
+ JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
+ if (jump_opt == nullptr) return;
+
+ InstructionSequence* code = data->sequence();
+ int instruction_blocks = code->InstructionBlockCount();
+ int virtual_registers = code->VirtualRegisterCount();
+ size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
+ for (auto instr : *code) {
+ hash_code = base::hash_combine(hash_code, instr->opcode(),
+ instr->InputCount(), instr->OutputCount());
+ }
+ for (int i = 0; i < virtual_registers; i++) {
+ hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
+ }
+ if (jump_opt->is_collecting()) {
+ jump_opt->set_hash_code(hash_code);
+ } else {
+ CHECK_EQ(hash_code, jump_opt->hash_code());
+ }
+}
+
+struct InstructionStartsAsJSON {
+ const ZoneVector<int>* instr_starts;
+};
+
+std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
+ out << ", \"instructionOffsetToPCOffset\": {";
+ bool need_comma = false;
+ for (size_t i = 0; i < s.instr_starts->size(); ++i) {
+ if (need_comma) out << ", ";
+ int offset = (*s.instr_starts)[i];
+ out << "\"" << i << "\":" << offset;
+ need_comma = true;
+ }
+ out << "}";
+ return out;
+}
+
void PipelineImpl::AssembleCode(Linkage* linkage) {
PipelineData* data = this->data_;
data->BeginPhaseKind("code generation");
data->InitializeCodeGenerator(linkage);
Run<AssembleCodePhase>();
+ if (data->info()->trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(data->info(), std::ios_base::app);
+ json_of << "{\"name\":\"code generation\""
+ << ", \"type\":\"instructions\""
+ << InstructionStartsAsJSON{&data->code_generator()->instr_starts()};
+ json_of << "},\n";
+ }
data->DeleteInstructionZone();
}
-Handle<Code> PipelineImpl::FinalizeCode() {
+struct BlockStartsAsJSON {
+ const ZoneVector<int>* block_starts;
+};
+
+std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
+ out << ", \"blockIdToOffset\": {";
+ bool need_comma = false;
+ for (size_t i = 0; i < s.block_starts->size(); ++i) {
+ if (need_comma) out << ", ";
+ int offset = (*s.block_starts)[i];
+ out << "\"" << i << "\":" << offset;
+ need_comma = true;
+ }
+ out << "},";
+ return out;
+}
+
+MaybeHandle<Code> PipelineImpl::FinalizeCode() {
PipelineData* data = this->data_;
Run<FinalizeCodePhase>();
- Handle<Code> code = data->code();
- if (code.is_null()) return code;
+ MaybeHandle<Code> maybe_code = data->code();
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ return maybe_code;
+ }
if (data->profiler_data()) {
-#if ENABLE_DISASSEMBLER
+#ifdef ENABLE_DISASSEMBLER
std::ostringstream os;
code->Disassemble(nullptr, os);
data->profiler_data()->SetCode(&os);
-#endif
+#endif // ENABLE_DISASSEMBLER
}
info()->SetCode(code);
- PrintCode(code, info());
+ PrintCode(isolate(), code, info());
if (info()->trace_turbo_json_enabled()) {
TurboJsonFile json_of(info(), std::ios_base::app);
- json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
-#if ENABLE_DISASSEMBLER
+
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
+ << BlockStartsAsJSON{&data->code_generator()->block_starts()}
+ << "\"data\":\"";
+#ifdef ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
code->Disassemble(nullptr, disassembly_stream);
std::string disassembly_string(disassembly_stream.str());
@@ -2307,7 +2522,7 @@ Handle<Code> PipelineImpl::FinalizeCode() {
}
if (info()->trace_turbo_json_enabled() ||
info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "---------------------------------------------------\n"
<< "Finished compiling method " << info()->GetDebugName().get()
@@ -2316,17 +2531,22 @@ Handle<Code> PipelineImpl::FinalizeCode() {
return code;
}
-Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
+MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
Linkage linkage(call_descriptor);
// Perform instruction selection and register allocation.
- if (!SelectInstructions(&linkage)) return Handle<Code>();
+ if (!SelectInstructions(&linkage)) return MaybeHandle<Code>();
// Generate the final machine code.
AssembleCode(&linkage);
return FinalizeCode();
}
+bool PipelineImpl::CommitDependencies(Handle<Code> code) {
+ return data_->dependencies() == nullptr ||
+ data_->dependencies()->Commit(code);
+}
+
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier) {
@@ -2335,7 +2555,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.reset(new Zone(isolate()->allocator(), ZONE_NAME));
+ verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
@@ -2354,7 +2574,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<BuildLiveRangesPhase>();
if (info()->trace_turbo_graph_enabled()) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "----- Instruction sequence before register allocation -----\n"
<< PrintableInstructionSequence({config, data->sequence()});
@@ -2398,7 +2618,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (info()->trace_turbo_graph_enabled()) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "----- Instruction sequence after register allocation -----\n"
<< PrintableInstructionSequence({config, data->sequence()});
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 3f8affc61b..95d13f3169 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -10,11 +10,11 @@
#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/code.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+struct AssemblerOptions;
class OptimizedCompilationInfo;
class OptimizedCompilationJob;
class RegisterConfiguration;
@@ -22,6 +22,10 @@ class JumpOptimizationInfo;
namespace wasm {
enum ModuleOrigin : uint8_t;
+struct FunctionBody;
+class NativeModule;
+class WasmEngine;
+struct WasmModule;
} // namespace wasm
namespace compiler {
@@ -37,49 +41,51 @@ class WasmCompilationData;
class Pipeline : public AllStatic {
public:
- // Returns a new compilation job for the given function.
- static OptimizedCompilationJob* NewCompilationJob(Handle<JSFunction> function,
+ // Returns a new compilation job for the given JavaScript function.
+ static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate,
+ Handle<JSFunction> function,
bool has_script);
// Returns a new compilation job for the WebAssembly compilation info.
static OptimizedCompilationJob* NewWasmCompilationJob(
- OptimizedCompilationInfo* info, Isolate* isolate, MachineGraph* mcgraph,
- CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, WasmCompilationData* wasm_compilation_data,
+ OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
+ MachineGraph* mcgraph, CallDescriptor* call_descriptor,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ WasmCompilationData* wasm_compilation_data,
+ wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
+ wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
- static Handle<Code> GenerateCodeForCodeStub(
+ static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level);
+ PoisoningMitigationLevel poisoning_level,
+ const AssemblerOptions& options);
- // Run the entire pipeline and generate a handle to a code object suitable for
- // testing.
- static Handle<Code> GenerateCodeForTesting(OptimizedCompilationInfo* info,
- Isolate* isolate);
+ // ---------------------------------------------------------------------------
+ // The following methods are for testing purposes only. Avoid production use.
+ // ---------------------------------------------------------------------------
+
+ // Run the pipeline on JavaScript bytecode and generate code.
+ static MaybeHandle<Code> GenerateCodeForTesting(
+ OptimizedCompilationInfo* info, Isolate* isolate);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(OptimizedCompilationInfo* info,
- Isolate* isolate, Graph* graph,
- Schedule* schedule = nullptr);
+ V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ CallDescriptor* call_descriptor, Graph* graph,
+ const AssemblerOptions& options, Schedule* schedule = nullptr,
+ SourcePositionTable* source_positions = nullptr);
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
const RegisterConfiguration* config, InstructionSequence* sequence,
bool run_verifier);
- // Run the pipeline on a machine graph and generate code. If {schedule} is
- // {nullptr}, then compute a new schedule for code generation.
- V8_EXPORT_PRIVATE static Handle<Code> GenerateCodeForTesting(
- OptimizedCompilationInfo* info, Isolate* isolate,
- CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule = nullptr,
- SourcePositionTable* source_positions = nullptr);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
};
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index f25fae6dfe..9c71d65d9c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -913,9 +913,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
if (HasRegisterInput(instr, 0)) {
- __ addi(ip, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@@ -927,20 +930,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_PPC64
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
#endif
- __ Call(wasm_code, rmode);
+ __ Call(wasm_code, constant.rmode());
} else {
__ Call(i.InputRegister(0));
}
@@ -957,9 +954,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (HasRegisterInput(instr, 0)) {
- __ addi(ip, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -974,15 +974,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallWasm: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Jump(wasm_code, rmode);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+#ifdef V8_TARGET_ARCH_S390X
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+#else
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+#endif
+ __ Jump(wasm_code, constant.rmode());
} else {
__ Jump(i.InputRegister(0));
}
@@ -993,7 +992,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -1092,6 +1095,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1149,13 +1155,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mr(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ mr(i.OutputRegister(), kRootRegister);
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -2114,31 +2116,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
PPCOperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED, true);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2154,8 +2144,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2166,12 +2157,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Label end;
@@ -2249,6 +2238,16 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
@@ -2260,7 +2259,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2350,33 +2348,88 @@ void CodeGenerator::AssembleConstructFrame() {
ResetSpeculationPoison();
}
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
+
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ Register scratch = ip;
+ __ LoadP(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ LoadP(scratch, MemOperand(scratch), r0);
+ __ Add(scratch, scratch, shrink_slots * kPointerSize, r0);
+ __ cmpl(sp, scratch);
+ __ bge(&done);
+ }
+
+ __ LoadP(r5,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset),
+ r0);
+ __ Move(cp, Smi::kZero);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r5);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ }
+
+ __ bind(&done);
+ }
+
+ // Skip callee-saved and return slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
+ shrink_slots -=
+ (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
__ Add(sp, sp, -shrink_slots * kPointerSize, r0);
}
// Save callee-saved Double registers.
- if (double_saves != 0) {
- __ MultiPushDoubles(double_saves);
- DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation(double_saves));
+ if (saves_fp != 0) {
+ __ MultiPushDoubles(saves_fp);
+ DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
}
// Save callee-saved registers.
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? call_descriptor->CalleeSavedRegisters() &
- ~kConstantPoolRegister.bit()
- : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
}
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ Add(sp, sp, -returns * kPointerSize, r0);
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ Add(sp, sp, returns * kPointerSize, r0);
+ }
+
// Restore registers.
const RegList saves = FLAG_enable_embedded_constant_pool
? call_descriptor->CalleeSavedRegisters() &
@@ -2483,7 +2536,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
- __ mov(dst, Operand(src.ToExternalReference()));
+ __ Move(dst, src.ToExternalReference());
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 9638a50f7b..b02e80d389 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -1719,8 +1719,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index ce1bece51c..d319304df6 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -4,9 +4,9 @@
#include "src/compiler/property-access-builder.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
@@ -136,7 +136,8 @@ void PropertyAccessBuilder::BuildCheckMaps(
if (receiver_map->is_stable()) {
for (Handle<Map> map : receiver_maps) {
if (map.is_identical_to(receiver_map)) {
- dependencies()->AssumeMapStable(receiver_map);
+ dependencies()->DependOnStableMap(
+ MapRef(js_heap_broker(), receiver_map));
return;
}
}
@@ -168,22 +169,6 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Node** effect,
return expected;
}
-void PropertyAccessBuilder::AssumePrototypesStable(
- Handle<Context> native_context,
- std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
- // Determine actual holder and perform prototype chain checks.
- for (auto map : receiver_maps) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context)
- .ToHandle(&constructor)) {
- map = handle(constructor->initial_map(), holder->GetIsolate());
- }
- dependencies()->AssumePrototypeMapsStable(map, holder);
- }
-}
-
Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* receiver) {
Handle<JSObject> holder;
@@ -209,20 +194,22 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
// TODO(turbofan): Given that we already have the field_index here, we
// might be smarter in the future and not rely on the LookupIterator,
// but for now let's just do what Crankshaft does.
- LookupIterator it(m.Value(), name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate(), m.Value(), name,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::DATA) {
- bool is_reaonly_non_configurable =
+ bool is_readonly_non_configurable =
it.IsReadOnly() && !it.IsConfigurable();
- if (is_reaonly_non_configurable ||
+ if (is_readonly_non_configurable ||
(FLAG_track_constant_fields && access_info.IsDataConstantField())) {
Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
- if (!is_reaonly_non_configurable) {
+ if (!is_readonly_non_configurable) {
// It's necessary to add dependency on the map that introduced
// the field.
DCHECK(access_info.IsDataConstantField());
DCHECK(!it.is_dictionary_holder());
- Handle<Map> field_owner_map = it.GetFieldOwnerMap();
- dependencies()->AssumeFieldOwner(field_owner_map);
+ MapRef map(js_heap_broker(),
+ handle(it.GetHolder<HeapObject>()->map(), isolate()));
+ dependencies()->DependOnFieldType(map, it.GetFieldDescriptorIndex());
}
return value;
}
@@ -280,7 +267,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
if (field_map->is_stable()) {
- dependencies()->AssumeMapStable(field_map);
+ dependencies()->DependOnStableMap(MapRef(js_heap_broker(), field_map));
field_access.map = field_map;
}
}
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 13a0f0b46f..7b569a9a12 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -13,22 +13,24 @@
namespace v8 {
namespace internal {
-
-class CompilationDependencies;
-
namespace compiler {
class CommonOperatorBuilder;
+class CompilationDependencies;
class Graph;
class JSGraph;
+class JSHeapBroker;
class Node;
class PropertyAccessInfo;
class SimplifiedOperatorBuilder;
class PropertyAccessBuilder {
public:
- PropertyAccessBuilder(JSGraph* jsgraph, CompilationDependencies* dependencies)
- : jsgraph_(jsgraph), dependencies_(dependencies) {}
+ PropertyAccessBuilder(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ CompilationDependencies* dependencies)
+ : jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker),
+ dependencies_(dependencies) {}
// Builds the appropriate string check if the maps are only string
// maps.
@@ -44,12 +46,6 @@ class PropertyAccessBuilder {
Node* BuildCheckValue(Node* receiver, Node** effect, Node* control,
Handle<HeapObject> value);
- // Adds stability dependencies on all prototypes of every class in
- // {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(Handle<Context> native_context,
- std::vector<Handle<Map>> const& receiver_maps,
- Handle<JSObject> holder);
-
// Builds the actual load for data-field and data-constant-field
// properties (without heap-object or map checks).
Node* BuildLoadDataField(Handle<Name> name,
@@ -58,6 +54,7 @@ class PropertyAccessBuilder {
private:
JSGraph* jsgraph() const { return jsgraph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Graph* graph() const;
Isolate* isolate() const;
@@ -72,6 +69,7 @@ class PropertyAccessBuilder {
Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
JSGraph* jsgraph_;
+ const JSHeapBroker* js_heap_broker_;
CompilationDependencies* dependencies_;
};
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 778481f8c0..9e7bc9a611 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -24,12 +24,17 @@ RawMachineAssembler::RawMachineAssembler(
machine_(zone(), word, flags, alignment_requirements),
common_(zone()),
call_descriptor_(call_descriptor),
+ target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
current_block_(schedule()->start()),
poisoning_level_(poisoning_level) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
+ if (call_descriptor->IsJSFunctionCall()) {
+ target_parameter_ = AddNode(
+ common()->Parameter(Linkage::kJSCallClosureParamIndex), graph->start());
+ }
for (size_t i = 0; i < parameter_count(); ++i) {
parameters_[i] =
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
@@ -55,17 +60,16 @@ Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
DCHECK(schedule_->rpo_order()->empty());
- OFStream os(stdout);
if (FLAG_trace_turbo_scheduler) {
PrintF("--- RAW SCHEDULE -------------------------------------------\n");
- os << *schedule_;
+ StdoutStream{} << *schedule_;
}
schedule_->EnsureCFGWellFormedness();
Scheduler::ComputeSpecialRPO(zone(), schedule_);
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
- os << *schedule_;
+ StdoutStream{} << *schedule_;
}
// Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
@@ -73,6 +77,10 @@ Schedule* RawMachineAssembler::Export() {
return schedule;
}
+Node* RawMachineAssembler::TargetParameter() {
+ DCHECK_NOT_NULL(target_parameter_);
+ return target_parameter_;
+}
Node* RawMachineAssembler::Parameter(size_t index) {
DCHECK(index < parameter_count());
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index ea4636b6d6..ff8bebc411 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -741,10 +741,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->LoadParentFramePointer());
}
- // Root pointer operations.
- Node* LoadRootsPointer() { return AddNode(machine()->LoadRootsPointer()); }
-
// Parameters.
+ Node* TargetParameter();
Node* Parameter(size_t index);
// Pointer utilities.
@@ -939,6 +937,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
+ Node* target_parameter_;
NodeVector parameters_;
BasicBlock* current_block_;
PoisoningMitigationLevel poisoning_level_;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 8c150d975a..f294471fd3 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -312,7 +312,7 @@ void BlockAssessments::DropRegisters() {
}
void BlockAssessments::Print() const {
- OFStream os(stdout);
+ StdoutStream os;
for (const auto pair : map()) {
const InstructionOperand op = pair.first;
const Assessment* assessment = pair.second;
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 8bf8337bd1..1938ef22b6 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -376,12 +376,7 @@ UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
return after;
}
-
-void LifetimePosition::Print() const {
- OFStream os(stdout);
- os << *this << std::endl;
-}
-
+void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
os << '@' << pos.ToInstructionIndex();
@@ -807,7 +802,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
void LiveRange::Print(const RegisterConfiguration* config,
bool with_children) const {
- OFStream os(stdout);
+ StdoutStream os;
PrintableLiveRange wrapper;
wrapper.register_configuration_ = config;
for (const LiveRange* i = this; i != nullptr; i = i->next()) {
@@ -1316,7 +1311,7 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
void SpillRange::Print() const {
- OFStream os(stdout);
+ StdoutStream os;
os << "{" << std::endl;
for (TopLevelLiveRange* range : live_ranges()) {
os << range->vreg() << " ";
@@ -2747,15 +2742,12 @@ const char* RegisterAllocator::RegisterName(int register_code) const {
}
}
-
LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
unhandled_live_ranges_(local_zone),
active_live_ranges_(local_zone),
inactive_live_ranges_(local_zone) {
- unhandled_live_ranges().reserve(
- static_cast<size_t>(code()->VirtualRegisterCount() * 2));
active_live_ranges().reserve(8);
inactive_live_ranges().reserve(8);
// TryAllocateFreeReg and AllocateBlockedReg assume this
@@ -2780,12 +2772,10 @@ void LinearScanAllocator::AllocateRegisters() {
for (LiveRange* to_add = range; to_add != nullptr;
to_add = to_add->next()) {
if (!to_add->spilled()) {
- AddToUnhandledUnsorted(to_add);
+ AddToUnhandled(to_add);
}
}
}
- SortUnhandled();
- DCHECK(UnhandledIsSorted());
if (mode() == GENERAL_REGISTERS) {
for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
@@ -2806,10 +2796,8 @@ void LinearScanAllocator::AllocateRegisters() {
}
while (!unhandled_live_ranges().empty()) {
- DCHECK(UnhandledIsSorted());
- LiveRange* current = unhandled_live_ranges().back();
- unhandled_live_ranges().pop_back();
- DCHECK(UnhandledIsSorted());
+ LiveRange* current = *unhandled_live_ranges().begin();
+ unhandled_live_ranges().erase(unhandled_live_ranges().begin());
LifetimePosition position = current->Start();
#ifdef DEBUG
allocation_finger_ = position;
@@ -2862,7 +2850,7 @@ bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
return false;
} else if (next_reg->pos().PrevStart() > range->Start()) {
LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
- AddToUnhandledSorted(tail);
+ AddToUnhandled(tail);
Spill(range);
return true;
}
@@ -2893,63 +2881,14 @@ void LinearScanAllocator::AddToInactive(LiveRange* range) {
inactive_live_ranges().push_back(range);
}
-
-void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
+void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
DCHECK(!range->HasRegisterAssigned() && !range->spilled());
DCHECK(allocation_finger_ <= range->Start());
- for (size_t i = unhandled_live_ranges().size(); i-- > 0;) {
- LiveRange* cur_range = unhandled_live_ranges().at(i);
- if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
- TRACE("Add live range %d:%d to unhandled at %zu\n",
- range->TopLevel()->vreg(), range->relative_id(), i + 1);
- auto it = unhandled_live_ranges().begin() + (i + 1);
- unhandled_live_ranges().insert(it, range);
- DCHECK(UnhandledIsSorted());
- return;
- }
- TRACE("Add live range %d:%d to unhandled at start\n",
- range->TopLevel()->vreg(), range->relative_id());
- unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
- DCHECK(UnhandledIsSorted());
-}
-
-void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
- if (range == nullptr || range->IsEmpty()) return;
- DCHECK(!range->HasRegisterAssigned() && !range->spilled());
- TRACE("Add live range %d:%d to unhandled unsorted at end\n",
- range->TopLevel()->vreg(), range->relative_id());
- unhandled_live_ranges().push_back(range);
-}
-
-
-static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
- DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
- if (a->ShouldBeAllocatedBefore(b)) return false;
- if (b->ShouldBeAllocatedBefore(a)) return true;
- return a->TopLevel()->vreg() < b->TopLevel()->vreg();
-}
-
-
-// Sort the unhandled live ranges so that the ranges to be processed first are
-// at the end of the array list. This is convenient for the register allocation
-// algorithm because it is efficient to remove elements from the end.
-void LinearScanAllocator::SortUnhandled() {
- TRACE("Sort unhandled\n");
- std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
- &UnhandledSortHelper);
-}
-
-
-bool LinearScanAllocator::UnhandledIsSorted() {
- size_t len = unhandled_live_ranges().size();
- for (size_t i = 1; i < len; i++) {
- LiveRange* a = unhandled_live_ranges().at(i - 1);
- LiveRange* b = unhandled_live_ranges().at(i);
- if (a->Start() < b->Start()) return false;
- }
- return true;
+ TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
+ range->relative_id());
+ unhandled_live_ranges().insert(range);
}
@@ -3150,11 +3089,22 @@ bool LinearScanAllocator::TryAllocateFreeReg(
DCHECK_GE(free_until_pos.length(), num_codes);
- // Find the register which stays free for the longest time.
- int reg = codes[0];
- for (int i = 1; i < num_codes; ++i) {
+ // Find the register which stays free for the longest time. Check for
+ // the hinted register first, as we might want to use that one. Only
+ // count full instructions for free ranges, as an instruction's internal
+ // positions do not help but might shadow a hinted register. This is
+ // typically the case for function calls, where all registers are
+ // clobbered after the call except for the argument registers, which are
+ // set before the call. Hence, the argument registers always get ignored,
+ // as their available time is shorter.
+ int reg;
+ if (current->FirstHintPosition(&reg) == nullptr) {
+ reg = codes[0];
+ }
+ for (int i = 0; i < num_codes; ++i) {
int code = codes[i];
- if (free_until_pos[code] > free_until_pos[reg]) {
+ if (free_until_pos[code].ToInstructionIndex() >
+ free_until_pos[reg].ToInstructionIndex()) {
reg = code;
}
}
@@ -3170,7 +3120,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
LiveRange* tail = SplitRangeAt(current, pos);
- AddToUnhandledSorted(tail);
+ AddToUnhandled(tail);
// Try to allocate preferred register once more.
if (TryAllocatePreferredReg(current, free_until_pos)) return true;
@@ -3316,7 +3266,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// position.
LiveRange* tail =
SplitBetween(current, current->Start(), block_pos[reg].Start());
- AddToUnhandledSorted(tail);
+ AddToUnhandled(tail);
}
// Register reg is not blocked for the whole range.
@@ -3479,7 +3429,6 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
bool merged = first_op_spill->TryMerge(spill_range);
if (!merged) return false;
SpillBetween(range, range->Start(), pos->pos());
- DCHECK(UnhandledIsSorted());
return true;
}
return false;
@@ -3519,11 +3468,11 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
DCHECK(third_part != second_part);
Spill(second_part);
- AddToUnhandledSorted(third_part);
+ AddToUnhandled(third_part);
} else {
// The split result does not intersect with [start, end[.
// Nothing to spill. Just put it to unhandled as whole.
- AddToUnhandledSorted(second_part);
+ AddToUnhandled(second_part);
}
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index b5d73a5bba..b5286e8e95 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -308,7 +308,6 @@ class V8_EXPORT_PRIVATE UsePosition final
class SpillRange;
class RegisterAllocationData;
class TopLevelLiveRange;
-class LiveRangeGroup;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
@@ -473,21 +472,6 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
};
-class LiveRangeGroup final : public ZoneObject {
- public:
- explicit LiveRangeGroup(Zone* zone) : ranges_(zone) {}
- ZoneVector<LiveRange*>& ranges() { return ranges_; }
- const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
-
- int assigned_register() const { return assigned_register_; }
- void set_assigned_register(int reg) { assigned_register_ = reg; }
-
- private:
- ZoneVector<LiveRange*> ranges_;
- int assigned_register_;
- DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
-};
-
class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
@@ -1056,9 +1040,13 @@ class LinearScanAllocator final : public RegisterAllocator {
void AllocateRegisters();
private:
- ZoneVector<LiveRange*>& unhandled_live_ranges() {
- return unhandled_live_ranges_;
- }
+ struct LiveRangeOrdering {
+ bool operator()(LiveRange* a, LiveRange* b) {
+ return a->ShouldBeAllocatedBefore(b);
+ }
+ };
+ using LiveRangeQueue = ZoneMultiset<LiveRange*, LiveRangeOrdering>;
+ LiveRangeQueue& unhandled_live_ranges() { return unhandled_live_ranges_; }
ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
ZoneVector<LiveRange*>& inactive_live_ranges() {
return inactive_live_ranges_;
@@ -1069,10 +1057,7 @@ class LinearScanAllocator final : public RegisterAllocator {
// Helper methods for updating the life range lists.
void AddToActive(LiveRange* range);
void AddToInactive(LiveRange* range);
- void AddToUnhandledSorted(LiveRange* range);
- void AddToUnhandledUnsorted(LiveRange* range);
- void SortUnhandled();
- bool UnhandledIsSorted();
+ void AddToUnhandled(LiveRange* range);
void ActiveToHandled(LiveRange* range);
void ActiveToInactive(LiveRange* range);
void InactiveToHandled(LiveRange* range);
@@ -1106,7 +1091,7 @@ class LinearScanAllocator final : public RegisterAllocator {
void SplitAndSpillIntersecting(LiveRange* range);
- ZoneVector<LiveRange*> unhandled_live_ranges_;
+ LiveRangeQueue unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
ZoneVector<LiveRange*> inactive_live_ranges_;
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 7ecbc405cd..01c80e6954 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -900,12 +900,12 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
{ \
__ LoadlW(temp0, MemOperand(addr, offset)); \
__ llgfr(temp1, temp0); \
- __ risbg(temp0, old_val, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ risbg(temp1, new_val, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
+ __ RotateInsertSelectBits(temp0, old_val, Operand(start), \
+ Operand(end), Operand(shift_amount), false); \
+ __ RotateInsertSelectBits(temp1, new_val, Operand(start), \
+ Operand(end), Operand(shift_amount), false); \
__ CmpAndSwap(temp0, temp1, MemOperand(addr, offset)); \
- __ risbg(output, temp0, Operand(start+shift_amount), \
+ __ RotateInsertSelectBits(output, temp0, Operand(start+shift_amount), \
Operand(end+shift_amount), Operand(64-shift_amount), true); \
}
@@ -1019,26 +1019,16 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ CmpAndSwap(output, new_val, MemOperand(addr)); \
} while (false)
-// TODO(vasili.skurydzin): use immediate operand for value and
-// SI-formatted instructions (i.e. ASI/AGSI for add) to update
-// memory atomically
-#define ASSEMBLE_ATOMIC_BINOP_WORD(bin_inst, load_and_ext) \
+#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op) \
do { \
Register value = i.InputRegister(2); \
Register result = i.OutputRegister(0); \
Register addr = r1; \
- Register prev = r0; \
- Register next = kScratchReg; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode); \
- Label do_cs; \
__ lay(addr, op); \
- __ l(prev, MemOperand(addr)); \
- __ bind(&do_cs); \
- __ bin_inst(next, prev, value); \
- __ CmpAndSwap(prev, next, MemOperand(addr)); \
- __ bne(&do_cs, Label::kNear); \
- __ load_and_ext(result, prev); \
+ __ load_and_op(result, value, MemOperand(addr)); \
+ __ LoadlW(result, result); \
} while (false)
#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
@@ -1046,12 +1036,12 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
Label do_cs; \
__ LoadlW(prev, MemOperand(addr, offset)); \
__ bind(&do_cs); \
- __ risbg(temp, value, Operand(start), Operand(end), \
+ __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
Operand(static_cast<intptr_t>(shift_amount)), true); \
__ bin_inst(new_val, prev, temp); \
__ lr(temp, prev); \
- __ risbg(temp, new_val, Operand(start), Operand(end), \
- Operand::Zero(), false); \
+ __ RotateInsertSelectBits(temp, new_val, Operand(start), \
+ Operand(end), Operand::Zero(), false); \
__ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
__ bne(&do_cs, Label::kNear); \
} while (false)
@@ -1366,9 +1356,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCodeObject: {
if (HasRegisterInput(instr, 0)) {
- __ AddP(ip, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@@ -1379,20 +1372,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_S390X
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
#endif
- __ Call(wasm_code, rmode);
+ __ Call(wasm_code, constant.rmode());
} else {
__ Call(i.InputRegister(0));
}
@@ -1408,9 +1395,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (HasRegisterInput(instr, 0)) {
- __ AddP(ip, i.InputRegister(0),
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -1424,15 +1414,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallWasm: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
- RelocInfo::Mode rmode = RelocInfo::JS_TO_WASM_CALL;
- if (info()->IsWasm()) {
- rmode = RelocInfo::WASM_CALL;
- }
-
if (instr->InputAt(0)->IsImmediate()) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt32());
- __ Jump(wasm_code, rmode);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+#ifdef V8_TARGET_ARCH_S390X
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+#else
+ Address wasm_code = static_cast<Address>(constant.ToInt32());
+#endif
+ __ Jump(wasm_code, constant.rmode());
} else {
__ Jump(i.InputRegister(0));
}
@@ -1442,7 +1431,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallAddress: {
CHECK(!instr->InputAt(0)->IsImmediate());
- __ Jump(i.InputRegister(0));
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -1530,6 +1523,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -1581,12 +1577,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadRR(i.OutputRegister(), fp);
}
break;
- case kArchRootsPointer:
- __ LoadRR(i.OutputRegister(), kRootRegister);
- break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0));
+ i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -1816,8 +1809,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int shiftAmount = i.InputInt32(1);
int endBit = 63 - shiftAmount;
int startBit = 63 - i.InputInt32(2);
- __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
- Operand(endBit), Operand(shiftAmount), true);
+ __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
+ Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = 63 - i.InputInt32(2);
@@ -1833,8 +1826,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int shiftAmount = i.InputInt32(1);
int endBit = 63;
int startBit = 63 - i.InputInt32(2);
- __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
- Operand(endBit), Operand(shiftAmount), true);
+ __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
+ Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = 63 - i.InputInt32(2);
@@ -1848,8 +1841,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int shiftAmount = i.InputInt32(1);
int endBit = 63 - i.InputInt32(2);
int startBit = 0;
- __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
- Operand(endBit), Operand(shiftAmount), true);
+ __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
+ Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = i.InputInt32(2);
@@ -1980,16 +1973,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
break;
case kS390_FloorFloat:
- __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+ __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_CeilFloat:
- __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+ __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_TruncateFloat:
- __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+ __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
// Double operations
case kS390_ModDouble:
@@ -2083,20 +2076,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_FloorDouble:
- __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+ __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_CeilDouble:
- __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+ __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_TruncateDouble:
- __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+ __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_RoundDouble:
- __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
+ __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0,
+ i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_NegFloat:
ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
@@ -2559,8 +2552,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadlW(output, MemOperand(r1, offset)); \
__ bind(&do_cs); \
__ llgfr(r0, output); \
- __ risbg(r0, value, Operand(start), Operand(end), Operand(shift_amount), \
- false); \
+ __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
+ Operand(shift_amount), false); \
__ csy(output, r0, MemOperand(r1, offset)); \
__ bne(&do_cs, Label::kNear); \
__ srl(output, Operand(shift_amount)); \
@@ -2709,8 +2702,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ risbg(result, prev, Operand(56), Operand(63), \
- Operand(static_cast<intptr_t>(rotate_left)), true); \
+ __ RotateInsertSelectBits(result, prev, Operand(56), \
+ Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
}); \
break; \
case kWord32Atomic##op##Int16: \
@@ -2723,12 +2717,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ risbg(result, prev, Operand(48), Operand(63), \
- Operand(static_cast<intptr_t>(rotate_left)), true); \
+ __ RotateInsertSelectBits(result, prev, Operand(48), \
+ Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
}); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP_WORD(inst, LoadlW); \
break;
ATOMIC_BINOP_CASE(Add, Add32)
ATOMIC_BINOP_CASE(Sub, Sub32)
@@ -2736,7 +2728,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
-
+ case kWord32AtomicAddWord32:
+ ASSEMBLE_ATOMIC_BINOP_WORD(laa);
+ break;
+ case kWord32AtomicSubWord32:
+ ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
+ break;
+ case kWord32AtomicAndWord32:
+ ASSEMBLE_ATOMIC_BINOP_WORD(lan);
+ break;
+ case kWord32AtomicOrWord32:
+ ASSEMBLE_ATOMIC_BINOP_WORD(lao);
+ break;
+ case kWord32AtomicXorWord32:
+ ASSEMBLE_ATOMIC_BINOP_WORD(lax);
+ break;
default:
UNREACHABLE();
break;
@@ -2792,31 +2798,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
class OutOfLineTrap final : public OutOfLineCode {
public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
void Generate() final {
S390OperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
}
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
@@ -2832,8 +2826,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2844,12 +2839,10 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}
}
- bool frame_elided_;
Instruction* instr_;
CodeGenerator* gen_;
};
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) OutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Label end;
@@ -2899,6 +2892,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ S390OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2987,30 +2991,83 @@ void CodeGenerator::AssembleConstructFrame() {
ResetSpeculationPoison();
}
- const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ Register scratch = r1;
+ __ LoadP(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ LoadP(scratch, MemOperand(scratch));
+ __ AddP(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ CmpLogicalP(sp, scratch);
+ __ bge(&done);
+ }
+
+ __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
+ __ Move(cp, Smi::kZero);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r4);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ }
+
+ __ bind(&done);
+ }
+
+ // Skip callee-saved and return slots, which are pushed below.
+ shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
+ shrink_slots -=
+ (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
__ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
}
// Save callee-saved Double registers.
- if (double_saves != 0) {
- __ MultiPushDoubles(double_saves);
- DCHECK_EQ(kNumCalleeSavedDoubles,
- base::bits::CountPopulation(double_saves));
+ if (saves_fp != 0) {
+ __ MultiPushDoubles(saves_fp);
+ DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
}
// Save callee-saved registers.
- const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
}
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ lay(sp, MemOperand(sp, -returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ lay(sp, MemOperand(sp, returns * kPointerSize));
+ }
+
// Restore registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -3110,7 +3167,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
- __ mov(dst, Operand(src.ToExternalReference()));
+ __ Move(dst, src.ToExternalReference());
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index c502135c30..340cbb65c1 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -2041,8 +2041,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
void InstructionSelector::VisitWord32Equal(Node* const node) {
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 1bedeb8e04..01034ffb73 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -96,7 +96,7 @@ BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
return b1;
}
-void BasicBlock::Print() { OFStream(stdout) << this; }
+void BasicBlock::Print() { StdoutStream{} << this; }
std::ostream& operator<<(std::ostream& os, const BasicBlock& block) {
os << "B" << block.id();
@@ -194,9 +194,9 @@ BasicBlock* Schedule::NewBasicBlock() {
void Schedule::PlanNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
- OFStream os(stdout);
- os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
- << " for future add to B" << block->id() << "\n";
+ StdoutStream{} << "Planning #" << node->id() << ":"
+ << node->op()->mnemonic() << " for future add to B"
+ << block->id() << "\n";
}
DCHECK_NULL(this->block(node));
SetBlockForNode(block, node);
@@ -205,9 +205,8 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
void Schedule::AddNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
- OFStream os(stdout);
- os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
- << block->id() << "\n";
+ StdoutStream{} << "Adding #" << node->id() << ":" << node->op()->mnemonic()
+ << " to B" << block->id() << "\n";
}
DCHECK(this->block(node) == nullptr || this->block(node) == block);
block->AddNode(node);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 91e4e02dd3..13712a3561 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -1005,7 +1005,7 @@ class SpecialRPONumberer : public ZoneObject {
#if DEBUG
void PrintRPO() {
- OFStream os(stdout);
+ StdoutStream os;
os << "RPO with " << loops_.size() << " loops";
if (loops_.size() > 0) {
os << " (";
@@ -1732,8 +1732,7 @@ void Scheduler::SealFinalSchedule() {
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
- OFStream os(stdout);
- os << "Schedule before control flow fusion:\n" << *schedule_;
+ StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
}
// Iterate on phase 1: Build control-flow graph.
@@ -1776,8 +1775,7 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
MovePlannedNodes(block, schedule_->block(node));
if (FLAG_trace_turbo_scheduler) {
- OFStream os(stdout);
- os << "Schedule after control flow fusion:\n" << *schedule_;
+ StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
}
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 6192375826..645f47f706 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -81,6 +81,8 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4ReplaceLane) \
V(I32x4SConvertF32x4) \
V(I32x4UConvertF32x4) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
V(I32x4Shl) \
V(I32x4ShrS) \
@@ -99,6 +101,8 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4LeS) \
V(I32x4GtS) \
V(I32x4GeS) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
V(I32x4LtU) \
V(I32x4LeU) \
V(I32x4GtU) \
@@ -143,6 +147,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8Splat) \
V(I16x8ExtractLane) \
V(I16x8ReplaceLane) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
V(I16x8Neg) \
V(I16x8Shl) \
V(I16x8ShrS) \
@@ -155,6 +161,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
V(I16x8ShrU) \
V(I16x8UConvertI32x4) \
V(I16x8AddSaturateU) \
@@ -247,10 +255,21 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
break;
}
case IrOpcode::kI8x16SConvertI16x8:
- case IrOpcode::kI8x16UConvertI16x8: {
+ case IrOpcode::kI8x16UConvertI16x8:
+ case IrOpcode::kI32x4SConvertI16x8Low:
+ case IrOpcode::kI32x4SConvertI16x8High:
+ case IrOpcode::kI32x4UConvertI16x8Low:
+ case IrOpcode::kI32x4UConvertI16x8High: {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
}
+ case IrOpcode::kI16x8SConvertI8x16Low:
+ case IrOpcode::kI16x8SConvertI8x16High:
+ case IrOpcode::kI16x8UConvertI8x16Low:
+ case IrOpcode::kI16x8UConvertI8x16High: {
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ }
FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
@@ -335,8 +354,22 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
}
}
-void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op, SimdType type) {
+void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
+ MachineRepresentation rep = LoadRepresentationOf(node->op()).representation();
+ const Operator* load_op;
+ switch (node->opcode()) {
+ case IrOpcode::kLoad:
+ load_op = machine()->Load(MachineTypeFrom(type));
+ break;
+ case IrOpcode::kUnalignedLoad:
+ load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
+ break;
+ case IrOpcode::kProtectedLoad:
+ load_op = machine()->ProtectedLoad(MachineTypeFrom(type));
+ break;
+ default:
+ UNREACHABLE();
+ }
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -368,9 +401,38 @@ void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
}
}
-void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
- const Operator* store_op,
- SimdType rep_type) {
+void SimdScalarLowering::LowerStoreOp(Node* node) {
+ // For store operation, use replacement type of its input instead of the
+ // one of its effected node.
+ DCHECK_LT(2, node->InputCount());
+ SimdType rep_type = ReplacementType(node->InputAt(2));
+ replacements_[node->id()].type = rep_type;
+ const Operator* store_op;
+ MachineRepresentation rep;
+ switch (node->opcode()) {
+ case IrOpcode::kStore: {
+ rep = StoreRepresentationOf(node->op()).representation();
+ WriteBarrierKind write_barrier_kind =
+ StoreRepresentationOf(node->op()).write_barrier_kind();
+ store_op = machine()->Store(StoreRepresentation(
+ MachineTypeFrom(rep_type).representation(), write_barrier_kind));
+ break;
+ }
+ case IrOpcode::kUnalignedStore: {
+ rep = UnalignedStoreRepresentationOf(node->op());
+ store_op =
+ machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
+ break;
+ }
+ case IrOpcode::kProtectedStore: {
+ rep = StoreRepresentationOf(node->op()).representation();
+ store_op =
+ machine()->ProtectedStore(MachineTypeFrom(rep_type).representation());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -681,31 +743,25 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
void SimdScalarLowering::LowerConvertFromInt(Node* node,
SimdType input_rep_type,
SimdType output_rep_type,
- bool is_signed) {
+ bool is_signed, int start_index) {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- int32_t shift_val = 0;
+ int32_t mask = 0;
if (input_rep_type == SimdType::kInt16x8) {
DCHECK_EQ(output_rep_type, SimdType::kInt32x4);
- shift_val = kShift16;
+ mask = kMask16;
} else {
DCHECK_EQ(output_rep_type, SimdType::kInt16x8);
DCHECK_EQ(input_rep_type, SimdType::kInt8x16);
- shift_val = kShift8;
+ mask = kMask8;
}
int num_lanes = NumLanes(output_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = rep[i];
- if (!is_signed) {
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shr(),
- graph()->NewNode(machine()->Word32Shl(), rep_node[i],
- mcgraph_->Int32Constant(shift_val)),
- mcgraph_->Int32Constant(shift_val));
- }
+ rep_node[i] =
+ is_signed ? rep[i + start_index] : Mask(rep[i + start_index], mask);
}
ReplaceNode(node, rep_node, num_lanes);
@@ -886,52 +942,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kLoad: {
- MachineRepresentation rep =
- LoadRepresentationOf(node->op()).representation();
- const Operator* load_op;
- load_op = machine()->Load(MachineTypeFrom(rep_type));
- LowerLoadOp(rep, node, load_op, rep_type);
- break;
- }
- case IrOpcode::kUnalignedLoad: {
- MachineRepresentation rep =
- LoadRepresentationOf(node->op()).representation();
- const Operator* load_op;
- load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
- LowerLoadOp(rep, node, load_op, rep_type);
+ case IrOpcode::kLoad:
+ case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kProtectedLoad: {
+ LowerLoadOp(node, rep_type);
break;
}
- case IrOpcode::kStore: {
- // For store operation, use replacement type of its input instead of the
- // one of its effected node.
- DCHECK_LT(2, node->InputCount());
- SimdType input_rep_type = ReplacementType(node->InputAt(2));
- if (input_rep_type != rep_type)
- replacements_[node->id()].type = input_rep_type;
- MachineRepresentation rep =
- StoreRepresentationOf(node->op()).representation();
- WriteBarrierKind write_barrier_kind =
- StoreRepresentationOf(node->op()).write_barrier_kind();
- const Operator* store_op;
- store_op = machine()->Store(
- StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
- write_barrier_kind));
- LowerStoreOp(rep, node, store_op, input_rep_type);
- break;
- }
- case IrOpcode::kUnalignedStore: {
- // For store operation, use replacement type of its input instead of the
- // one of its effected node.
- DCHECK_LT(2, node->InputCount());
- SimdType input_rep_type = ReplacementType(node->InputAt(2));
- if (input_rep_type != rep_type)
- replacements_[node->id()].type = input_rep_type;
- MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
- const Operator* store_op;
- store_op = machine()->UnalignedStore(
- MachineTypeFrom(input_rep_type).representation());
- LowerStoreOp(rep, node, store_op, input_rep_type);
+ case IrOpcode::kStore:
+ case IrOpcode::kUnalignedStore:
+ case IrOpcode::kProtectedStore: {
+ LowerStoreOp(node);
break;
}
case IrOpcode::kReturn: {
@@ -1101,24 +1121,44 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerConvertFromFloat(node, false);
break;
}
- case IrOpcode::kI32x4SConvertI16x8Low:
+ case IrOpcode::kI32x4SConvertI16x8Low: {
+ LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, true,
+ 0);
+ break;
+ }
case IrOpcode::kI32x4SConvertI16x8High: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, true);
+ LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, true,
+ 4);
+ break;
+ }
+ case IrOpcode::kI32x4UConvertI16x8Low: {
+ LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, false,
+ 0);
break;
}
- case IrOpcode::kI32x4UConvertI16x8Low:
case IrOpcode::kI32x4UConvertI16x8High: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, false);
+ LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, false,
+ 4);
+ break;
+ }
+ case IrOpcode::kI16x8SConvertI8x16Low: {
+ LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, true,
+ 0);
break;
}
- case IrOpcode::kI16x8SConvertI8x16Low:
case IrOpcode::kI16x8SConvertI8x16High: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, true);
+ LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, true,
+ 8);
+ break;
+ }
+ case IrOpcode::kI16x8UConvertI8x16Low: {
+ LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, false,
+ 0);
break;
}
- case IrOpcode::kI16x8UConvertI8x16Low:
case IrOpcode::kI16x8UConvertI8x16High: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, false);
+ LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, false,
+ 8);
break;
}
case IrOpcode::kI16x8SConvertI32x4: {
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 0bbac96906..9bb6e79cbe 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -73,10 +73,8 @@ class SimdScalarLowering {
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
- void LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op, SimdType type);
- void LowerStoreOp(MachineRepresentation rep, Node* node,
- const Operator* store_op, SimdType rep_type);
+ void LowerLoadOp(Node* node, SimdType type);
+ void LowerStoreOp(Node* node);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool not_horizontal = true);
void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
@@ -92,7 +90,8 @@ class SimdScalarLowering {
SimdType type);
void LowerConvertFromFloat(Node* node, bool is_signed);
void LowerConvertFromInt(Node* node, SimdType input_rep_type,
- SimdType output_rep_type, bool is_signed);
+ SimdType output_rep_type, bool is_signed,
+ int start_index);
void LowerPack(Node* node, SimdType input_rep_type, SimdType output_rep_type,
bool is_signed);
void LowerShiftOp(Node* node, SimdType type);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index f24f000887..74bb7fcd6b 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -286,8 +286,8 @@ class RepresentationSelector {
bool weakened_ = false;
};
- RepresentationSelector(JSGraph* jsgraph, Zone* zone,
- RepresentationChanger* changer,
+ RepresentationSelector(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ Zone* zone, RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins)
: jsgraph_(jsgraph),
@@ -306,7 +306,7 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(jsgraph->isolate(), graph_zone()) {
+ op_typer_(jsgraph->isolate(), js_heap_broker, graph_zone()) {
}
// Forward propagation of types from type feedback.
@@ -537,7 +537,7 @@ class RepresentationSelector {
}
void PrintNodeFeedbackType(Node* n) {
- OFStream os(stdout);
+ StdoutStream os;
os << "#" << n->id() << ":" << *n->op() << "(";
int j = 0;
for (Node* const i : n->inputs()) {
@@ -2630,6 +2630,17 @@ class RepresentationSelector {
SetOutput(node, rep);
return;
}
+ case IrOpcode::kLoadDataViewElement: {
+ MachineRepresentation const rep =
+ MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // external pointer
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 3, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 4);
+ SetOutput(node, rep);
+ return;
+ }
case IrOpcode::kStoreTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
@@ -2643,6 +2654,19 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kNone);
return;
}
+ case IrOpcode::kStoreDataViewElement: {
+ MachineRepresentation const rep =
+ MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // external pointer
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 3,
+ TruncatingUseInfoFromRepresentation(rep)); // value
+ ProcessInput(node, 4, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 5);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kConvertReceiver: {
Type input_type = TypeOf(node->InputAt(0));
VisitBinop(node, UseInfo::AnyTagged(),
@@ -3188,29 +3212,26 @@ class RepresentationSelector {
void PrintOutputInfo(NodeInfo* info) {
if (FLAG_trace_representation) {
- OFStream os(stdout);
- os << info->representation();
+ StdoutStream{} << info->representation();
}
}
void PrintRepresentation(MachineRepresentation rep) {
if (FLAG_trace_representation) {
- OFStream os(stdout);
- os << rep;
+ StdoutStream{} << rep;
}
}
void PrintTruncation(Truncation truncation) {
if (FLAG_trace_representation) {
- OFStream os(stdout);
- os << truncation.description() << std::endl;
+ StdoutStream{} << truncation.description() << std::endl;
}
}
void PrintUseInfo(UseInfo info) {
if (FLAG_trace_representation) {
- OFStream os(stdout);
- os << info.representation() << ":" << info.truncation().description();
+ StdoutStream{} << info.representation() << ":"
+ << info.truncation().description();
}
}
@@ -3252,11 +3273,14 @@ class RepresentationSelector {
Zone* graph_zone() { return jsgraph_->zone(); }
};
-SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker,
+ Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level)
: jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
@@ -3265,8 +3289,8 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
- RepresentationSelector selector(jsgraph(), zone_, &changer, source_positions_,
- node_origins_);
+ RepresentationSelector selector(jsgraph(), js_heap_broker_, zone_, &changer,
+ source_positions_, node_origins_);
selector.Run(this);
}
@@ -3910,9 +3934,9 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- Operator::kNoProperties);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
+ 0, flags, Operator::kNoProperties);
to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
@@ -3922,9 +3946,9 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- Operator::kNoProperties);
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
+ 0, flags, Operator::kNoProperties);
to_numeric_operator_.set(common()->Call(call_descriptor));
}
return to_numeric_operator_.get();
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index b78d5d5cfe..86ac8c75ab 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -23,8 +23,8 @@ class TypeCache;
class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
- SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
- SourcePositionTable* source_position,
+ SimplifiedLowering(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ Zone* zone, SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level);
~SimplifiedLowering() {}
@@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
+ const JSHeapBroker* js_heap_broker_;
Zone* const zone_;
TypeCache const& type_cache_;
SetOncePointer<Node> to_number_code_;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index dcfb485156..34be9cb0e4 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -32,16 +32,20 @@ Decision DecideObjectIsSmi(Node* const input) {
} // namespace
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
- JSGraph* jsgraph)
- : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(
+ Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ js_heap_broker_(js_heap_broker) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
+ // TODO(neis): Provide HeapObjectRefMatcher?
HeapObjectMatcher m(node->InputAt(0));
if (m.Is(factory()->true_value())) return ReplaceBoolean(false);
if (m.Is(factory()->false_value())) return ReplaceBoolean(true);
@@ -57,7 +61,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
+ if (m.HasValue()) {
+ return ReplaceInt32(m.Ref(js_heap_broker()).BooleanValue());
+ }
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
break;
}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 39c467d1bc..af827a2788 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -26,7 +26,8 @@ class SimplifiedOperatorBuilder;
class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
+ SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker);
~SimplifiedOperatorReducer() final;
const char* reducer_name() const override {
@@ -51,11 +52,14 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
Factory* factory() const;
Graph* graph() const;
Isolate* isolate() const;
- JSGraph* jsgraph() const { return jsgraph_; }
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+
JSGraph* const jsgraph_;
+ const JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 8b64eb566e..32aafa33d4 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -141,7 +141,9 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
ExternalArrayType ExternalArrayTypeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kLoadTypedElement ||
- op->opcode() == IrOpcode::kStoreTypedElement);
+ op->opcode() == IrOpcode::kLoadDataViewElement ||
+ op->opcode() == IrOpcode::kStoreTypedElement ||
+ op->opcode() == IrOpcode::kStoreDataViewElement);
return OpParameter<ExternalArrayType>(op);
}
@@ -581,11 +583,6 @@ AbortReason AbortReasonOf(const Operator* op) {
return static_cast<AbortReason>(OpParameter<int>(op));
}
-DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kCheckIf, op->opcode());
- return OpParameter<DeoptimizeReason>(op);
-}
-
const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32 ||
@@ -834,11 +831,12 @@ struct SimplifiedOperatorGlobalCache final {
#undef CHECKED_WITH_FEEDBACK
template <DeoptimizeReason kDeoptimizeReason>
- struct CheckIfOperator final : public Operator1<DeoptimizeReason> {
+ struct CheckIfOperator final : public Operator1<CheckIfParameters> {
CheckIfOperator()
- : Operator1<DeoptimizeReason>(
+ : Operator1<CheckIfParameters>(
IrOpcode::kCheckIf, Operator::kFoldable | Operator::kNoThrow,
- "CheckIf", 1, 1, 1, 0, 1, 0, kDeoptimizeReason) {}
+ "CheckIf", 1, 1, 1, 0, 1, 0,
+ CheckIfParameters(kDeoptimizeReason, VectorSlotPair())) {}
};
#define CHECK_IF(Name, message) \
CheckIfOperator<DeoptimizeReason::k##Name> kCheckIf##Name;
@@ -1144,15 +1142,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
static_cast<int>(reason)); // parameter
}
-const Operator* SimplifiedOperatorBuilder::CheckIf(DeoptimizeReason reason) {
- switch (reason) {
+const Operator* SimplifiedOperatorBuilder::CheckIf(
+ DeoptimizeReason reason, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (reason) {
#define CHECK_IF(Name, message) \
case DeoptimizeReason::k##Name: \
return &cache_.kCheckIf##Name;
DEOPTIMIZE_REASON_LIST(CHECK_IF)
#undef CHECK_IF
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckIfParameters>(
+ IrOpcode::kCheckIf, Operator::kFoldable | Operator::kNoThrow, "CheckIf",
+ 1, 1, 1, 0, 1, 0, CheckIfParameters(reason, feedback));
}
const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
@@ -1412,6 +1415,23 @@ CheckParameters const& CheckParametersOf(Operator const* op) {
return OpParameter<CheckParameters>(op);
}
+bool operator==(CheckIfParameters const& lhs, CheckIfParameters const& rhs) {
+ return lhs.reason() == rhs.reason() && lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(CheckIfParameters const& p) {
+ return base::hash_combine(p.reason(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckIfParameters const& p) {
+ return os << p.reason() << p.feedback();
+}
+
+CheckIfParameters const& CheckIfParametersOf(Operator const* op) {
+ CHECK(op->opcode() == IrOpcode::kCheckIf);
+ return OpParameter<CheckIfParameters>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::NewDoubleElements(
PretenureFlag pretenure) {
return new (zone()) Operator1<PretenureFlag>( // --
@@ -1505,13 +1525,15 @@ const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint(
SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
#undef SPECULATIVE_NUMBER_BINOP
-#define ACCESS_OP_LIST(V) \
- V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
- V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
- V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
- V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
- V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
- V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
+#define ACCESS_OP_LIST(V) \
+ V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
+ V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
+ V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
+ V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
output_count) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 62e7f73bfa..1708b0e06e 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -162,6 +162,29 @@ std::ostream& operator<<(std::ostream&, CheckParameters const&);
CheckParameters const& CheckParametersOf(Operator const*) V8_WARN_UNUSED_RESULT;
+class CheckIfParameters final {
+ public:
+ explicit CheckIfParameters(DeoptimizeReason reason,
+ const VectorSlotPair& feedback)
+ : reason_(reason), feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+ DeoptimizeReason reason() const { return reason_; }
+
+ private:
+ DeoptimizeReason reason_;
+ VectorSlotPair feedback_;
+};
+
+bool operator==(CheckIfParameters const&, CheckIfParameters const&);
+
+size_t hash_value(CheckIfParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckIfParameters const&);
+
+CheckIfParameters const& CheckIfParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
kAllowReturnHole // Allow to return the hole (signaling NaN).
@@ -619,7 +642,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckEqualsSymbol();
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
const Operator* CheckHeapObject();
- const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
+ const Operator* CheckIf(DeoptimizeReason deoptimize_reason,
+ const VectorSlotPair& feedback = VectorSlotPair());
const Operator* CheckInternalizedString();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const VectorSlotPair& = VectorSlotPair());
@@ -735,9 +759,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
+ // load-data-view-element buffer, [base + index]
+ const Operator* LoadDataViewElement(ExternalArrayType const&);
+
// store-typed-element buffer, [base + external + index], value
const Operator* StoreTypedElement(ExternalArrayType const&);
+ // store-data-view-element buffer, [base + index], value
+ const Operator* StoreDataViewElement(ExternalArrayType const&);
+
// Abort (for terminating execution on internal error).
const Operator* RuntimeAbort(AbortReason reason);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 4679573f87..ce2c71a3d7 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -147,6 +147,7 @@ class RedundantStoreFinder final {
bool HasBeenVisited(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() { return jsgraph()->isolate(); }
Zone* temp_zone() const { return temp_zone_; }
ZoneVector<UnobservablesSet>& unobservable() { return unobservable_; }
UnobservablesSet& unobservable_for_id(NodeId id) {
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index 23ef594c5d..1b8b5b4657 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -11,14 +11,17 @@ namespace v8 {
namespace internal {
namespace compiler {
-TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph)
+TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- op_typer_(jsgraph->isolate(), zone()) {}
+ op_typer_(jsgraph->isolate(), js_heap_broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() {}
Reduction TypeNarrowingReducer::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
+
Type new_type = Type::Any();
switch (node->opcode()) {
@@ -29,11 +32,10 @@ Reduction TypeNarrowingReducer::Reduce(Node* node) {
Type right_type = NodeProperties::GetType(node->InputAt(1));
if (left_type.Is(Type::PlainNumber()) &&
right_type.Is(Type::PlainNumber())) {
- Factory* const factory = jsgraph()->isolate()->factory();
if (left_type.Max() < right_type.Min()) {
- new_type = Type::HeapConstant(factory->true_value(), zone());
+ new_type = op_typer_.singleton_true();
} else if (left_type.Min() >= right_type.Max()) {
- new_type = Type::HeapConstant(factory->false_value(), zone());
+ new_type = op_typer_.singleton_false();
}
}
break;
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 4b1d589361..77cb07e772 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -19,7 +19,8 @@ class JSGraph;
class V8_EXPORT_PRIVATE TypeNarrowingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph);
+ TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker);
~TypeNarrowingReducer() final;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index ddcedd6d32..0c001117de 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -4,8 +4,10 @@
#include "src/compiler/typed-optimization.h"
-#include "src/compilation-dependencies.h"
+#include "src/base/optional.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -18,18 +20,22 @@ namespace compiler {
TypedOptimization::TypedOptimization(Editor* editor,
CompilationDependencies* dependencies,
- JSGraph* jsgraph)
+ JSGraph* jsgraph,
+ const JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
- true_type_(Type::HeapConstant(factory()->true_value(), graph()->zone())),
- false_type_(
- Type::HeapConstant(factory()->false_value(), graph()->zone())),
+ js_heap_broker_(js_heap_broker),
+ true_type_(Type::HeapConstant(js_heap_broker, factory()->true_value(),
+ graph()->zone())),
+ false_type_(Type::HeapConstant(js_heap_broker, factory()->false_value(),
+ graph()->zone())),
type_cache_(TypeCache::Get()) {}
TypedOptimization::~TypedOptimization() {}
Reduction TypedOptimization::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
@@ -83,12 +89,14 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
-MaybeHandle<Map> GetStableMapFromObjectType(Type object_type) {
+base::Optional<MapRef> GetStableMapFromObjectType(
+ const JSHeapBroker* js_heap_broker, Type object_type) {
if (object_type.IsHeapConstant()) {
- Handle<Map> object_map(object_type.AsHeapConstant()->Value()->map());
- if (object_map->is_stable()) return object_map;
+ HeapObjectRef object = object_type.AsHeapConstant()->Ref();
+ MapRef object_map = object.map();
+ if (object_map.is_stable()) return object_map;
}
- return MaybeHandle<Map>();
+ return {};
}
} // namespace
@@ -136,15 +144,16 @@ Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Type const object_type = NodeProperties::GetType(object);
Node* const effect = NodeProperties::GetEffectInput(node);
- Handle<Map> object_map;
- if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+ base::Optional<MapRef> object_map =
+ GetStableMapFromObjectType(js_heap_broker(), object_type);
+ if (object_map.has_value()) {
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
Node* const map = NodeProperties::GetValueInput(node, i);
Type const map_type = NodeProperties::GetType(map);
if (map_type.IsHeapConstant() &&
- map_type.AsHeapConstant()->Value().is_identical_to(object_map)) {
+ map_type.AsHeapConstant()->Ref().equals(*object_map)) {
if (object_map->CanTransition()) {
- dependencies()->AssumeMapStable(object_map);
+ dependencies()->DependOnStableMap(*object_map);
}
return Replace(effect);
}
@@ -206,12 +215,11 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
// (1) map cannot transition further, or
// (2) deoptimization is enabled and we can add a code dependency on the
// stability of map (to guard the Constant type information).
- Handle<Map> object_map;
- if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
- if (object_map->CanTransition()) {
- dependencies()->AssumeMapStable(object_map);
- }
- Node* const value = jsgraph()->HeapConstant(object_map);
+ base::Optional<MapRef> object_map =
+ GetStableMapFromObjectType(js_heap_broker(), object_type);
+ if (object_map.has_value()) {
+ dependencies()->DependOnStableMap(*object_map);
+ Node* const value = jsgraph()->Constant(*object_map);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -326,10 +334,10 @@ const Operator* TypedOptimization::NumberComparisonFor(const Operator* op) {
Reduction TypedOptimization::
TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
- Node* comparison, Handle<String> string, bool inverted) {
+ Node* comparison, const StringRef& string, bool inverted) {
switch (comparison->opcode()) {
case IrOpcode::kStringEqual:
- if (string->length() != 1) {
+ if (string.length() != 1) {
// String.fromCharCode(x) always has length 1.
return Replace(jsgraph()->BooleanConstant(false));
}
@@ -337,7 +345,7 @@ Reduction TypedOptimization::
case IrOpcode::kStringLessThan:
V8_FALLTHROUGH;
case IrOpcode::kStringLessThanOrEqual:
- if (string->length() == 0) {
+ if (string.length() == 0) {
// String.fromCharCode(x) <= "" is always false,
// "" < String.fromCharCode(x) is always true.
return Replace(jsgraph()->BooleanConstant(inverted));
@@ -354,11 +362,14 @@ Reduction TypedOptimization::
// and {constant} {comparison} String.fromCharCode(x) if inverted is true.
Reduction
TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
- Node* comparison, Node* from_char_code, Node* constant, bool inverted) {
+ Node* comparison, Node* from_char_code, Type constant_type, bool inverted) {
DCHECK_EQ(IrOpcode::kStringFromSingleCharCode, from_char_code->opcode());
- HeapObjectMatcher m(constant);
- if (!m.HasValue() || !m.Value()->IsString()) return NoChange();
- Handle<String> string = Handle<String>::cast(m.Value());
+
+ if (!constant_type.IsHeapConstant()) return NoChange();
+ ObjectRef constant = constant_type.AsHeapConstant()->Ref();
+
+ if (!constant.IsString()) return NoChange();
+ StringRef string = constant.AsString();
// Check if comparison can be resolved statically.
Reduction red = TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
@@ -376,12 +387,12 @@ TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
simplified()->NumberBitwiseAnd(), from_char_code_repl,
jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
}
- Node* constant_repl = jsgraph()->Constant(string->Get(0));
+ Node* constant_repl = jsgraph()->Constant(string.GetFirstChar());
Node* number_comparison = nullptr;
if (inverted) {
// "x..." <= String.fromCharCode(z) is true if x < z.
- if (string->length() > 1 &&
+ if (string.length() > 1 &&
comparison->opcode() == IrOpcode::kStringLessThanOrEqual) {
comparison_op = simplified()->NumberLessThan();
}
@@ -389,7 +400,7 @@ TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
graph()->NewNode(comparison_op, constant_repl, from_char_code_repl);
} else {
// String.fromCharCode(z) < "x..." is true if z <= x.
- if (string->length() > 1 &&
+ if (string.length() > 1 &&
comparison->opcode() == IrOpcode::kStringLessThan) {
comparison_op = simplified()->NumberLessThanOrEqual();
}
@@ -406,6 +417,8 @@ Reduction TypedOptimization::ReduceStringComparison(Node* node) {
IrOpcode::kStringLessThanOrEqual == node->opcode());
Node* const lhs = NodeProperties::GetValueInput(node, 0);
Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type lhs_type = NodeProperties::GetType(lhs);
+ Type rhs_type = NodeProperties::GetType(rhs);
if (lhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
if (rhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
Node* left = NodeProperties::GetValueInput(lhs, 0);
@@ -431,12 +444,12 @@ Reduction TypedOptimization::ReduceStringComparison(Node* node) {
ReplaceWithValue(node, equal);
return Replace(equal);
} else {
- return TryReduceStringComparisonOfStringFromSingleCharCode(node, lhs, rhs,
- false);
+ return TryReduceStringComparisonOfStringFromSingleCharCode(
+ node, lhs, rhs_type, false);
}
} else if (rhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
- return TryReduceStringComparisonOfStringFromSingleCharCode(node, rhs, lhs,
- true);
+ return TryReduceStringComparisonOfStringFromSingleCharCode(node, rhs,
+ lhs_type, true);
}
return NoChange();
}
@@ -542,26 +555,32 @@ Reduction TypedOptimization::ReduceTypeOf(Node* node) {
Type const type = NodeProperties::GetType(input);
Factory* const f = factory();
if (type.Is(Type::Boolean())) {
- return Replace(jsgraph()->Constant(f->boolean_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->boolean_string())));
} else if (type.Is(Type::Number())) {
- return Replace(jsgraph()->Constant(f->number_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->number_string())));
} else if (type.Is(Type::String())) {
- return Replace(jsgraph()->Constant(f->string_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->string_string())));
} else if (type.Is(Type::BigInt())) {
- return Replace(jsgraph()->Constant(f->bigint_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->bigint_string())));
} else if (type.Is(Type::Symbol())) {
- return Replace(jsgraph()->Constant(f->symbol_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->symbol_string())));
} else if (type.Is(Type::OtherUndetectableOrUndefined())) {
- return Replace(jsgraph()->Constant(f->undefined_string()));
+ return Replace(jsgraph()->Constant(
+ ObjectRef(js_heap_broker(), f->undefined_string())));
} else if (type.Is(Type::NonCallableOrNull())) {
- return Replace(jsgraph()->Constant(f->object_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->object_string())));
} else if (type.Is(Type::Function())) {
- return Replace(jsgraph()->Constant(f->function_string()));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(js_heap_broker(), f->function_string())));
} else if (type.IsHeapConstant()) {
- return Replace(jsgraph()->Constant(
- Object::TypeOf(isolate(), type.AsHeapConstant()->Value())));
+ return Replace(jsgraph()->Constant(type.AsHeapConstant()->Ref().TypeOf()));
}
-
return NoChange();
}
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index cbb5ab7865..3c4b6ed9cd 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -13,13 +13,13 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationDependencies;
class Factory;
class Isolate;
namespace compiler {
// Forward declarations.
+class CompilationDependencies;
class JSGraph;
class SimplifiedOperatorBuilder;
class TypeCache;
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph);
+ JSGraph* jsgraph, const JSHeapBroker* js_heap_broker);
~TypedOptimization();
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -58,20 +58,24 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceToBoolean(Node* node);
Reduction TryReduceStringComparisonOfStringFromSingleCharCode(
- Node* comparison, Node* from_char_code, Node* constant, bool inverted);
+ Node* comparison, Node* from_char_code, Type constant_type,
+ bool inverted);
Reduction TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
- Node* comparison, Handle<String> string, bool inverted);
+ Node* comparison, const StringRef& string, bool inverted);
const Operator* NumberComparisonFor(const Operator* op);
- CompilationDependencies* dependencies() const { return dependencies_; }
+ SimplifiedOperatorBuilder* simplified() const;
Factory* factory() const;
Graph* graph() const;
Isolate* isolate() const;
+
+ CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* jsgraph() const { return jsgraph_; }
- SimplifiedOperatorBuilder* simplified() const;
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
+ const JSHeapBroker* js_heap_broker_;
Type const true_type_;
Type const false_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 29f6f0230d..575d4aa893 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -33,13 +33,14 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
- : isolate_(isolate),
- flags_(flags),
+Typer::Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+ Graph* graph)
+ : flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
- operation_typer_(isolate, zone()) {
+ js_heap_broker_(js_heap_broker),
+ operation_typer_(isolate, js_heap_broker, zone()) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -63,6 +64,7 @@ class Typer::Visitor : public Reducer {
const char* reducer_name() const override { return "Typer"; }
Reduction Reduce(Node* node) override {
+ DisallowHeapAccess no_heap_access;
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
#define DECLARE_CASE(x) \
@@ -227,7 +229,6 @@ class Typer::Visitor : public Reducer {
Type Weaken(Node* node, Type current_type, Type previous_type);
Zone* zone() { return typer_->zone(); }
- Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
@@ -829,14 +830,15 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
return typer_->cache_.kInteger;
}
if (FLAG_trace_turbo_loop) {
- OFStream os(stdout);
- os << std::setprecision(10);
- os << "Loop (" << NodeProperties::GetControlInput(node)->id()
- << ") variable bounds in "
- << (arithmetic_type == InductionVariable::ArithmeticType::kAddition
- ? "addition"
- : "subtraction")
- << " for phi " << node->id() << ": (" << min << ", " << max << ")\n";
+ StdoutStream{} << std::setprecision(10) << "Loop ("
+ << NodeProperties::GetControlInput(node)->id()
+ << ") variable bounds in "
+ << (arithmetic_type ==
+ InductionVariable::ArithmeticType::kAddition
+ ? "addition"
+ : "subtraction")
+ << " for phi " << node->id() << ": (" << min << ", " << max
+ << ")\n";
}
return Type::Range(min, max, typer_->zone());
}
@@ -1228,6 +1230,8 @@ Type Typer::Visitor::TypeJSLoadGlobal(Node* node) {
Type Typer::Visitor::TypeJSParseInt(Node* node) { return Type::Number(); }
+Type Typer::Visitor::TypeJSRegExpTest(Node* node) { return Type::Boolean(); }
+
// Returns a somewhat larger range if we previously assigned
// a (smaller) range to this node. This is used to speed up
// the fixpoint calculation in case there appears to be a loop
@@ -1398,278 +1402,278 @@ Type Typer::Visitor::TypeJSObjectIsArray(Node* node) { return Type::Boolean(); }
Type Typer::Visitor::TypeDateNow(Node* node) { return Type::Number(); }
Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
- if (fun.IsHeapConstant() && fun.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(fun.AsHeapConstant()->Value());
- if (function->shared()->HasBuiltinFunctionId()) {
- switch (function->shared()->builtin_function_id()) {
- case kMathRandom:
- return Type::PlainNumber();
- case kMathFloor:
- case kMathCeil:
- case kMathRound:
- case kMathTrunc:
- return t->cache_.kIntegerOrMinusZeroOrNaN;
- // Unary math functions.
- case kMathAbs:
- case kMathExp:
- case kMathExpm1:
- return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCos:
- case kMathFround:
- case kMathLog:
- case kMathLog1p:
- case kMathLog10:
- case kMathLog2:
- case kMathSin:
- case kMathSqrt:
- case kMathTan:
- return Type::Number();
- case kMathSign:
- return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
- // Binary math functions.
- case kMathAtan2:
- case kMathPow:
- case kMathMax:
- case kMathMin:
- return Type::Number();
- case kMathImul:
- return Type::Signed32();
- case kMathClz32:
- return t->cache_.kZeroToThirtyTwo;
- // Date functions.
- case kDateNow:
- return t->cache_.kTimeValueType;
- case kDateGetDate:
- return t->cache_.kJSDateDayType;
- case kDateGetDay:
- return t->cache_.kJSDateWeekdayType;
- case kDateGetFullYear:
- return t->cache_.kJSDateYearType;
- case kDateGetHours:
- return t->cache_.kJSDateHourType;
- case kDateGetMilliseconds:
- return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
- t->zone());
- case kDateGetMinutes:
- return t->cache_.kJSDateMinuteType;
- case kDateGetMonth:
- return t->cache_.kJSDateMonthType;
- case kDateGetSeconds:
- return t->cache_.kJSDateSecondType;
- case kDateGetTime:
- return t->cache_.kJSDateValueType;
-
- // Symbol functions.
- case kSymbolConstructor:
- return Type::Symbol();
-
- // BigInt functions.
- case kBigIntConstructor:
- return Type::BigInt();
-
- // Number functions.
- case kNumberConstructor:
- return Type::Number();
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- return Type::Boolean();
- case kNumberParseFloat:
- return Type::Number();
- case kNumberParseInt:
- return t->cache_.kIntegerOrMinusZeroOrNaN;
- case kNumberToString:
- return Type::String();
-
- // String functions.
- case kStringConstructor:
- return Type::String();
- case kStringCharCodeAt:
- return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
- t->zone());
- case kStringCharAt:
- return Type::String();
- case kStringCodePointAt:
- return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
- Type::Undefined(), t->zone());
- case kStringConcat:
- case kStringFromCharCode:
- case kStringFromCodePoint:
- return Type::String();
- case kStringIndexOf:
- case kStringLastIndexOf:
- return Type::Range(-1.0, String::kMaxLength, t->zone());
- case kStringEndsWith:
- case kStringIncludes:
- return Type::Boolean();
- case kStringRaw:
- case kStringRepeat:
- case kStringSlice:
- return Type::String();
- case kStringStartsWith:
- return Type::Boolean();
- case kStringSubstr:
- case kStringSubstring:
- case kStringToLowerCase:
- case kStringToString:
- case kStringToUpperCase:
- case kStringTrim:
- case kStringTrimEnd:
- case kStringTrimStart:
- case kStringValueOf:
- return Type::String();
-
- case kStringIterator:
- case kStringIteratorNext:
- return Type::OtherObject();
-
- case kArrayEntries:
- case kArrayKeys:
- case kArrayValues:
- case kTypedArrayEntries:
- case kTypedArrayKeys:
- case kTypedArrayValues:
- case kArrayIteratorNext:
- case kMapIteratorNext:
- case kSetIteratorNext:
- return Type::OtherObject();
- case kTypedArrayToStringTag:
- return Type::Union(Type::InternalizedString(), Type::Undefined(),
- t->zone());
-
- // Array functions.
- case kArrayIsArray:
- return Type::Boolean();
- case kArrayConcat:
- return Type::Receiver();
- case kArrayEvery:
- return Type::Boolean();
- case kArrayFill:
- case kArrayFilter:
- return Type::Receiver();
- case kArrayFindIndex:
- return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayForEach:
- return Type::Undefined();
- case kArrayIncludes:
- return Type::Boolean();
- case kArrayIndexOf:
- return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayJoin:
- return Type::String();
- case kArrayLastIndexOf:
- return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayMap:
- return Type::Receiver();
- case kArrayPush:
- return t->cache_.kPositiveSafeInteger;
- case kArrayReverse:
- case kArraySlice:
- return Type::Receiver();
- case kArraySome:
- return Type::Boolean();
- case kArraySplice:
- return Type::Receiver();
- case kArrayUnshift:
- return t->cache_.kPositiveSafeInteger;
-
- // ArrayBuffer functions.
- case kArrayBufferIsView:
- return Type::Boolean();
-
- // Object functions.
- case kObjectAssign:
- return Type::Receiver();
- case kObjectCreate:
- return Type::OtherObject();
- case kObjectIs:
- case kObjectHasOwnProperty:
- case kObjectIsPrototypeOf:
- return Type::Boolean();
- case kObjectToString:
- return Type::String();
-
- // RegExp functions.
- case kRegExpCompile:
- return Type::OtherObject();
- case kRegExpExec:
- return Type::Union(Type::Array(), Type::Null(), t->zone());
- case kRegExpTest:
- return Type::Boolean();
- case kRegExpToString:
- return Type::String();
-
- // Function functions.
- case kFunctionBind:
- return Type::BoundFunction();
- case kFunctionHasInstance:
- return Type::Boolean();
-
- // Global functions.
- case kGlobalDecodeURI:
- case kGlobalDecodeURIComponent:
- case kGlobalEncodeURI:
- case kGlobalEncodeURIComponent:
- case kGlobalEscape:
- case kGlobalUnescape:
- return Type::String();
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- return Type::Boolean();
-
- // Map functions.
- case kMapClear:
- case kMapForEach:
- return Type::Undefined();
- case kMapDelete:
- case kMapHas:
- return Type::Boolean();
- case kMapEntries:
- case kMapKeys:
- case kMapSet:
- case kMapValues:
- return Type::OtherObject();
-
- // Set functions.
- case kSetAdd:
- case kSetEntries:
- case kSetValues:
- return Type::OtherObject();
- case kSetClear:
- case kSetForEach:
- return Type::Undefined();
- case kSetDelete:
- case kSetHas:
- return Type::Boolean();
-
- // WeakMap functions.
- case kWeakMapDelete:
- case kWeakMapHas:
- return Type::Boolean();
- case kWeakMapSet:
- return Type::OtherObject();
-
- // WeakSet functions.
- case kWeakSetAdd:
- return Type::OtherObject();
- case kWeakSetDelete:
- case kWeakSetHas:
- return Type::Boolean();
- default:
- break;
- }
- }
+ if (!fun.IsHeapConstant() || !fun.AsHeapConstant()->Ref().IsJSFunction()) {
+ return Type::NonInternal();
+ }
+ JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
+ if (!function.HasBuiltinFunctionId()) {
+ return Type::NonInternal();
+ }
+ switch (function.GetBuiltinFunctionId()) {
+ case kMathRandom:
+ return Type::PlainNumber();
+ case kMathFloor:
+ case kMathCeil:
+ case kMathRound:
+ case kMathTrunc:
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+ // Unary math functions.
+ case kMathAbs:
+ case kMathExp:
+ case kMathExpm1:
+ return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCos:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog1p:
+ case kMathLog10:
+ case kMathLog2:
+ case kMathSin:
+ case kMathSqrt:
+ case kMathTan:
+ return Type::Number();
+ case kMathSign:
+ return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
+ // Binary math functions.
+ case kMathAtan2:
+ case kMathPow:
+ case kMathMax:
+ case kMathMin:
+ return Type::Number();
+ case kMathImul:
+ return Type::Signed32();
+ case kMathClz32:
+ return t->cache_.kZeroToThirtyTwo;
+ // Date functions.
+ case kDateNow:
+ return t->cache_.kTimeValueType;
+ case kDateGetDate:
+ return t->cache_.kJSDateDayType;
+ case kDateGetDay:
+ return t->cache_.kJSDateWeekdayType;
+ case kDateGetFullYear:
+ return t->cache_.kJSDateYearType;
+ case kDateGetHours:
+ return t->cache_.kJSDateHourType;
+ case kDateGetMilliseconds:
+ return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
+ t->zone());
+ case kDateGetMinutes:
+ return t->cache_.kJSDateMinuteType;
+ case kDateGetMonth:
+ return t->cache_.kJSDateMonthType;
+ case kDateGetSeconds:
+ return t->cache_.kJSDateSecondType;
+ case kDateGetTime:
+ return t->cache_.kJSDateValueType;
+
+ // Symbol functions.
+ case kSymbolConstructor:
+ return Type::Symbol();
+
+ // BigInt functions.
+ case kBigIntConstructor:
+ return Type::BigInt();
+
+ // Number functions.
+ case kNumberConstructor:
+ return Type::Number();
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ return Type::Boolean();
+ case kNumberParseFloat:
+ return Type::Number();
+ case kNumberParseInt:
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+ case kNumberToString:
+ return Type::String();
+
+ // String functions.
+ case kStringConstructor:
+ return Type::String();
+ case kStringCharCodeAt:
+ return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
+ t->zone());
+ case kStringCharAt:
+ return Type::String();
+ case kStringCodePointAt:
+ return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
+ Type::Undefined(), t->zone());
+ case kStringConcat:
+ case kStringFromCharCode:
+ case kStringFromCodePoint:
+ return Type::String();
+ case kStringIndexOf:
+ case kStringLastIndexOf:
+ return Type::Range(-1.0, String::kMaxLength, t->zone());
+ case kStringEndsWith:
+ case kStringIncludes:
+ return Type::Boolean();
+ case kStringRaw:
+ case kStringRepeat:
+ case kStringSlice:
+ return Type::String();
+ case kStringStartsWith:
+ return Type::Boolean();
+ case kStringSubstr:
+ case kStringSubstring:
+ case kStringToLowerCase:
+ case kStringToString:
+ case kStringToUpperCase:
+ case kStringTrim:
+ case kStringTrimEnd:
+ case kStringTrimStart:
+ case kStringValueOf:
+ return Type::String();
+
+ case kStringIterator:
+ case kStringIteratorNext:
+ return Type::OtherObject();
+
+ case kArrayEntries:
+ case kArrayKeys:
+ case kArrayValues:
+ case kTypedArrayEntries:
+ case kTypedArrayKeys:
+ case kTypedArrayValues:
+ case kArrayIteratorNext:
+ case kMapIteratorNext:
+ case kSetIteratorNext:
+ return Type::OtherObject();
+ case kTypedArrayToStringTag:
+ return Type::Union(Type::InternalizedString(), Type::Undefined(),
+ t->zone());
+
+ // Array functions.
+ case kArrayIsArray:
+ return Type::Boolean();
+ case kArrayConcat:
+ return Type::Receiver();
+ case kArrayEvery:
+ return Type::Boolean();
+ case kArrayFill:
+ case kArrayFilter:
+ return Type::Receiver();
+ case kArrayFindIndex:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayForEach:
+ return Type::Undefined();
+ case kArrayIncludes:
+ return Type::Boolean();
+ case kArrayIndexOf:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayJoin:
+ return Type::String();
+ case kArrayLastIndexOf:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayMap:
+ return Type::Receiver();
+ case kArrayPush:
+ return t->cache_.kPositiveSafeInteger;
+ case kArrayReverse:
+ case kArraySlice:
+ return Type::Receiver();
+ case kArraySome:
+ return Type::Boolean();
+ case kArraySplice:
+ return Type::Receiver();
+ case kArrayUnshift:
+ return t->cache_.kPositiveSafeInteger;
+
+ // ArrayBuffer functions.
+ case kArrayBufferIsView:
+ return Type::Boolean();
+
+ // Object functions.
+ case kObjectAssign:
+ return Type::Receiver();
+ case kObjectCreate:
+ return Type::OtherObject();
+ case kObjectIs:
+ case kObjectHasOwnProperty:
+ case kObjectIsPrototypeOf:
+ return Type::Boolean();
+ case kObjectToString:
+ return Type::String();
+
+ // RegExp functions.
+ case kRegExpCompile:
+ return Type::OtherObject();
+ case kRegExpExec:
+ return Type::Union(Type::Array(), Type::Null(), t->zone());
+ case kRegExpTest:
+ return Type::Boolean();
+ case kRegExpToString:
+ return Type::String();
+
+ // Function functions.
+ case kFunctionBind:
+ return Type::BoundFunction();
+ case kFunctionHasInstance:
+ return Type::Boolean();
+
+ // Global functions.
+ case kGlobalDecodeURI:
+ case kGlobalDecodeURIComponent:
+ case kGlobalEncodeURI:
+ case kGlobalEncodeURIComponent:
+ case kGlobalEscape:
+ case kGlobalUnescape:
+ return Type::String();
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ return Type::Boolean();
+
+ // Map functions.
+ case kMapClear:
+ case kMapForEach:
+ return Type::Undefined();
+ case kMapDelete:
+ case kMapHas:
+ return Type::Boolean();
+ case kMapEntries:
+ case kMapKeys:
+ case kMapSet:
+ case kMapValues:
+ return Type::OtherObject();
+
+ // Set functions.
+ case kSetAdd:
+ case kSetEntries:
+ case kSetValues:
+ return Type::OtherObject();
+ case kSetClear:
+ case kSetForEach:
+ return Type::Undefined();
+ case kSetDelete:
+ case kSetHas:
+ return Type::Boolean();
+
+ // WeakMap functions.
+ case kWeakMapDelete:
+ case kWeakMapHas:
+ return Type::Boolean();
+ case kWeakMapSet:
+ return Type::OtherObject();
+
+ // WeakSet functions.
+ case kWeakSetAdd:
+ return Type::OtherObject();
+ case kWeakSetDelete:
+ case kWeakSetHas:
+ return Type::Boolean();
+ default:
+ return Type::NonInternal();
}
- return Type::NonInternal();
}
Type Typer::Visitor::TypeJSCallForwardVarargs(Node* node) {
@@ -2039,6 +2043,17 @@ Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
UNREACHABLE();
}
+Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
+ switch (ExternalArrayTypeOf(node->op())) {
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+ case kExternal##ElemType##Array: \
+ return typer_->cache_.k##ElemType;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
+}
+
Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
@@ -2059,6 +2074,8 @@ Type Typer::Visitor::TypeStoreSignedSmallElement(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreTypedElement(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeStoreDataViewElement(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeObjectIsArrayBufferView(Node* node) {
return TypeUnaryOp(node, ObjectIsArrayBufferView);
}
@@ -2178,10 +2195,7 @@ Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
- if (Type::IsInteger(*value)) {
- return Type::Range(value->Number(), value->Number(), zone());
- }
- return Type::NewConstant(value, zone());
+ return Type::NewConstant(typer_->js_heap_broker(), value, zone());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 2d3cda7be3..1720bc776f 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -25,7 +25,8 @@ class V8_EXPORT_PRIVATE Typer {
};
typedef base::Flags<Flag> Flags;
- Typer(Isolate* isolate, Flags flags, Graph* graph);
+ Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+ Graph* graph);
~Typer();
void Run();
@@ -40,14 +41,14 @@ class V8_EXPORT_PRIVATE Typer {
Flags flags() const { return flags_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
- Isolate* isolate() const { return isolate_; }
OperationTyper* operation_typer() { return &operation_typer_; }
+ const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
- Isolate* const isolate_;
Flags const flags_;
Graph* const graph_;
Decorator* decorator_;
TypeCache const& cache_;
+ const JSHeapBroker* js_heap_broker_;
OperationTyper operation_typer_;
Type singleton_false_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index b101acad1d..8a5871fdb0 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -14,14 +14,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-// NOTE: If code is marked as being a "shortcut", this means that removing
-// the code won't affect the semantics of the surrounding function definition.
-
-// static
-bool Type::IsInteger(i::Object* x) {
- return x->IsNumber() && Type::IsInteger(x->Number());
-}
-
// -----------------------------------------------------------------------------
// Range-related helper functions.
@@ -57,12 +49,6 @@ bool Type::Contains(const RangeType* lhs, const RangeType* rhs) {
return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
}
-bool Type::Contains(const RangeType* range, i::Object* val) {
- DisallowHeapAllocation no_allocation;
- return IsInteger(val) && range->Min() <= val->Number() &&
- val->Number() <= range->Max();
-}
-
// -----------------------------------------------------------------------------
// Min and Max computation.
@@ -146,9 +132,8 @@ Type::bitset Type::BitsetLub() const {
UNREACHABLE();
}
-Type::bitset BitsetType::Lub(i::Map* map) {
- DisallowHeapAllocation no_allocation;
- switch (map->instance_type()) {
+Type::bitset BitsetType::Lub(HeapObjectType const& type) {
+ switch (type.instance_type()) {
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
@@ -177,19 +162,24 @@ Type::bitset BitsetType::Lub(i::Map* map) {
return kSymbol;
case BIGINT_TYPE:
return kBigInt;
- case ODDBALL_TYPE: {
- Heap* heap = map->GetHeap();
- if (map == heap->undefined_map()) return kUndefined;
- if (map == heap->null_map()) return kNull;
- if (map == heap->boolean_map()) return kBoolean;
- if (map == heap->the_hole_map()) return kHole;
- DCHECK(map == heap->uninitialized_map() ||
- map == heap->termination_exception_map() ||
- map == heap->arguments_marker_map() ||
- map == heap->optimized_out_map() ||
- map == heap->stale_register_map());
- return kOtherInternal;
- }
+ case ODDBALL_TYPE:
+ switch (type.oddball_type()) {
+ case OddballType::kNone:
+ break;
+ case OddballType::kHole:
+ return kHole;
+ case OddballType::kBoolean:
+ return kBoolean;
+ case OddballType::kNull:
+ return kNull;
+ case OddballType::kUndefined:
+ return kUndefined;
+ case OddballType::kUninitialized:
+ case OddballType::kOther:
+ // TODO(neis): We should add a kOtherOddball type.
+ return kOtherInternal;
+ }
+ UNREACHABLE();
case HEAP_NUMBER_TYPE:
return kNumber;
case JS_OBJECT_TYPE:
@@ -199,15 +189,15 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
- if (map->is_undetectable()) {
+ if (type.is_undetectable()) {
// Currently we assume that every undetectable receiver is also
// callable, which is what we need to support document.all. We
// could add another Type bit to support other use cases in the
// future if necessary.
- DCHECK(map->is_callable());
+ DCHECK(type.is_callable());
return kOtherUndetectable;
}
- if (map->is_callable()) {
+ if (type.is_callable()) {
return kOtherCallable;
}
return kOtherObject;
@@ -218,6 +208,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -246,18 +237,18 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
- DCHECK(!map->is_callable());
- DCHECK(!map->is_undetectable());
+ DCHECK(!type.is_callable());
+ DCHECK(!type.is_undetectable());
return kOtherObject;
case JS_BOUND_FUNCTION_TYPE:
- DCHECK(!map->is_undetectable());
+ DCHECK(!type.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
- DCHECK(!map->is_undetectable());
+ DCHECK(!type.is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
- DCHECK(!map->is_undetectable());
- if (map->is_callable()) return kCallableProxy;
+ DCHECK(!type.is_undetectable());
+ if (type.is_callable()) return kCallableProxy;
return kOtherProxy;
case MAP_TYPE:
case ALLOCATION_SITE_TYPE:
@@ -267,13 +258,21 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case EPHEMERON_HASH_TABLE_TYPE:
case WEAK_FIXED_ARRAY_TYPE:
case WEAK_ARRAY_LIST_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
case FEEDBACK_METADATA_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
@@ -281,6 +280,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -296,6 +296,9 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
case CELL_TYPE:
+ case PRE_PARSED_SCOPE_DATA_TYPE:
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -325,10 +328,9 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case INTERPRETER_DATA_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
- case WASM_COMPILED_MODULE_TYPE:
+ case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
- case WASM_SHARED_MODULE_DATA_TYPE:
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
@@ -343,17 +345,9 @@ Type::bitset BitsetType::Lub(i::Map* map) {
UNREACHABLE();
}
-Type::bitset BitsetType::Lub(i::Object* value) {
- DisallowHeapAllocation no_allocation;
- if (value->IsNumber()) {
- return Lub(value->Number());
- }
- return Lub(i::HeapObject::cast(value)->map());
-}
-
Type::bitset BitsetType::Lub(double value) {
DisallowHeapAllocation no_allocation;
- if (i::IsMinusZero(value)) return kMinusZero;
+ if (IsMinusZero(value)) return kMinusZero;
if (std::isnan(value)) return kNaN;
if (IsUint32Double(value) || IsInt32Double(value)) return Lub(value, value);
return kOtherNumber;
@@ -460,21 +454,16 @@ double BitsetType::Max(bitset bits) {
// static
bool OtherNumberConstantType::IsOtherNumberConstant(double value) {
// Not an integer, not NaN, and not -0.
- return !std::isnan(value) && !Type::IsInteger(value) &&
- !i::IsMinusZero(value);
-}
-
-// static
-bool OtherNumberConstantType::IsOtherNumberConstant(Object* value) {
- return value->IsHeapNumber() &&
- IsOtherNumberConstant(HeapNumber::cast(value)->value());
+ return !std::isnan(value) && !RangeType::IsInteger(value) &&
+ !IsMinusZero(value);
}
HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
- i::Handle<i::HeapObject> object)
- : TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
- DCHECK(!object->IsHeapNumber());
- DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
+ const HeapObjectRef& heap_ref)
+ : TypeBase(kHeapConstant), bitset_(bitset), heap_ref_(heap_ref) {}
+
+Handle<HeapObject> HeapConstantType::Value() const {
+ return heap_ref_.object<HeapObject>();
}
// -----------------------------------------------------------------------------
@@ -819,9 +808,9 @@ Type Type::NormalizeRangeAndBitset(Type range, bitset* bits, Zone* zone) {
}
Type Type::NewConstant(double value, Zone* zone) {
- if (IsInteger(value)) {
+ if (RangeType::IsInteger(value)) {
return Range(value, value, zone);
- } else if (i::IsMinusZero(value)) {
+ } else if (IsMinusZero(value)) {
return Type::MinusZero();
} else if (std::isnan(value)) {
return Type::NaN();
@@ -831,16 +820,22 @@ Type Type::NewConstant(double value, Zone* zone) {
return OtherNumberConstant(value, zone);
}
-Type Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
- if (IsInteger(*value)) {
- double v = value->Number();
- return Range(v, v, zone);
- } else if (value->IsHeapNumber()) {
- return NewConstant(value->Number(), zone);
- } else if (value->IsString() && !value->IsInternalizedString()) {
+Type Type::NewConstant(const JSHeapBroker* js_heap_broker,
+ Handle<i::Object> value, Zone* zone) {
+ auto maybe_smi = JSHeapBroker::TryGetSmi(value);
+ if (maybe_smi.has_value()) {
+ return NewConstant(static_cast<double>(maybe_smi.value()), zone);
+ }
+
+ HeapObjectRef heap_ref(js_heap_broker, value);
+ if (heap_ref.IsHeapNumber()) {
+ return NewConstant(heap_ref.AsHeapNumber().value(), zone);
+ }
+
+ if (heap_ref.IsString() && !heap_ref.IsInternalizedString()) {
return Type::String();
}
- return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
+ return HeapConstant(js_heap_broker, value, zone);
}
Type Type::Union(Type type1, Type type2, Zone* zone) {
@@ -1032,23 +1027,23 @@ void Type::PrintTo(std::ostream& os) const {
#ifdef DEBUG
void Type::Print() const {
- OFStream os(stdout);
+ StdoutStream os;
PrintTo(os);
os << std::endl;
}
void BitsetType::Print(bitset bits) {
- OFStream os(stdout);
+ StdoutStream os;
Print(os, bits);
os << std::endl;
}
#endif
BitsetType::bitset BitsetType::SignedSmall() {
- return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
+ return SmiValuesAre31Bits() ? kSigned31 : kSigned32;
}
BitsetType::bitset BitsetType::UnsignedSmall() {
- return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
+ return SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
}
// static
@@ -1066,8 +1061,10 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(i::Handle<i::HeapObject> value, Zone* zone) {
- return FromTypeBase(HeapConstantType::New(value, zone));
+Type Type::HeapConstant(const JSHeapBroker* js_heap_broker,
+ Handle<i::Object> value, Zone* zone) {
+ return FromTypeBase(
+ HeapConstantType::New(HeapObjectRef(js_heap_broker, value), zone));
}
// static
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 9f7a7d3d80..fbda845ee2 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_TYPES_H_
#include "src/base/compiler-specific.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/conversions.h"
#include "src/globals.h"
#include "src/handles.h"
@@ -250,8 +251,7 @@ class V8_EXPORT_PRIVATE BitsetType {
static double Max(bitset);
static bitset Glb(double min, double max);
- static bitset Lub(i::Map* map);
- static bitset Lub(i::Object* value);
+ static bitset Lub(HeapObjectType const& type);
static bitset Lub(double value);
static bitset Lub(double min, double max);
static bitset ExpandInternals(bitset bits);
@@ -312,6 +312,10 @@ class RangeType : public TypeBase {
double Min() const { return limits_.min; }
double Max() const { return limits_.max; }
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !IsMinusZero(x); // Allows for infinities.
+ }
+
private:
friend class Type;
friend class BitsetType;
@@ -321,10 +325,6 @@ class RangeType : public TypeBase {
return New(Limits(min, max), zone);
}
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
-
static RangeType* New(Limits lim, Zone* zone) {
DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
DCHECK(lim.min <= lim.max);
@@ -361,33 +361,25 @@ class V8_EXPORT_PRIVATE Type {
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(i::Handle<i::HeapObject> value, Zone* zone);
+ static Type HeapConstant(const JSHeapBroker* js_heap_broker,
+ Handle<i::Object> value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
static Type Range(RangeType::Limits lims, Zone* zone);
static Type Tuple(Type first, Type second, Type third, Zone* zone);
static Type Union(int length, Zone* zone);
// NewConstant is a factory that returns Constant, Range or Number.
- static Type NewConstant(i::Handle<i::Object> value, Zone* zone);
+ static Type NewConstant(const JSHeapBroker* js_heap_broker,
+ Handle<i::Object> value, Zone* zone);
static Type NewConstant(double value, Zone* zone);
static Type Union(Type type1, Type type2, Zone* zone);
static Type Intersect(Type type1, Type type2, Zone* zone);
- static Type Of(double value, Zone* zone) {
- return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(value)));
- }
- static Type Of(i::Object* value, Zone* zone) {
- return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+ static Type For(const JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
+ HeapObjectType type = js_heap_broker->HeapObjectTypeFromMap(map);
+ return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
}
- static Type Of(i::Handle<i::Object> value, Zone* zone) {
- return Of(*value, zone);
- }
-
- static Type For(i::Map* map) {
- return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(map)));
- }
- static Type For(i::Handle<i::Map> map) { return For(*map); }
// Predicates.
bool IsNone() const { return payload_ == None().payload_; }
@@ -424,11 +416,6 @@ class V8_EXPORT_PRIVATE Type {
// containing a range, that range is returned; otherwise, nullptr is returned.
Type GetRange() const;
- static bool IsInteger(i::Object* x);
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
-
int NumConstants() const;
static Type Invalid() { return Type(); }
@@ -494,7 +481,6 @@ class V8_EXPORT_PRIVATE Type {
static bool Overlap(const RangeType* lhs, const RangeType* rhs);
static bool Contains(const RangeType* lhs, const RangeType* rhs);
- static bool Contains(const RangeType* range, i::Object* val);
static int UpdateRange(Type type, UnionType* result, int size, Zone* zone);
@@ -526,7 +512,6 @@ class OtherNumberConstantType : public TypeBase {
double Value() const { return value_; }
static bool IsOtherNumberConstant(double value);
- static bool IsOtherNumberConstant(Object* value);
private:
friend class Type;
@@ -549,24 +534,27 @@ class OtherNumberConstantType : public TypeBase {
class V8_EXPORT_PRIVATE HeapConstantType : public NON_EXPORTED_BASE(TypeBase) {
public:
- i::Handle<i::HeapObject> Value() const { return object_; }
+ Handle<HeapObject> Value() const;
+ const HeapObjectRef& Ref() const { return heap_ref_; }
private:
friend class Type;
friend class BitsetType;
- static HeapConstantType* New(i::Handle<i::HeapObject> value, Zone* zone) {
- BitsetType::bitset bitset = BitsetType::Lub(*value);
+ static HeapConstantType* New(const HeapObjectRef& heap_ref, Zone* zone) {
+ DCHECK(!heap_ref.IsHeapNumber());
+ DCHECK_IMPLIES(heap_ref.IsString(), heap_ref.IsInternalizedString());
+ BitsetType::bitset bitset = BitsetType::Lub(heap_ref.type());
return new (zone->New(sizeof(HeapConstantType)))
- HeapConstantType(bitset, value);
+ HeapConstantType(bitset, heap_ref);
}
- HeapConstantType(BitsetType::bitset bitset, i::Handle<i::HeapObject> object);
+ HeapConstantType(BitsetType::bitset bitset, const HeapObjectRef& heap_ref);
BitsetType::bitset Lub() const { return bitset_; }
BitsetType::bitset bitset_;
- Handle<i::HeapObject> object_;
+ HeapObjectRef heap_ref_;
};
// -----------------------------------------------------------------------------
@@ -595,7 +583,7 @@ class StructuralType : public TypeBase {
length_ = length;
}
- StructuralType(Kind kind, int length, i::Zone* zone)
+ StructuralType(Kind kind, int length, Zone* zone)
: TypeBase(kind), length_(length) {
elements_ = reinterpret_cast<Type*>(zone->New(sizeof(Type) * length));
}
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 7aaab7963d..52cbd6e0b7 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -645,12 +645,15 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Receiver());
break;
case IrOpcode::kJSParseInt:
- // Type is Receiver.
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Any());
CheckTypeIs(node, Type::Number());
break;
-
+ case IrOpcode::kJSRegExpTest:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::String());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kJSCreate:
// Type is Object.
CheckTypeIs(node, Type::Object());
@@ -1495,6 +1498,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kLoadTypedElement:
break;
+ case IrOpcode::kLoadDataViewElement:
+ break;
case IrOpcode::kStoreField:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
@@ -1524,6 +1529,9 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kStoreTypedElement:
CheckNotTyped(node);
break;
+ case IrOpcode::kStoreDataViewElement:
+ CheckNotTyped(node);
+ break;
case IrOpcode::kNumberSilenceNaN:
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Number());
@@ -1708,7 +1716,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
- case IrOpcode::kLoadRootsPointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kWord32AtomicLoad:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 61c3a350f7..1b8f4e9066 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/assembler-inl.h"
+#include "src/assembler.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
@@ -35,9 +36,12 @@
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
+#include "src/optimized-compilation-info.h"
+#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
@@ -111,16 +115,12 @@ bool ContainsInt64(wasm::FunctionSig* sig) {
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
- Isolate* isolate, wasm::ModuleEnv* env, Zone* zone, MachineGraph* mcgraph,
- Handle<Code> centry_stub, Handle<Oddball> anyref_null,
+ wasm::ModuleEnv* env, Zone* zone, MachineGraph* mcgraph,
wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
- : isolate_(isolate),
- zone_(zone),
+ : zone_(zone),
mcgraph_(mcgraph),
env_(env),
- centry_stub_(centry_stub),
- anyref_null_(anyref_null),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
has_simd_(ContainsSimd(sig)),
@@ -214,19 +214,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- if (!anyref_null_node_.is_set()) {
- anyref_null_node_.set(
- graph()->NewNode(mcgraph()->common()->HeapConstant(anyref_null_)));
- }
- return anyref_null_node_.get();
-}
-
-Node* WasmGraphBuilder::CEntryStub() {
- if (!centry_stub_node_.is_set()) {
- centry_stub_node_.set(
- graph()->NewNode(mcgraph()->common()->HeapConstant(centry_stub_)));
- }
- return centry_stub_node_.get();
+ Node* null = LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
+ *effect_ = null;
+ return null;
}
Node* WasmGraphBuilder::NoContextConstant() {
@@ -259,11 +249,17 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
if (effect == nullptr) effect = effect_;
if (control == nullptr) control = control_;
- Node* limit =
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::Pointer()),
- mcgraph()->ExternalConstant(
- ExternalReference::address_of_stack_limit(isolate_)),
- mcgraph()->IntPtrConstant(0), *effect, *control);
+ // This instruction sequence is matched in the instruction selector to
+ // load the stack pointer directly on some platforms. Hence, when modifying
+ // please also fix WasmStackCheckMatcher in node-matchers.h
+
+ Node* limit_address = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()), instance_node_.get(),
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(StackLimitAddress)),
+ *effect, *control);
+ Node* limit = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
+ mcgraph()->IntPtrConstant(0), *effect, *control);
*effect = limit;
Node* pointer = graph()->NewNode(mcgraph()->machine()->LoadStackPointer());
@@ -275,26 +271,28 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
- // representing the stackcheck code.
- Handle<Code> code = BUILTIN_CODE(isolate_, WasmStackGuard);
- CallInterfaceDescriptor idesc = WasmRuntimeCallDescriptor(isolate_);
+ // representing the stack check code.
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate_, mcgraph()->zone(), idesc, 0, CallDescriptor::kNoFlags,
- Operator::kNoProperties, MachineType::AnyTagged(), 1,
- Linkage::kNoContext);
- stack_check_builtin_code_node_.set(
- graph()->NewNode(mcgraph()->common()->HeapConstant(code)));
+ mcgraph()->zone(), // zone
+ NoContextDescriptor{}, // descriptor
+ 0, // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallWasmRuntimeStub); // stub call mode
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
- Node* call = graph()->NewNode(stack_check_call_operator_,
- stack_check_builtin_code_node_.get(), *effect,
+ Node* call = graph()->NewNode(stack_check_call_operator_.get(),
+ stack_check_code_node_.get(), *effect,
stack_check.if_false);
SetSourcePosition(call, position);
- Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), *effect,
- call, stack_check.merge);
+ Node* ephi = stack_check.EffectPhi(*effect, call);
*control = stack_check.merge;
*effect = ephi;
@@ -911,22 +909,25 @@ Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
BranchHint::kFalse);
}
-Builtins::Name WasmGraphBuilder::GetBuiltinIdForTrap(wasm::TrapReason reason) {
+TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
// TODO(wasm): "!env_" should not happen when compiling an actual wasm
// function.
if (!env_ || !env_->runtime_exception_support) {
- // We use Builtins::builtin_count as a marker to tell the code generator
+ // We use TrapId::kInvalid as a marker to tell the code generator
// to generate a call to a testing c-function instead of a runtime
- // function. This code should only be called from a cctest.
- return Builtins::builtin_count;
+ // stub. This code should only be called from a cctest.
+ return TrapId::kInvalid;
}
switch (reason) {
-#define TRAPREASON_TO_MESSAGE(name) \
- case wasm::k##name: \
- return Builtins::kThrowWasm##name;
- FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
-#undef TRAPREASON_TO_MESSAGE
+#define TRAPREASON_TO_TRAPID(name) \
+ case wasm::k##name: \
+ static_assert( \
+ static_cast<int>(TrapId::k##name) == wasm::WasmCode::kThrowWasm##name, \
+ "trap id mismatch"); \
+ return TrapId::k##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_TRAPID)
+#undef TRAPREASON_TO_TRAPID
default:
UNREACHABLE();
}
@@ -934,7 +935,7 @@ Builtins::Name WasmGraphBuilder::GetBuiltinIdForTrap(wasm::TrapReason reason) {
Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- Builtins::Name trap_id = GetBuiltinIdForTrap(reason);
+ TrapId trap_id = GetTrapIdForTrap(reason);
Node* node = graph()->NewNode(mcgraph()->common()->TrapIf(trap_id), cond,
Effect(), Control());
*control_ = node;
@@ -944,8 +945,7 @@ Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
Node* WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- Builtins::Name trap_id = GetBuiltinIdForTrap(reason);
-
+ TrapId trap_id = GetTrapIdForTrap(reason);
Node* node = graph()->NewNode(mcgraph()->common()->TrapUnless(trap_id), cond,
Effect(), Control());
*control_ = node;
@@ -2015,28 +2015,25 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* WasmGraphBuilder::GrowMemory(Node* input) {
SetNeedsStackCheck();
- Diamond check_input_range(
- graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(), input,
- mcgraph()->Uint32Constant(FLAG_wasm_max_mem_pages)),
- BranchHint::kTrue);
-
- check_input_range.Chain(*control_);
-
- Node* parameters[] = {BuildChangeUint31ToSmi(input)};
- Node* old_effect = *effect_;
- *control_ = check_input_range.if_true;
- Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, parameters,
- arraysize(parameters));
- Node* result = BuildChangeSmiToInt32(call);
+ WasmGrowMemoryDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ StubCallMode::kCallWasmRuntimeStub); // stub call mode
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmGrowMemory, RelocInfo::WASM_STUB_CALL);
+ Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, input, *effect_, *control_);
- result = check_input_range.Phi(MachineRepresentation::kWord32, result,
- mcgraph()->Int32Constant(-1));
- *effect_ = graph()->NewNode(mcgraph()->common()->EffectPhi(2), *effect_,
- old_effect, check_input_range.merge);
- *control_ = check_input_range.merge;
- return result;
+ *effect_ = call;
+ *control_ = call;
+ return call;
}
uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
@@ -2595,13 +2592,45 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
Node* target_node = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * sizeof(Address)),
+ mcgraph()->Int32Constant(func_index * kPointerSize),
mcgraph()->graph()->start(), mcgraph()->graph()->start());
args[0] = target_node;
return BuildWasmCall(sig, args, rets, position, instance_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
+Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
+ Node*** rets,
+ wasm::WasmCodePosition position,
+ Node* func_index) {
+ // Load the instance from the imported_instances array.
+ Node* imported_instances = LOAD_INSTANCE_FIELD(ImportedFunctionInstances,
+ MachineType::TaggedPointer());
+ // Access fixed array at {header_size - tag + func_index * kPointerSize}.
+ Node* imported_instances_data =
+ graph()->NewNode(mcgraph()->machine()->IntAdd(), imported_instances,
+ mcgraph()->IntPtrConstant(FixedArrayOffsetMinusTag(0)));
+ Node* func_index_times_pointersize = graph()->NewNode(
+ mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
+ mcgraph()->Int32Constant(kPointerSize));
+ Node* instance_node =
+ graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()),
+ imported_instances_data, func_index_times_pointersize,
+ *effect_, *control_);
+
+ // Load the target from the imported_targets array at the offset of
+ // {func_index}.
+ Node* imported_targets =
+ LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
+ Node* target_node = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
+ func_index_times_pointersize, mcgraph()->graph()->start(),
+ mcgraph()->graph()->start());
+ args[0] = target_node;
+ return BuildWasmCall(sig, args, rets, position, instance_node,
+ untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
+}
+
Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
@@ -2741,10 +2770,15 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
return true;
}
-Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
if (mcgraph()->machine()->Is64()) {
value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
}
+ return value;
+}
+
+Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ value = BuildChangeInt32ToIntPtr(value);
return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
BuildSmiShiftBitsConstant());
}
@@ -2929,10 +2963,8 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
if (mem_type == MachineType::Simd128() && global.offset != 0) {
// TODO(titzer,bbudge): code generation for SIMD memory offsets is broken.
- *base_node =
- graph()->NewNode(kPointerSize == 4 ? mcgraph()->machine()->Int32Add()
- : mcgraph()->machine()->Int64Add(),
- *base_node, *offset_node);
+ *base_node = graph()->NewNode(mcgraph()->machine()->IntAdd(), *base_node,
+ *offset_node);
*offset_node = mcgraph()->Int32Constant(0);
}
}
@@ -2949,7 +2981,7 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
Node* WasmGraphBuilder::CurrentMemoryPages() {
// CurrentMemoryPages can not be called from asm.js.
- DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin());
+ DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin);
DCHECK_NOT_NULL(instance_cache_);
Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
@@ -2968,16 +3000,18 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
mcgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
+ // The CEntryStub is loaded from the instance_node so that generated code is
+ // Isolate independent. At the moment this is only done for CEntryStub(1).
DCHECK_EQ(1, fun->result_size);
+ Node* centry_stub = *effect_ =
+ LOAD_INSTANCE_FIELD(CEntryStub, MachineType::TaggedPointer());
// At the moment we only allow 4 parameters. If more parameters are needed,
// increase this constant accordingly.
static const int kMaxParams = 4;
DCHECK_GE(kMaxParams, parameter_count);
Node* inputs[kMaxParams + 6];
int count = 0;
- inputs[count++] = CEntryStub();
+ inputs[count++] = centry_stub;
for (int i = 0; i < parameter_count; i++) {
inputs[count++] = parameters[i];
}
@@ -3314,8 +3348,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
Node* value_phi =
bounds_check.Phi(type.representation(), load,
GetAsmJsOOBValue(type.representation(), mcgraph()));
- Node* effect_phi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), load,
- *effect_, bounds_check.merge);
+ Node* effect_phi = bounds_check.EffectPhi(load, *effect_);
*effect_ = effect_phi;
*control_ = bounds_check.merge;
return value_phi;
@@ -3323,6 +3356,12 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
+ // Fold instances of ChangeUint32ToUint64(IntConstant) directly.
+ UintPtrMatcher matcher(node);
+ if (matcher.HasValue()) {
+ uintptr_t value = matcher.Value();
+ return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
+ }
return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);
}
@@ -3357,8 +3396,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
type.representation(), WriteBarrierKind::kNoWriteBarrier));
Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
bounds_check.if_true);
- Node* effect_phi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), store,
- *effect_, bounds_check.merge);
+ Node* effect_phi = bounds_check.EffectPhi(store, *effect_);
*effect_ = effect_phi;
*control_ = bounds_check.merge;
return val;
@@ -3950,6 +3988,35 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_STORE_LIST
+class WasmDecorator final : public GraphDecorator {
+ public:
+ explicit WasmDecorator(NodeOriginTable* origins, wasm::Decoder* decoder)
+ : origins_(origins), decoder_(decoder) {}
+
+ void Decorate(Node* node) final {
+ origins_->SetNodeOrigin(
+ node, NodeOrigin("wasm graph creation", "n/a",
+ NodeOrigin::kWasmBytecode, decoder_->position()));
+ }
+
+ private:
+ compiler::NodeOriginTable* origins_;
+ wasm::Decoder* decoder_;
+};
+
+void WasmGraphBuilder::AddBytecodePositionDecorator(
+ NodeOriginTable* node_origins, wasm::Decoder* decoder) {
+ DCHECK_NULL(decorator_);
+ decorator_ = new (graph()->zone()) WasmDecorator(node_origins, decoder);
+ graph()->AddDecorator(decorator_);
+}
+
+void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
+ DCHECK_NOT_NULL(decorator_);
+ graph()->RemoveDecorator(decorator_);
+ decorator_ = nullptr;
+}
+
namespace {
bool must_record_function_compilation(Isolate* isolate) {
return isolate->logger()->is_listening_to_code_events() ||
@@ -3977,41 +4044,37 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, wasm::ModuleEnv* env, JSGraph* jsgraph,
wasm::FunctionSig* sig,
- compiler::SourcePositionTable* spt)
- : WasmGraphBuilder(jsgraph->isolate(), env, zone, jsgraph,
- CodeFactory::CEntry(jsgraph->isolate()),
- jsgraph->isolate()->factory()->null_value(), sig, spt),
- jsgraph_(jsgraph) {}
+ compiler::SourcePositionTable* spt,
+ StubCallMode stub_mode)
+ : WasmGraphBuilder(env, zone, jsgraph, sig, spt),
+ isolate_(jsgraph->isolate()),
+ jsgraph_(jsgraph),
+ stub_mode_(stub_mode) {}
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- // The AllocateHeapNumber builtin does not use the js_context, so we can
- // safely pass in Smi zero here.
- Callable callable =
- Builtins::CallableFor(isolate_, Builtins::kAllocateHeapNumber);
- Node* target = jsgraph()->HeapConstant(callable.code());
- Node* js_context = jsgraph()->NoContextConstant();
- Node* begin_region = graph()->NewNode(
- common->BeginRegion(RegionObservability::kNotObservable), *effect_);
+ Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmAllocateHeapNumber,
+ RelocInfo::WASM_STUB_CALL)
+ : jsgraph()->HeapConstant(
+ BUILTIN_CODE(isolate_, AllocateHeapNumber));
if (!allocate_heap_number_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate_, mcgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
+ mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow, stub_mode_);
allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
- Node* heap_number =
- graph()->NewNode(allocate_heap_number_operator_.get(), target,
- js_context, begin_region, control);
+ Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+ target, *effect_, control);
Node* store =
graph()->NewNode(machine->Store(StoreRepresentation(
MachineRepresentation::kFloat64, kNoWriteBarrier)),
heap_number, BuildHeapNumberValueIndexConstant(),
value, heap_number, control);
- Node* finish_region =
- graph()->NewNode(common->FinishRegion(), heap_number, store);
- *effect_ = finish_region;
- return finish_region;
+ *effect_ = store;
+ return heap_number;
}
Node* BuildChangeSmiToFloat64(Node* value) {
@@ -4019,17 +4082,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildChangeSmiToInt32(value));
}
- Node* BuildTestNotSmi(Node* value) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
+ Node* BuildTestHeapObject(Node* value) {
return graph()->NewNode(mcgraph()->machine()->WordAnd(), value,
- mcgraph()->IntPtrConstant(kSmiTagMask));
+ mcgraph()->IntPtrConstant(kHeapObjectTag));
}
- Node* BuildLoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(mcgraph()->machine()->Load(MachineType::Float64()),
- value, BuildHeapNumberValueIndexConstant(),
- graph()->start(), control);
+ Node* BuildLoadHeapNumberValue(Node* value) {
+ return *effect_ = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Float64()), value,
+ BuildHeapNumberValueIndexConstant(), *effect_, *control_);
}
Node* BuildHeapNumberValueIndexConstant() {
@@ -4040,9 +4101,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- if (machine->Is64()) {
+ if (SmiValuesAre32Bits()) {
return BuildChangeInt32ToSmi(value);
}
+ DCHECK(SmiValuesAre31Bits());
Node* effect = *effect_;
Node* control = *control_;
@@ -4060,6 +4122,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* if_false = graph()->NewNode(common->IfFalse(), branch);
Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
+ vfalse = BuildChangeInt32ToIntPtr(vfalse);
Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
@@ -4073,27 +4136,36 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
+ // Check several conditions:
+ // i32?
+ // ├─ true: zero?
+ // │ ├─ true: negative?
+ // │ │ ├─ true: box
+ // │ │ └─ false: potentially Smi
+ // │ └─ false: potentially Smi
+ // └─ false: box
+ // For potential Smi values, depending on whether Smis are 31 or 32 bit, we
+ // still need to check whether the value fits in a Smi.
+
Node* effect = *effect_;
Node* control = *control_;
Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
- Node* check_same = graph()->NewNode(
+ Node* check_i32 = graph()->NewNode(
machine->Float64Equal(), value,
graph()->NewNode(machine->ChangeInt32ToFloat64(), value32));
- Node* branch_same = graph()->NewNode(common->Branch(), check_same, control);
+ Node* branch_i32 = graph()->NewNode(common->Branch(), check_i32, control);
- Node* if_smi = graph()->NewNode(common->IfTrue(), branch_same);
- Node* vsmi;
- Node* if_box = graph()->NewNode(common->IfFalse(), branch_same);
- Node* vbox;
+ Node* if_i32 = graph()->NewNode(common->IfTrue(), branch_i32);
+ Node* if_not_i32 = graph()->NewNode(common->IfFalse(), branch_i32);
// We only need to check for -0 if the {value} can potentially contain -0.
Node* check_zero = graph()->NewNode(machine->Word32Equal(), value32,
mcgraph()->Int32Constant(0));
Node* branch_zero = graph()->NewNode(common->Branch(BranchHint::kFalse),
- check_zero, if_smi);
+ check_zero, if_i32);
Node* if_zero = graph()->NewNode(common->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common->IfFalse(), branch_zero);
+ Node* if_not_zero = graph()->NewNode(common->IfFalse(), branch_zero);
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
Node* check_negative = graph()->NewNode(
@@ -4104,18 +4176,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
check_negative, if_zero);
Node* if_negative = graph()->NewNode(common->IfTrue(), branch_negative);
- Node* if_notnegative = graph()->NewNode(common->IfFalse(), branch_negative);
+ Node* if_not_negative =
+ graph()->NewNode(common->IfFalse(), branch_negative);
// We need to create a box for negative 0.
- if_smi = graph()->NewNode(common->Merge(2), if_notzero, if_notnegative);
- if_box = graph()->NewNode(common->Merge(2), if_box, if_negative);
+ Node* if_smi =
+ graph()->NewNode(common->Merge(2), if_not_zero, if_not_negative);
+ Node* if_box = graph()->NewNode(common->Merge(2), if_not_i32, if_negative);
// On 64-bit machines we can just wrap the 32-bit integer in a smi, for
// 32-bit machines we need to deal with potential overflow and fallback to
// boxing.
- if (machine->Is64()) {
+ Node* vsmi;
+ if (SmiValuesAre32Bits()) {
vsmi = BuildChangeInt32ToSmi(value32);
} else {
+ DCHECK(SmiValuesAre31Bits());
Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
value32, if_smi);
@@ -4129,10 +4205,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
+ vsmi = BuildChangeInt32ToIntPtr(vsmi);
}
// Allocate the box for the {value}.
- vbox = BuildAllocateHeapNumberWithValue(value, if_box);
+ Node* vbox = BuildAllocateHeapNumberWithValue(value, if_box);
Node* ebox = *effect_;
Node* merge = graph()->NewNode(common->Merge(2), if_smi, if_box);
@@ -4155,11 +4232,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildJavaScriptToNumber(Node* node, Node* js_context) {
- Callable callable = Builtins::CallableFor(isolate_, Builtins::kToNumber);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate_, mcgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ mcgraph()->zone(), TypeConversionDescriptor{}, 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ Node* stub_code =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
+ : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
Node* result =
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
@@ -4176,40 +4256,49 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- Node* check = BuildTestNotSmi(value);
- Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), check,
- graph()->start());
+ // Implement the following decision tree:
+ // heap object?
+ // ├─ true: undefined?
+ // │ ├─ true: f64 const
+ // │ └─ false: load heap number value
+ // └─ false: smi to float64
+
+ Node* check_heap_object = BuildTestHeapObject(value);
+ Diamond is_heap_object(graph(), common, check_heap_object,
+ BranchHint::kFalse);
+ is_heap_object.Chain(*control_);
- Node* if_not_smi = graph()->NewNode(common->IfTrue(), branch);
+ *control_ = is_heap_object.if_true;
+ Node* orig_effect = *effect_;
- Node* vnot_smi;
- Node* check_undefined = graph()->NewNode(machine->WordEqual(), value,
- jsgraph()->UndefinedConstant());
- Node* branch_undefined = graph()->NewNode(
- common->Branch(BranchHint::kFalse), check_undefined, if_not_smi);
+ Node* undefined_node = *effect_ =
+ LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
+ Node* check_undefined =
+ graph()->NewNode(machine->WordEqual(), value, undefined_node);
+ Node* effect_tagged = *effect_;
- Node* if_undefined = graph()->NewNode(common->IfTrue(), branch_undefined);
+ Diamond is_undefined(graph(), common, check_undefined, BranchHint::kFalse);
+ is_undefined.Nest(is_heap_object, true);
+
+ *control_ = is_undefined.if_false;
+ Node* vheap_number = BuildLoadHeapNumberValue(value);
+ Node* effect_undefined = *effect_;
+
+ *control_ = is_undefined.merge;
Node* vundefined =
mcgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ Node* vtagged = is_undefined.Phi(MachineRepresentation::kFloat64,
+ vundefined, vheap_number);
- Node* if_not_undefined =
- graph()->NewNode(common->IfFalse(), branch_undefined);
- Node* vheap_number = BuildLoadHeapNumberValue(value, if_not_undefined);
+ effect_tagged = is_undefined.EffectPhi(effect_tagged, effect_undefined);
- if_not_smi =
- graph()->NewNode(common->Merge(2), if_undefined, if_not_undefined);
- vnot_smi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
- vundefined, vheap_number, if_not_smi);
-
- Node* if_smi = graph()->NewNode(common->IfFalse(), branch);
+ // If input is Smi: just convert to float64.
Node* vfrom_smi = BuildChangeSmiToFloat64(value);
- Node* merge = graph()->NewNode(common->Merge(2), if_not_smi, if_smi);
- Node* phi =
- graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
- vnot_smi, vfrom_smi, merge);
-
- return phi;
+ *control_ = is_heap_object.merge;
+ *effect_ = is_heap_object.EffectPhi(effect_tagged, orig_effect);
+ return is_heap_object.Phi(MachineRepresentation::kFloat64, vtagged,
+ vfrom_smi);
}
Node* ToJS(Node* node, wasm::ValueType type) {
@@ -4227,8 +4316,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmAnyRef:
return node;
- case wasm::kWasmStmt:
- return jsgraph()->UndefinedConstant();
default:
UNREACHABLE();
}
@@ -4246,7 +4333,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* num = BuildJavaScriptToNumber(node, js_context);
// Change representation.
- SimplifiedOperatorBuilder simplified(mcgraph()->zone());
num = BuildChangeTaggedToFloat64(num);
switch (type) {
@@ -4287,7 +4373,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
&sig, graph()->NewNode(mcgraph()->common()->ExternalConstant(ref)));
}
- Node* BuildLoadInstanceFromExportedFunction(Node* closure) {
+ Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
Node* shared = *effect_ = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), closure,
jsgraph()->Int32Constant(JSFunction::kSharedFunctionInfoOffset -
@@ -4298,6 +4384,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
jsgraph()->Int32Constant(SharedFunctionInfo::kFunctionDataOffset -
kHeapObjectTag),
*effect_, *control_);
+ return function_data;
+ }
+
+ Node* BuildLoadInstanceFromExportedFunctionData(Node* function_data) {
Node* instance = *effect_ = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
jsgraph()->Int32Constant(WasmExportedFunctionData::kInstanceOffset -
@@ -4306,7 +4396,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return instance;
}
- void BuildJSToWasmWrapper(uint32_t wasm_func_index, Address call_target) {
+ Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data) {
+ Node* function_index_smi = *effect_ = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
+ jsgraph()->Int32Constant(
+ WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag),
+ *effect_, *control_);
+ Node* function_index = BuildChangeSmiToInt32(function_index_smi);
+ return function_index;
+ }
+
+ void BuildJSToWasmWrapper(bool is_import) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
@@ -4324,13 +4424,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
- // Create the instance_node node to pass as parameter. It is loaded from the
+ // Create the instance_node node to pass as parameter. It is loaded from
// an actual reference to an instance or a placeholder reference,
// called {WasmExportedFunction} via the {WasmExportedFunctionData}
- // structure. since JSToWasm wrappers can be compiled at module compile time
- // and patched at instance build time.
- DCHECK_NULL(instance_node_);
- instance_node_ = BuildLoadInstanceFromExportedFunction(js_closure);
+ // structure.
+ Node* function_data = BuildLoadFunctionDataFromExportedFunction(js_closure);
+ instance_node_.set(
+ BuildLoadInstanceFromExportedFunctionData(function_data));
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the js_context of the calling javascript
@@ -4338,7 +4438,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// js_context independent.
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
nullptr, 0);
- Return(jsgraph()->UndefinedConstant());
+ Return(jsgraph()->SmiConstant(0));
return;
}
@@ -4356,16 +4456,29 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Set the ThreadInWasm flag before we do the actual call.
BuildModifyThreadInWasmFlag(true);
- if (env_ && wasm_func_index < env_->module->num_imported_functions) {
+ // Load function index from {WasmExportedFunctionData}.
+ Node* function_index =
+ BuildLoadFunctionIndexFromExportedFunctionData(function_data);
+
+ if (is_import) {
// Call to an imported function.
- DCHECK_EQ(kNullAddress, call_target);
BuildImportWasmCall(sig_, args, &rets, wasm::kNoCodePosition,
- wasm_func_index);
+ function_index);
} else {
// Call to a wasm function defined in this module.
- DCHECK_NE(kNullAddress, call_target);
- args[0] = mcgraph()->RelocatableIntPtrConstant(
- call_target, RelocInfo::JS_TO_WASM_CALL);
+ // The call target is the jump table slot for that function. This is
+ // {jump_table + (func_index - num_imports) * kJumpTableSlotSize}
+ // == {jump_table_adjusted + func_index * kJumpTableSlotSize}.
+ Node* jump_table_adjusted =
+ LOAD_INSTANCE_FIELD(JumpTableAdjustedStart, MachineType::Pointer());
+ Node* jump_table_offset = graph()->NewNode(
+ mcgraph()->machine()->IntMul(), Uint32ToUintptr(function_index),
+ mcgraph()->IntPtrConstant(
+ wasm::JumpTableAssembler::kJumpTableSlotSize));
+ Node* jump_table_slot =
+ graph()->NewNode(mcgraph()->machine()->IntAdd(), jump_table_adjusted,
+ jump_table_offset);
+ args[0] = jump_table_slot;
BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr,
kNoRetpoline);
@@ -4385,16 +4498,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- CallDescriptor* call_descriptor;
Node* start = Start(wasm_count + 3);
*effect_ = start;
*control_ = start;
+ // Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
+
Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
MachineType::TaggedPointer());
Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, index);
- Node* undefined_node = jsgraph()->UndefinedConstant();
+ Node* undefined_node =
+ LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
Node* native_context =
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer());
@@ -4408,6 +4523,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return false;
}
+ CallDescriptor* call_descriptor;
Node** args = Buffer(wasm_count + 9);
Node* call = nullptr;
@@ -4452,9 +4568,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
pos, args);
} else if (function->shared()->internal_formal_parameter_count() >= 0) {
- Callable callable = CodeFactory::ArgumentAdaptor(isolate_);
int pos = 0;
- args[pos++] = jsgraph()->HeapConstant(callable.code());
+ args[pos++] = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmArgumentsAdaptor, RelocInfo::WASM_STUB_CALL);
args[pos++] = callable_node; // target callable
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
@@ -4470,16 +4586,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node;
}
+ call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), ArgumentAdaptorDescriptor{}, 1 + wasm_count,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
+
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(args, pos, wasm_count, sig_);
args[pos++] = function_context;
args[pos++] = *effect_;
args[pos++] = *control_;
- call = graph()->NewNode(
- mcgraph()->common()->Call(Linkage::GetStubCallDescriptor(
- isolate_, mcgraph()->zone(), callable.descriptor(),
- 1 + wasm_count, CallDescriptor::kNoFlags)),
- pos, args);
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ pos, args);
}
}
}
@@ -4487,16 +4605,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// We cannot call the target directly, we have to use the Call builtin.
if (!call) {
int pos = 0;
- // We cannot call the target directly, we have to use the Call builtin.
- Callable callable = CodeFactory::Call(isolate_);
- args[pos++] = jsgraph()->HeapConstant(callable.code());
+ args[pos++] = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmCallJavaScript, RelocInfo::WASM_STUB_CALL);
args[pos++] = callable_node;
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = undefined_node; // receiver
call_descriptor = Linkage::GetStubCallDescriptor(
- isolate_, graph()->zone(), callable.descriptor(), wasm_count + 1,
- CallDescriptor::kNoFlags);
+ graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(args, pos, wasm_count, sig_);
@@ -4536,6 +4654,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
*effect_ = start;
*control_ = start;
+ // Create the instance_node from the passed parameter.
+ instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
+
// Compute size for the argument buffer.
int args_size_bytes = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -4672,18 +4793,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
JSGraph* jsgraph() { return jsgraph_; }
private:
+ Isolate* const isolate_;
JSGraph* jsgraph_;
+ StubCallMode stub_mode_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
};
} // namespace
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- Address call_target, uint32_t index,
- wasm::UseTrapHandler use_trap_handler) {
- const wasm::WasmFunction* func = &module->functions[index];
+MaybeHandle<Code> CompileJSToWasmWrapper(
+ Isolate* isolate, const wasm::NativeModule* native_module,
+ wasm::FunctionSig* sig, bool is_import,
+ wasm::UseTrapHandler use_trap_handler) {
+ const wasm::WasmModule* module = native_module->module();
//----------------------------------------------------------------------------
- // Create the Graph
+ // Create the Graph.
//----------------------------------------------------------------------------
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
@@ -4698,10 +4822,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Node* effect = nullptr;
wasm::ModuleEnv env(module, use_trap_handler, wasm::kRuntimeExceptionSupport);
- WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, func->sig, nullptr);
+ WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, sig, nullptr,
+ StubCallMode::kCallOnHeapBuiltin);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildJSToWasmWrapper(index, call_target);
+ builder.BuildJSToWasmWrapper(is_import);
//----------------------------------------------------------------------------
// Run the compilation pipeline.
@@ -4717,21 +4842,23 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
OptimizedCompilationInfo info(func_name, &zone, Code::JS_TO_WASM_FUNCTION);
if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- Graph after change lowering -- " << std::endl;
- os << AsRPO(graph);
+ StdoutStream{} << "-- Graph after change lowering -- " << std::endl
+ << AsRPO(graph);
}
// Schedule and compile to machine code.
- int params =
- static_cast<int>(module->functions[index].sig->parameter_count());
+ int params = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, incoming, &graph, WasmAssemblerOptions());
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ return maybe_code;
+ }
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
+ if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
code->Disassemble(func_name.start(), os);
@@ -4746,42 +4873,10 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
return code;
}
-namespace {
-
-void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
-#ifdef DEBUG
- // We expect the only embedded objects to be those originating from
- // a snapshot, which are immovable.
- DisallowHeapAllocation no_gc;
- if (wrapper.is_null()) return;
- static constexpr int kAllGCRefs = (1 << (RelocInfo::LAST_GCED_ENUM + 1)) - 1;
- for (RelocIterator it(*wrapper, kAllGCRefs); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- Object* target = nullptr;
- switch (mode) {
- case RelocInfo::CODE_TARGET:
- // this would be either one of the stubs or builtins, because
- // we didn't link yet.
- target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- break;
- case RelocInfo::EMBEDDED_OBJECT:
- target = it.rinfo()->target_object();
- break;
- default:
- UNREACHABLE();
- }
- DCHECK_NOT_NULL(target);
- DCHECK(target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target)));
- }
-#endif
-}
-
-} // namespace
-
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
- wasm::FunctionSig* sig, uint32_t index,
- wasm::ModuleOrigin origin,
- wasm::UseTrapHandler use_trap_handler) {
+MaybeHandle<Code> CompileWasmToJSWrapper(
+ Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
+ uint32_t index, wasm::ModuleOrigin origin,
+ wasm::UseTrapHandler use_trap_handler) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -4805,7 +4900,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::kRuntimeExceptionSupport);
WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, sig,
- source_position_table);
+ source_position_table,
+ StubCallMode::kCallWasmRuntimeStub);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToJSWrapper(target, index);
@@ -4821,9 +4917,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
OptimizedCompilationInfo info(func_name, &zone, Code::WASM_TO_JS_FUNCTION);
if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- Graph after change lowering -- " << std::endl;
- os << AsRPO(graph);
+ StdoutStream{} << "-- Graph after change lowering -- " << std::endl
+ << AsRPO(graph);
}
// Schedule and compile to machine code.
@@ -4831,13 +4926,15 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
-
- Handle<Code> code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, nullptr, source_position_table);
- ValidateImportWrapperReferencesImmovables(code);
-
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate),
+ nullptr, source_position_table);
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ return maybe_code;
+ }
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
+ if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
code->Disassemble(func_name.start(), os);
@@ -4852,8 +4949,9 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
return code;
}
-Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
- wasm::FunctionSig* sig) {
+MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate* isolate,
+ uint32_t func_index,
+ wasm::FunctionSig* sig) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -4869,55 +4967,56 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr);
+ WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr,
+ StubCallMode::kCallWasmRuntimeStub);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmInterpreterEntry(func_index);
- Handle<Code> code = Handle<Code>::null();
- {
- // Schedule and compile to machine code.
- CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
- if (machine.Is32()) {
- incoming = GetI32WasmCallDescriptor(&zone, incoming);
- }
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
+ }
#ifdef DEBUG
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(
- SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
+ EmbeddedVector<char, 32> func_name;
+ func_name.Truncate(
+ SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
#else
- Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
+ Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
#endif
- OptimizedCompilationInfo info(func_name, &zone,
- Code::WASM_INTERPRETER_ENTRY);
+ OptimizedCompilationInfo info(func_name, &zone, Code::WASM_INTERPRETER_ENTRY);
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- Wasm interpreter entry graph -- " << std::endl;
- os << AsRPO(graph);
- }
+ if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- Wasm interpreter entry graph -- " << std::endl
+ << AsRPO(graph);
+ }
- code = Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph,
- nullptr);
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate),
+ nullptr);
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ return maybe_code;
+ }
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(func_name.start(), os);
- }
+ if (FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(func_name.start(), os);
+ }
#endif
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "%.*s", func_name.length(), func_name.start());
- }
+ if (must_record_function_compilation(isolate)) {
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
+ "%.*s", func_name.length(), func_name.start());
}
- return code;
+ return maybe_code;
}
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
+MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
@@ -4930,7 +5029,8 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr);
+ WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr,
+ StubCallMode::kCallOnHeapBuiltin);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildCWasmEntry();
@@ -4960,15 +5060,17 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
OptimizedCompilationInfo info(debug_name_vec, &zone, Code::C_WASM_ENTRY);
if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- C Wasm entry graph -- " << std::endl;
- os << AsRPO(graph);
+ StdoutStream{} << "-- C Wasm entry graph -- " << std::endl << AsRPO(graph);
}
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate));
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ return maybe_code;
+ }
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
+ if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
code->Disassemble(debug_name, os);
@@ -4978,18 +5080,6 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
return code;
}
-WasmCompilationData::WasmCompilationData(
- wasm::RuntimeExceptionSupport runtime_exception_support)
- : protected_instructions_(
- new std::vector<trap_handler::ProtectedInstructionData>()),
- runtime_exception_support_(runtime_exception_support) {}
-
-void WasmCompilationData::AddProtectedInstruction(uint32_t instr_offset,
- uint32_t landing_offset) {
- protected_instructions_->emplace_back(
- trap_handler::ProtectedInstructionData{instr_offset, landing_offset});
-}
-
TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
wasm::WasmCompilationUnit* wasm_unit)
: wasm_unit_(wasm_unit),
@@ -4999,7 +5089,7 @@ TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
TurbofanWasmCompilationUnit::~TurbofanWasmCompilationUnit() = default;
SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
- double* decode_ms) {
+ double* decode_ms, MachineGraph* mcgraph, NodeOriginTable* node_origins) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -5007,21 +5097,16 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
// Create a TF graph during decoding.
SourcePositionTable* source_position_table =
- new (mcgraph_->zone()) SourcePositionTable(mcgraph_->graph());
- // We get the handle for {null_value()} directly from the isolate although we
- // are on a background task because the handle is stored in the isolate
- // anyways, and it is immortal and immovable.
- WasmGraphBuilder builder(wasm_unit_->isolate_, wasm_unit_->env_,
- mcgraph_->zone(), mcgraph_, wasm_unit_->centry_stub_,
- wasm_unit_->isolate_->factory()->null_value(),
+ new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
+ WasmGraphBuilder builder(wasm_unit_->env_, mcgraph->zone(), mcgraph,
wasm_unit_->func_body_.sig, source_position_table);
- graph_construction_result_ = wasm::BuildTFGraph(
- wasm_unit_->isolate_->allocator(), &builder, wasm_unit_->func_body_);
+ graph_construction_result_ =
+ wasm::BuildTFGraph(wasm_unit_->wasm_engine_->allocator(), &builder,
+ wasm_unit_->func_body_, node_origins);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compilation failed: " << graph_construction_result_.error_msg()
- << std::endl;
+ StdoutStream{} << "Compilation failed: "
+ << graph_construction_result_.error_msg() << std::endl;
}
return nullptr;
}
@@ -5031,15 +5116,16 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
if (builder.has_simd() &&
(!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->lower_simd_)) {
SimdScalarLowering(
- mcgraph_,
- CreateMachineSignature(mcgraph_->zone(), wasm_unit_->func_body_.sig))
+ mcgraph,
+ CreateMachineSignature(mcgraph->zone(), wasm_unit_->func_body_.sig))
.LowerGraph();
}
if (wasm_unit_->func_index_ >= FLAG_trace_wasm_ast_start &&
wasm_unit_->func_index_ < FLAG_trace_wasm_ast_end) {
- PrintRawWasmCode(wasm_unit_->isolate_->allocator(), wasm_unit_->func_body_,
- wasm_unit_->env_->module, wasm::kPrintLocals);
+ PrintRawWasmCode(wasm_unit_->wasm_engine_->allocator(),
+ wasm_unit_->func_body_, wasm_unit_->env_->module,
+ wasm::kPrintLocals);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -5070,61 +5156,71 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
} // namespace
void TurbofanWasmCompilationUnit::ExecuteCompilation() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "ExecuteTurbofanCompilation");
double decode_ms = 0;
size_t node_count = 0;
// Scope for the {graph_zone}.
{
- Zone graph_zone(wasm_unit_->isolate_->allocator(), ZONE_NAME);
- mcgraph_ = new (&graph_zone)
+ Zone graph_zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
+ MachineGraph* mcgraph = new (&graph_zone)
MachineGraph(new (&graph_zone) Graph(&graph_zone),
new (&graph_zone) CommonOperatorBuilder(&graph_zone),
new (&graph_zone) MachineOperatorBuilder(
&graph_zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()));
+
+ Zone compilation_zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
+
+ OptimizedCompilationInfo info(
+ GetDebugName(&compilation_zone, wasm_unit_->func_name_,
+ wasm_unit_->func_index_),
+ &compilation_zone, Code::WASM_FUNCTION);
+
+ NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
+ ? new (&graph_zone)
+ NodeOriginTable(mcgraph->graph())
+ : nullptr;
SourcePositionTable* source_positions =
- BuildGraphForWasmFunction(&decode_ms);
+ BuildGraphForWasmFunction(&decode_ms, mcgraph, node_origins);
if (graph_construction_result_.failed()) {
ok_ = false;
return;
}
+ if (node_origins) {
+ node_origins->AddDecorator();
+ }
+
base::ElapsedTimer pipeline_timer;
if (FLAG_trace_wasm_decode_time) {
- node_count = mcgraph_->graph()->NodeCount();
+ node_count = mcgraph->graph()->NodeCount();
pipeline_timer.Start();
}
- compilation_zone_.reset(
- new Zone(wasm_unit_->isolate_->allocator(), ZONE_NAME));
-
// Run the compiler pipeline to generate machine code.
- auto call_descriptor = GetWasmCallDescriptor(compilation_zone_.get(),
- wasm_unit_->func_body_.sig);
- if (mcgraph_->machine()->Is32()) {
+ auto call_descriptor =
+ GetWasmCallDescriptor(&compilation_zone, wasm_unit_->func_body_.sig);
+ if (mcgraph->machine()->Is32()) {
call_descriptor =
- GetI32WasmCallDescriptor(compilation_zone_.get(), call_descriptor);
+ GetI32WasmCallDescriptor(&compilation_zone, call_descriptor);
}
- info_.reset(new OptimizedCompilationInfo(
- GetDebugName(compilation_zone_.get(), wasm_unit_->func_name_,
- wasm_unit_->func_index_),
- compilation_zone_.get(), Code::WASM_FUNCTION));
-
- NodeOriginTable* node_origins = info_->trace_turbo_graph_enabled()
- ? new (&graph_zone)
- NodeOriginTable(mcgraph_->graph())
- : nullptr;
- job_.reset(Pipeline::NewWasmCompilationJob(
- info_.get(), wasm_unit_->isolate_, mcgraph_, call_descriptor,
- source_positions, node_origins, &wasm_compilation_data_,
- wasm_unit_->env_->module->origin()));
- ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmCompilationJob(
+ &info, wasm_unit_->wasm_engine_, mcgraph, call_descriptor,
+ source_positions, node_origins, &wasm_compilation_data_,
+ wasm_unit_->func_body_,
+ const_cast<wasm::WasmModule*>(wasm_unit_->env_->module),
+ wasm_unit_->native_module_, wasm_unit_->func_index_,
+ wasm_unit_->env_->module->origin));
+ ok_ = job->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
wasm_unit_->counters_->wasm_compile_function_peak_memory_bytes()->AddSample(
- static_cast<int>(mcgraph_->graph()->zone()->allocation_size()));
+ static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
if (FLAG_trace_wasm_decode_time) {
double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
@@ -5135,13 +5231,8 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
wasm_unit_->func_body_.start),
decode_ms, node_count, pipeline_ms);
}
- // The graph zone is about to get out of scope. Avoid invalid references.
- mcgraph_ = nullptr;
+ if (ok_) wasm_code_ = info.wasm_code();
}
-
- // Record the memory cost this unit places on the system until
- // it is finalized.
- wasm_unit_->memory_cost_ = job_->AllocatedMemory();
}
wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
@@ -5164,37 +5255,9 @@ wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
return nullptr;
}
- base::ElapsedTimer codegen_timer;
- if (FLAG_trace_wasm_decode_time) {
- codegen_timer.Start();
- }
- if (job_->FinalizeJob(wasm_unit_->isolate_) != CompilationJob::SUCCEEDED) {
- return nullptr;
- }
-
- // TODO(mtrofin): when we crystalize a design in lieu of WasmCodeDesc, that
- // works for both wasm and non-wasm, we can simplify AddCode to just take
- // that as a parameter.
- const CodeDesc& desc = job_->compilation_info()->wasm_code_desc()->code_desc;
- wasm::WasmCode* code = wasm_unit_->native_module_->AddCode(
- desc, job_->compilation_info()->wasm_code_desc()->frame_slot_count,
- wasm_unit_->func_index_,
- job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- job_->compilation_info()->wasm_code_desc()->handler_table_offset,
- wasm_compilation_data_.ReleaseProtectedInstructions(),
- job_->compilation_info()->wasm_code_desc()->source_positions_table,
- wasm::WasmCode::kTurbofan);
- if (!code) return code;
- if (FLAG_trace_wasm_decode_time) {
- double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
- static_cast<unsigned>(wasm_unit_->func_body_.end -
- wasm_unit_->func_body_.start),
- codegen_ms);
- }
-
- return code;
+ wasm_unit_->native_module()->PublishCode(wasm_code_);
+ return wasm_code_;
}
namespace {
@@ -5384,6 +5447,13 @@ CallDescriptor* GetI32WasmCallDescriptorForSimd(
MachineRepresentation::kWord32);
}
+AssemblerOptions WasmAssemblerOptions() {
+ AssemblerOptions options;
+ options.record_reloc_info_for_serialization = true;
+ options.enable_root_array_delta_access = false;
+ return options;
+}
+
#undef WASM_64
#undef FATAL_UNSUPPORTED_OPCODE
#undef WASM_INSTANCE_OBJECT_OFFSET
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 01c339fb47..0f6ee0304e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -9,7 +9,7 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
-#include "src/optimized-compilation-info.h"
+#include "src/runtime/runtime.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
@@ -20,8 +20,7 @@
namespace v8 {
namespace internal {
-
-class OptimizedCompilationJob;
+struct AssemblerOptions;
namespace compiler {
// Forward declarations for some compiler data structures.
@@ -29,17 +28,18 @@ class CallDescriptor;
class Graph;
class MachineGraph;
class Node;
+class NodeOriginTable;
class Operator;
class SourcePositionTable;
+class WasmDecorator;
+enum class TrapId : uint32_t;
} // namespace compiler
namespace wasm {
struct DecodeStruct;
-class SignatureMap;
// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
typedef compiler::Node TFNode;
typedef compiler::MachineGraph TFGraph;
-class NativeModule;
class WasmCode;
} // namespace wasm
@@ -50,13 +50,17 @@ namespace compiler {
class WasmCompilationData {
public:
explicit WasmCompilationData(
- wasm::RuntimeExceptionSupport runtime_exception_support);
+ wasm::RuntimeExceptionSupport runtime_exception_support)
+ : runtime_exception_support_(runtime_exception_support) {}
- void AddProtectedInstruction(uint32_t instr_offset, uint32_t landing_offset);
+ void AddProtectedInstruction(uint32_t instr_offset, uint32_t landing_offset) {
+ protected_instructions_.push_back({instr_offset, landing_offset});
+ }
- std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
- ReleaseProtectedInstructions() {
- return std::move(protected_instructions_);
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ GetProtectedInstructions() {
+ return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
+ protected_instructions_);
}
wasm::RuntimeExceptionSupport runtime_exception_support() const {
@@ -64,8 +68,7 @@ class WasmCompilationData {
}
private:
- std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
- protected_instructions_;
+ std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
// See ModuleEnv::runtime_exception_support_.
wasm::RuntimeExceptionSupport runtime_exception_support_;
@@ -78,7 +81,9 @@ class TurbofanWasmCompilationUnit {
explicit TurbofanWasmCompilationUnit(wasm::WasmCompilationUnit* wasm_unit);
~TurbofanWasmCompilationUnit();
- SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+ SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms,
+ MachineGraph* mcgraph,
+ NodeOriginTable* node_origins);
void ExecuteCompilation();
@@ -88,36 +93,30 @@ class TurbofanWasmCompilationUnit {
wasm::WasmCompilationUnit* const wasm_unit_;
WasmCompilationData wasm_compilation_data_;
bool ok_ = true;
- // The graph zone is deallocated at the end of {ExecuteCompilation} by virtue
- // of it being zone allocated.
- MachineGraph* mcgraph_ = nullptr;
- // The compilation_zone_, info_, and job_ fields need to survive past
- // {ExecuteCompilation}, onto {FinishCompilation} (which happens on the main
- // thread).
- std::unique_ptr<Zone> compilation_zone_;
- std::unique_ptr<OptimizedCompilationInfo> info_;
- std::unique_ptr<OptimizedCompilationJob> job_;
+ wasm::WasmCode* wasm_code_ = nullptr;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
DISALLOW_COPY_AND_ASSIGN(TurbofanWasmCompilationUnit);
};
// Wraps a JS function, producing a code object that can be called from wasm.
-Handle<Code> CompileWasmToJSWrapper(Isolate*, Handle<JSReceiver> target,
- wasm::FunctionSig*, uint32_t index,
- wasm::ModuleOrigin, wasm::UseTrapHandler);
-
-// Wraps a given wasm code object, producing a code object.
-V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(Isolate*,
- wasm::WasmModule*,
- Address call_target,
- uint32_t index,
- wasm::UseTrapHandler);
+MaybeHandle<Code> CompileWasmToJSWrapper(Isolate*, Handle<JSReceiver> target,
+ wasm::FunctionSig*, uint32_t index,
+ wasm::ModuleOrigin,
+ wasm::UseTrapHandler);
+
+// Creates a code object calling a wasm function with the given signature,
+// callable from JS.
+// TODO(clemensh): Remove the {UseTrapHandler} parameter to make js-to-wasm
+// wrappers sharable across instances.
+V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(
+ Isolate*, const wasm::NativeModule*, wasm::FunctionSig*, bool is_import,
+ wasm::UseTrapHandler);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
-Handle<Code> CompileWasmInterpreterEntry(Isolate*, uint32_t func_index,
- wasm::FunctionSig*);
+MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate*, uint32_t func_index,
+ wasm::FunctionSig*);
// Helper function to get the offset into a fixed array for a given {index}.
// TODO(titzer): access-builder.h is not accessible outside compiler. Move?
@@ -134,7 +133,7 @@ enum CWasmEntryParameters {
// Compiles a stub with JS linkage, taking parameters as described by
// {CWasmEntryParameters}. It loads the wasm parameters from the argument
// buffer and calls the wasm function given as first parameter.
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
+MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
// Values from the instance object are cached between WASM-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -148,7 +147,6 @@ struct WasmInstanceCacheNodes {
// Abstracts details of building TurboFan graph nodes for wasm to separate
// the wasm decoder from the internal details of TurboFan.
-typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool {
@@ -157,9 +155,8 @@ class WasmGraphBuilder {
};
enum UseRetpoline : bool { kRetpoline = true, kNoRetpoline = false };
- WasmGraphBuilder(Isolate* isolate, wasm::ModuleEnv* env, Zone* zone,
- MachineGraph* mcgraph, Handle<Code> centry_stub,
- Handle<Oddball> anyref_null, wasm::FunctionSig* sig,
+ WasmGraphBuilder(wasm::ModuleEnv* env, Zone* zone, MachineGraph* mcgraph,
+ wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr);
Node** Buffer(size_t count) {
@@ -331,10 +328,14 @@ class WasmGraphBuilder {
MachineGraph* mcgraph() { return mcgraph_; }
Graph* graph();
+ void AddBytecodePositionDecorator(NodeOriginTable* node_origins,
+ wasm::Decoder* decoder);
+
+ void RemoveBytecodePositionDecorator();
+
protected:
static const int kDefaultBufferSize = 16;
- Isolate* const isolate_;
Zone* const zone_;
MachineGraph* const mcgraph_;
wasm::ModuleEnv* const env_;
@@ -343,16 +344,11 @@ class WasmGraphBuilder {
Node** effect_ = nullptr;
WasmInstanceCacheNodes* instance_cache_ = nullptr;
- Handle<Code> centry_stub_;
- Handle<Oddball> anyref_null_;
-
SetOncePointer<Node> instance_node_;
SetOncePointer<Node> globals_start_;
SetOncePointer<Node> imported_mutable_globals_;
- SetOncePointer<Node> centry_stub_node_;
- SetOncePointer<Node> anyref_null_node_;
- SetOncePointer<Node> stack_check_builtin_code_node_;
- const Operator* stack_check_call_operator_ = nullptr;
+ SetOncePointer<Node> stack_check_code_node_;
+ SetOncePointer<const Operator> stack_check_call_operator_;
Node** cur_buffer_;
size_t cur_bufsize_;
@@ -363,9 +359,10 @@ class WasmGraphBuilder {
wasm::FunctionSig* const sig_;
+ compiler::WasmDecorator* decorator_ = nullptr;
+
compiler::SourcePositionTable* const source_position_table_ = nullptr;
- Node* CEntryStub();
Node* NoContextConstant();
Node* MemBuffer(uint32_t offset);
@@ -390,6 +387,8 @@ class WasmGraphBuilder {
UseRetpoline use_retpoline);
Node* BuildImportWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
wasm::WasmCodePosition position, int func_index);
+ Node* BuildImportWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
+ wasm::WasmCodePosition position, Node* func_index);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -446,6 +445,7 @@ class WasmGraphBuilder {
MachineType result_type, wasm::TrapReason trap_zero,
wasm::WasmCodePosition position);
+ Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
@@ -490,7 +490,7 @@ class WasmGraphBuilder {
Node* js_context,
Node* const* parameters,
int parameter_count);
- Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason);
+ TrapId GetTrapIdForTrap(wasm::TrapReason reason);
};
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
@@ -504,6 +504,8 @@ V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
Zone* zone, CallDescriptor* call_descriptor);
+AssemblerOptions WasmAssemblerOptions();
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 5d7534d2eb..548e3eb416 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -187,11 +187,12 @@ class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- XMMRegister input,
+ XMMRegister input, StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
result_(result),
input_(input),
+ stub_mode_(stub_mode),
unwinding_info_writer_(unwinding_info_writer),
isolate_(gen->isolate()),
zone_(gen->zone()) {}
@@ -201,7 +202,13 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
- __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
+ }
__ movl(result_, MemOperand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
@@ -211,6 +218,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
private:
Register const result_;
XMMRegister const input_;
+ StubCallMode stub_mode_;
UnwindingInfoWriter* const unwinding_info_writer_;
Isolate* isolate_;
Zone* zone_;
@@ -262,37 +270,23 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
class WasmOutOfLineTrap : public OutOfLineCode {
public:
- WasmOutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- gen_(gen),
- frame_elided_(frame_elided),
- instr_(instr) {}
+ WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
void Generate() override {
X64OperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
GenerateWithTrapId(trap_id);
}
protected:
CodeGenerator* gen_;
- void GenerateWithTrapId(Builtins::Name trap_id) {
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
- GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
- }
+ void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
+ void GenerateCallToTrap(TrapId trap_id) {
if (!gen_->wasm_runtime_exception_support()) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
@@ -306,8 +300,9 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ Ret(static_cast<int>(pop_size), rcx);
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ __ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -316,20 +311,17 @@ class WasmOutOfLineTrap : public OutOfLineCode {
}
}
- bool frame_elided_;
Instruction* instr_;
};
class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
public:
- WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, bool frame_elided,
- Instruction* instr)
- : WasmOutOfLineTrap(gen, frame_elided, instr), pc_(pc) {}
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
void Generate() final {
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
-
- GenerateWithTrapId(Builtins::kThrowWasmTrapMemOutOfBounds);
+ GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
}
private:
@@ -342,8 +334,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessProtected) {
- const bool frame_elided = !codegen->frame_access_state()->has_frame();
- new (zone) WasmProtectedInstructionTrap(codegen, pc, frame_elided, instr);
+ new (zone) WasmProtectedInstructionTrap(codegen, pc, instr);
}
}
@@ -563,9 +554,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
Register caller_args_count_reg = scratch1;
- __ SmiToInteger32(
- caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg,
+ Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
@@ -691,6 +681,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
@@ -704,15 +697,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- if (info()->IsWasm()) {
- __ near_call(wasm_code, RelocInfo::WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
+ __ near_call(wasm_code, constant.rmode());
} else {
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ RetpolineCall(wasm_code, constant.rmode());
} else {
- __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ Call(wasm_code, constant.rmode());
}
}
} else {
@@ -736,9 +729,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
- __ jmp(code, RelocInfo::CODE_TARGET);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
@@ -753,12 +749,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallWasm: {
if (HasImmediateInput(instr, 0)) {
- Address wasm_code =
- static_cast<Address>(i.ToConstant(instr->InputAt(0)).ToInt64());
- if (info()->IsWasm()) {
- __ near_jmp(wasm_code, RelocInfo::WASM_CALL);
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
+ __ near_jmp(wasm_code, constant.rmode());
} else {
- __ Move(kScratchRegister, wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ __ Move(kScratchRegister, wasm_code, constant.rmode());
__ jmp(kScratchRegister);
}
} else {
@@ -777,10 +773,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister)) {
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- DCHECK_EQ(rcx, reg);
- }
+ DCHECK_IMPLIES(
+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -873,6 +868,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
@@ -931,14 +929,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(i.OutputRegister(), rbp);
}
break;
- case kArchRootsPointer:
- __ movq(i.OutputRegister(), kRootRegister);
- break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(
- this, result, input, &unwinding_info_writer_);
+ this, result, input, DetermineStubCallMode(),
+ &unwinding_info_writer_);
// We use Cvttsd2siq instead of Cvttsd2si due to performance reasons. The
// use of Cvttsd2siq requires the movl below to avoid sign extension.
__ Cvttsd2siq(result, input);
@@ -1962,9 +1958,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(0) == i.OutputRegister()) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
+ DCHECK_NE(0, constant_summand);
if (constant_summand > 0) {
__ addl(i.OutputRegister(), Immediate(constant_summand));
- } else if (constant_summand < 0) {
+ } else {
__ subl(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
@@ -2944,8 +2941,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) WasmOutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) WasmOutOfLineTrap(this, instr);
Label* tlabel = ool->entry();
Label end;
if (condition == kUnorderedEqual) {
@@ -2983,6 +2979,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2993,7 +3000,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3082,6 +3088,7 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
if (info()->IsWasm() && shrink_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
@@ -3093,19 +3100,18 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
- __ Move(kScratchRegister,
- ExternalReference::address_of_real_stack_limit(__ isolate()));
+ __ movq(kScratchRegister,
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
__ movq(kScratchRegister, Operand(kScratchRegister, 0));
__ addq(kScratchRegister, Immediate(shrink_slots * kPointerSize));
__ cmpq(rsp, kScratchRegister);
__ j(above_equal, &done);
}
- if (!frame_access_state()->has_frame()) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
+ __ movp(rcx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
__ Move(rsi, Smi::kZero);
- __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, rcx);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
@@ -3272,6 +3278,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
};
+ // Helper function to write the given constant to the stack.
+ auto MoveConstantToSlot = [&](Operand dst, Constant src) {
+ if (!RelocInfo::IsWasmPtrReference(src.rmode())) {
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ movq(dst, Immediate(src.ToInt32()));
+ return;
+ case Constant::kInt64:
+ __ Set(dst, src.ToInt64());
+ return;
+ default:
+ break;
+ }
+ }
+ MoveConstantToRegister(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ };
// Dispatch on the source and destination operand kinds.
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
@@ -3359,8 +3382,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
if (destination->IsStackSlot()) {
- MoveConstantToRegister(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
+ MoveConstantToSlot(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index b14f2fdb8c..b3dfb91991 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -8,6 +8,7 @@
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -165,30 +166,16 @@ class X64OperandGenerator final : public OperandGenerator {
if (selector()->CanAddressRelativeToRootsRegister()) {
LoadMatcher<ExternalReferenceMatcher> m(operand);
if (m.index().HasValue() && m.object().HasValue()) {
- Address const kRootsRegisterValue =
- kRootRegisterBias +
- reinterpret_cast<Address>(
- selector()->isolate()->heap()->roots_array_start());
ptrdiff_t const delta =
m.index().Value() +
- (m.object().Value().address() - kRootsRegisterValue);
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector()->isolate(), m.object().Value());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
}
}
}
- if (operand->InputCount() == 2) {
- Node* left = operand->InputAt(0);
- Node* right = operand->InputAt(1);
- if (left->opcode() == IrOpcode::kLoadRootsPointer &&
- right->opcode() == IrOpcode::kInt64Constant) {
- int64_t offset = OpParameter<int64_t>(right->op());
- DCHECK(is_int32(offset));
- inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(offset));
- return kMode_Root;
- }
- }
BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
@@ -851,7 +838,6 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
VisitBinop(this, node, kX64Add, &cont);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
X64OperandGenerator g(this);
DCHECK_EQ(node->InputCount(), 2);
@@ -859,31 +845,38 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
Node* input2 = node->InputAt(1);
if (input1->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
g.CanBeImmediate(input2)) {
- // Omit truncation and turn subtractions of constant values into immediate
- // "leal" instructions by negating the value.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(input1->InputAt(0)),
- g.TempImmediate(-g.GetImmediateIntegerValue(input2)));
+ int32_t imm = g.GetImmediateIntegerValue(input2);
+ InstructionOperand int64_input = g.UseRegister(input1->InputAt(0));
+ if (imm == 0) {
+ // Emit "movl" for subtraction of 0.
+ Emit(kX64Movl, g.DefineAsRegister(node), int64_input);
+ } else {
+ // Omit truncation and turn subtractions of constant values into immediate
+ // "leal" instructions by negating the value.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
+ }
return;
}
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
+ } else if (m.right().Is(0)) {
+ // TODO(jarin): We should be able to use {EmitIdentity} here
+ // (https://crbug.com/v8/7947).
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ // Turn subtractions of constant values into immediate "leal" instructions
+ // by negating the value.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(-m.right().Value()));
} else {
- if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
- // Turn subtractions of constant values into immediate "leal" instructions
- // by negating the value.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(-m.right().Value()));
- return;
- }
VisitBinop(this, node, kX64Sub32);
}
}
-
void InstructionSelector::VisitInt64Sub(Node* node) {
X64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1715,7 +1708,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
- g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+ g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
g.UseRegister(m.left().node()), cont);
} else if (m.left().HasValue() &&
heap->IsRootHandle(m.left().Value(), &root_index)) {
@@ -1723,23 +1716,31 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
- g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+ g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
g.UseRegister(m.right().node()), cont);
}
}
- Int64BinopMatcher m(node);
- if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
- LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
- ExternalReference js_stack_limit =
- ExternalReference::address_of_stack_limit(selector->isolate());
- if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kX64StackCheck);
- CHECK(cont->IsBranch());
- selector->EmitWithContinuation(opcode, cont);
- return;
- }
+ StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
+ selector->isolate(), node);
+ if (m.Matched()) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX64StackCheck);
+ CHECK(cont->IsBranch());
+ selector->EmitWithContinuation(opcode, cont);
+ return;
+ }
+ WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
+ node);
+ if (wasm_m.Matched()) {
+ // This is a wasm stack check. By structure, we know that we can use the
+ // stack pointer directly, as wasm code does not modify the stack at points
+ // where stack checks are performed.
+ Node* left = node->InputAt(0);
+ LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
+ InstructionSequence::DefaultRepresentation(),
+ RegisterCode::kRegCode_rsp);
+ return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont);
}
VisitWordCompare(selector, node, kX64Cmp, cont);
}
@@ -1891,29 +1892,6 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
-// Shared routine for Word32/Word64 Atomic Store
-void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(base), index_operand};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
- arraysize(inputs), inputs);
-}
-
} // namespace
// Shared routine for word comparison against zero.
@@ -2092,8 +2070,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
}
}
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
}
@@ -2315,7 +2293,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
UNREACHABLE();
return;
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2338,7 +2316,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
UNREACHABLE();
return;
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index eb164591e5..62faacbca7 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -32,14 +32,14 @@ void ScriptContextTable::set_used(int used) {
// static
-Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
+Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
+ Handle<ScriptContextTable> table,
int i) {
DCHECK(i < table->used());
return Handle<Context>::cast(
- FixedArray::get(*table, i + kFirstContextSlotIndex, table->GetIsolate()));
+ FixedArray::get(*table, i + kFirstContextSlotIndex, isolate));
}
-
// static
Context* Context::cast(Object* context) {
DCHECK(context->IsContext());
@@ -59,7 +59,7 @@ void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
Object* Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
-bool Context::has_extension() { return !extension()->IsTheHole(GetIsolate()); }
+bool Context::has_extension() { return !extension()->IsTheHole(); }
HeapObject* Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
@@ -208,7 +208,7 @@ Map* Context::GetInitialJSArrayMap(ElementsKind kind) const {
if (!IsFastElementsKind(kind)) return nullptr;
DisallowHeapAllocation no_gc;
Object* const initial_js_array_map = get(Context::ArrayMapIndex(kind));
- DCHECK(!initial_js_array_map->IsUndefined(GetIsolate()));
+ DCHECK(!initial_js_array_map->IsUndefined());
return Map::cast(initial_js_array_map);
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 2d2ff2d2ce..b52d751f3f 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -8,6 +8,7 @@
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
+#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
@@ -21,10 +22,10 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
CHECK(used >= 0 && length > 0 && used < length);
if (used + kFirstContextSlotIndex == length) {
CHECK(length < Smi::kMaxValue / 2);
- Isolate* isolate = table->GetIsolate();
+ Isolate* isolate = script_context->GetIsolate();
Handle<FixedArray> copy =
isolate->factory()->CopyFixedArrayAndGrow(table, length);
- copy->set_map(isolate->heap()->script_context_table_map());
+ copy->set_map(ReadOnlyRoots(isolate).script_context_table_map());
result = Handle<ScriptContextTable>::cast(copy);
} else {
result = table;
@@ -36,13 +37,13 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
return result;
}
-
-bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
+bool ScriptContextTable::Lookup(Isolate* isolate,
+ Handle<ScriptContextTable> table,
Handle<String> name, LookupResult* result) {
for (int i = 0; i < table->used(); i++) {
- Handle<Context> context = GetContext(table, i);
+ Handle<Context> context = GetContext(isolate, table, i);
DCHECK(context->IsScriptContext());
- Handle<ScopeInfo> scope_info(context->scope_info());
+ Handle<ScopeInfo> scope_info(context->scope_info(), context->GetIsolate());
int slot_index = ScopeInfo::ContextSlotIndex(
scope_info, name, &result->mode, &result->init_flag,
&result->maybe_assigned_flag);
@@ -92,7 +93,7 @@ JSObject* Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
IsEvalContext() || IsCatchContext());
HeapObject* object = extension();
- if (object->IsTheHole(GetIsolate())) return nullptr;
+ if (object->IsTheHole()) return nullptr;
DCHECK(object->IsJSContextExtensionObject() ||
(IsNativeContext() && object->IsJSGlobalObject()));
return JSObject::cast(object);
@@ -153,22 +154,23 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
Handle<Object> unscopables;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, unscopables,
- JSReceiver::GetProperty(Handle<JSReceiver>::cast(it->GetReceiver()),
+ JSReceiver::GetProperty(isolate,
+ Handle<JSReceiver>::cast(it->GetReceiver()),
isolate->factory()->unscopables_symbol()),
Nothing<bool>());
if (!unscopables->IsJSReceiver()) return Just(true);
Handle<Object> blacklist;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, blacklist,
- JSReceiver::GetProperty(Handle<JSReceiver>::cast(unscopables),
+ JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(unscopables),
it->name()),
Nothing<bool>());
- return Just(!blacklist->BooleanValue());
+ return Just(!blacklist->BooleanValue(isolate));
}
static PropertyAttributes GetAttributesForMode(VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
- return mode == CONST ? READ_ONLY : NONE;
+ return mode == VariableMode::kConst ? READ_ONLY : NONE;
}
Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
@@ -184,7 +186,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
*index = kNotFound;
*attributes = ABSENT;
*init_flag = kCreatedInitialized;
- *variable_mode = VAR;
+ *variable_mode = VariableMode::kVar;
if (is_sloppy_function_name != nullptr) {
*is_sloppy_function_name = false;
}
@@ -210,7 +212,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
(context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
context->IsFunctionContext() || context->IsBlockContext()) &&
context->extension_receiver() != nullptr) {
- Handle<JSReceiver> object(context->extension_receiver());
+ Handle<JSReceiver> object(context->extension_receiver(), isolate);
if (context->IsNativeContext()) {
if (FLAG_trace_contexts) {
@@ -218,12 +220,13 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
// Try other script contexts.
Handle<ScriptContextTable> script_contexts(
- context->global_object()->native_context()->script_context_table());
+ context->global_object()->native_context()->script_context_table(),
+ isolate);
ScriptContextTable::LookupResult r;
- if (ScriptContextTable::Lookup(script_contexts, name, &r)) {
+ if (ScriptContextTable::Lookup(isolate, script_contexts, name, &r)) {
if (FLAG_trace_contexts) {
- Handle<Context> c = ScriptContextTable::GetContext(script_contexts,
- r.context_index);
+ Handle<Context> c = ScriptContextTable::GetContext(
+ isolate, script_contexts, r.context_index);
PrintF("=> found property in script context %d: %p\n",
r.context_index, reinterpret_cast<void*>(*c));
}
@@ -231,7 +234,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
*variable_mode = r.mode;
*init_flag = r.init_flag;
*attributes = GetAttributesForMode(r.mode);
- return ScriptContextTable::GetContext(script_contexts,
+ return ScriptContextTable::GetContext(isolate, script_contexts,
r.context_index);
}
}
@@ -246,9 +249,9 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
} else if (context->IsWithContext()) {
// A with context will never bind "this", but debug-eval may look into
// a with context when resolving "this". Other synthetic variables such
- // as new.target may be resolved as DYNAMIC_LOCAL due to bug v8:5405 ,
- // skipping them here serves as a workaround until a more thorough
- // fix can be applied.
+ // as new.target may be resolved as VariableMode::kDynamicLocal due to
+ // bug v8:5405 , skipping them here serves as a workaround until a more
+ // thorough fix can be applied.
// TODO(v8:5405): Replace this check with a DCHECK when resolution of
// of synthetic variables does not go through this code path.
if (ScopeInfo::VariableIsSynthetic(*name)) {
@@ -288,7 +291,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
context->IsModuleContext() || context->IsCatchContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
- Handle<ScopeInfo> scope_info(context->scope_info());
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
@@ -297,8 +300,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
- PrintF("=> found local in context slot %d (mode = %d)\n",
- slot_index, mode);
+ PrintF("=> found local in context slot %d (mode = %hhu)\n",
+ slot_index, static_cast<uint8_t>(mode));
}
*index = slot_index;
*variable_mode = mode;
@@ -321,7 +324,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
*index = function_index;
*attributes = READ_ONLY;
*init_flag = kCreatedInitialized;
- *variable_mode = CONST;
+ *variable_mode = VariableMode::kConst;
if (is_sloppy_function_name != nullptr &&
is_sloppy(scope_info->language_mode())) {
*is_sloppy_function_name = true;
@@ -355,7 +358,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// Check materialized locals.
Object* ext = context->get(EXTENSION_INDEX);
if (ext->IsJSReceiver()) {
- Handle<JSReceiver> extension(JSReceiver::cast(ext));
+ Handle<JSReceiver> extension(JSReceiver::cast(ext), isolate);
LookupIterator it(extension, name, extension);
Maybe<bool> found = JSReceiver::HasProperty(&it);
if (found.FromMaybe(false)) {
@@ -375,7 +378,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// to with, script or native contexts up the context chain.
obj = context->get(WHITE_LIST_INDEX);
if (obj->IsStringSet()) {
- failed_whitelist = failed_whitelist || !StringSet::cast(obj)->Has(name);
+ failed_whitelist =
+ failed_whitelist || !StringSet::cast(obj)->Has(isolate, name);
}
}
@@ -406,7 +410,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
void Context::AddOptimizedCode(Code* code) {
DCHECK(IsNativeContext());
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(code->next_code_link()->IsUndefined(GetIsolate()));
+ DCHECK(code->next_code_link()->IsUndefined());
code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index b66bb94a4b..3a4f8329c7 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -71,8 +71,6 @@ enum ContextLookupFlags {
V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_POP_INDEX, JSFunction, array_pop) \
- V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
@@ -96,6 +94,7 @@ enum ContextLookupFlags {
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
V(SET_ADD_INDEX, JSFunction, set_add) \
V(SET_DELETE_INDEX, JSFunction, set_delete) \
V(SET_HAS_INDEX, JSFunction, set_has) \
@@ -202,6 +201,8 @@ enum ContextLookupFlags {
intl_date_time_format_function) \
V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_number_format_function) \
+ V(INTL_NUMBER_FORMAT_INTERNAL_FORMAT_NUMBER_SHARED_FUN, SharedFunctionInfo, \
+ number_format_internal_format_number_shared_fun) \
V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \
@@ -271,7 +272,6 @@ enum ContextLookupFlags {
initial_regexp_string_iterator_prototype_map_index) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
- V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
@@ -366,7 +366,8 @@ class ScriptContextTable : public FixedArray {
inline int used() const;
inline void set_used(int used);
- static inline Handle<Context> GetContext(Handle<ScriptContextTable> table,
+ static inline Handle<Context> GetContext(Isolate* isolate,
+ Handle<ScriptContextTable> table,
int i);
// Lookup a variable `name` in a ScriptContextTable.
@@ -374,8 +375,8 @@ class ScriptContextTable : public FixedArray {
// valid information about its location.
// If it returns false, `result` is untouched.
V8_WARN_UNUSED_RESULT
- static bool Lookup(Handle<ScriptContextTable> table, Handle<String> name,
- LookupResult* result);
+ static bool Lookup(Isolate* isolate, Handle<ScriptContextTable> table,
+ Handle<String> name, LookupResult* result);
V8_WARN_UNUSED_RESULT
static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
@@ -434,8 +435,13 @@ class ScriptContextTable : public FixedArray {
// Script contexts from all top-level scripts are gathered in
// ScriptContextTable.
-class Context : public FixedArray {
+class Context : public FixedArray, public NeverReadOnlySpaceObject {
public:
+ // Use the mixin methods over the HeapObject methods.
+ // TODO(v8:7786) Remove once the HeapObject methods are gone.
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Conversions.
static inline Context* cast(Object* context);
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 8aa4abccda..baf8b3a6d5 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -941,7 +941,7 @@ class StringToBigIntHelper : public StringToIntHelper {
};
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
StringToBigIntHelper helper(isolate, string);
return helper.GetResult();
}
@@ -1330,9 +1330,10 @@ char* DoubleToRadixCString(double value, int radix) {
// ES6 18.2.4 parseFloat(string)
-double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
- int flags, double empty_string_val) {
- Handle<String> flattened = String::Flatten(string);
+double StringToDouble(Isolate* isolate, UnicodeCache* unicode_cache,
+ Handle<String> string, int flags,
+ double empty_string_val) {
+ Handle<String> flattened = String::Flatten(isolate, string);
{
DisallowHeapAllocation no_gc;
String::FlatContent flat = flattened->GetFlatContent();
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 77d3e8bbcd..3077ae4204 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -170,8 +170,9 @@ inline uint32_t NumberToUint32(Object* number);
inline int64_t NumberToInt64(Object* number);
inline uint64_t PositiveNumberToUint64(Object* number);
-double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
- int flags, double empty_string_val = 0.0);
+double StringToDouble(Isolate* isolate, UnicodeCache* unicode_cache,
+ Handle<String> string, int flags,
+ double empty_string_val = 0.0);
inline bool TryNumberToSize(Object* number, size_t* result);
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index abde3a1af5..0f60a76806 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -57,9 +57,10 @@ void RuntimeCallTimer::CommitTimeToCounter() {
bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
-RuntimeCallTimerScope::RuntimeCallTimerScope(HeapObject* heap_object,
+RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
+ HeapObject* heap_object,
RuntimeCallCounterId counter_id)
- : RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id) {}
+ : RuntimeCallTimerScope(isolate, counter_id) {}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 001beb938e..bcea9e0f42 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -215,24 +215,6 @@ Counters::Counters(Isolate* isolate)
Histogram(histogram.caption, 1000, 500000, 50, this);
}
- // For n = 100, low = 4000, high = 2000000: the factor = 1.06.
- static const struct {
- Histogram Counters::*member;
- AggregatedMemoryHistogram<Histogram> Counters::*aggregated;
- const char* caption;
- } kMemoryHistograms[] = {
-#define HM(name, caption) \
- {&Counters::name##_, &Counters::aggregated_##name##_, #caption},
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
- };
- for (const auto& histogram : kMemoryHistograms) {
- this->*histogram.member =
- Histogram(histogram.caption, 4000, 2000000, 100, this);
- this->*histogram.aggregated =
- AggregatedMemoryHistogram<Histogram>(&(this->*histogram.member));
- }
-
// clang-format off
static const struct {
StatsCounter Counters::*member;
@@ -323,7 +305,6 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
#define HM(name, caption) name##_.Reset();
HISTOGRAM_LEGACY_MEMORY_LIST(HM)
- HISTOGRAM_MEMORY_LIST(HM)
#undef HM
}
@@ -508,7 +489,7 @@ bool RuntimeCallStats::IsCalledOnTheSameThread() {
}
void RuntimeCallStats::Print() {
- OFStream os(stdout);
+ StdoutStream os;
Print(os);
}
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 255c8db7c6..0f7ae95769 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -406,8 +406,7 @@ class HistogramTimerScope BASE_EMBEDDED {
explicit HistogramTimerScope(HistogramTimer* timer,
bool allow_nesting = false)
#ifdef DEBUG
- : timer_(timer),
- skipped_timer_start_(false) {
+ : timer_(timer), skipped_timer_start_(false) {
if (timer_->timer()->IsStarted() && allow_nesting) {
skipped_timer_start_ = true;
} else {
@@ -436,6 +435,27 @@ class HistogramTimerScope BASE_EMBEDDED {
#endif
};
+enum class OptionalHistogramTimerScopeMode { TAKE_TIME, DONT_TAKE_TIME };
+
+// Helper class for scoping a HistogramTimer.
+// It will not take time if take_time is set to false.
+class OptionalHistogramTimerScope BASE_EMBEDDED {
+ public:
+ OptionalHistogramTimerScope(HistogramTimer* timer,
+ OptionalHistogramTimerScopeMode mode)
+ : timer_(timer), mode_(mode) {
+ if (mode == OptionalHistogramTimerScopeMode::TAKE_TIME) timer_->Start();
+ }
+
+ ~OptionalHistogramTimerScope() {
+ if (mode_ == OptionalHistogramTimerScopeMode::TAKE_TIME) timer_->Stop();
+ }
+
+ private:
+ HistogramTimer* timer_;
+ OptionalHistogramTimerScopeMode mode_;
+};
+
// A histogram timer that can aggregate events within a larger scope.
//
// Intended use of this timer is to have an outer (aggregating) and an inner
@@ -1075,7 +1095,7 @@ class RuntimeCallTimerScope {
RuntimeCallCounterId counter_id);
// This constructor is here just to avoid calling GetIsolate() when the
// stats are disabled and the isolate is not directly available.
- inline RuntimeCallTimerScope(HeapObject* heap_object,
+ inline RuntimeCallTimerScope(Isolate* isolate, HeapObject* heap_object,
RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
RuntimeCallCounterId counter_id) {
@@ -1109,6 +1129,13 @@ class RuntimeCallTimerScope {
HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
+ HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101) \
+ HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101) \
+ HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101) \
+ HR(gc_finalize_finish, V8.GCFinalizeMC.Finish, 0, 10000, 101) \
+ HR(gc_finalize_mark, V8.GCFinalizeMC.Mark, 0, 10000, 101) \
+ HR(gc_finalize_prologue, V8.GCFinalizeMC.Prologue, 0, 10000, 101) \
+ HR(gc_finalize_sweep, V8.GCFinalizeMC.Sweep, 0, 10000, 101) \
HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
@@ -1263,12 +1290,6 @@ class RuntimeCallTimerScope {
HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
-// Note: These define both Histogram and AggregatedMemoryHistogram<Histogram>
-// histograms with options (min=4000, max=2000000, buckets=100).
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(memory_heap_committed, V8.MemoryHeapCommitted) \
- HM(memory_heap_used, V8.MemoryHeapUsed)
-
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
// lines) rather than one macro (of length about 80 lines) to work around
@@ -1289,7 +1310,6 @@ class RuntimeCallTimerScope {
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
SC(string_table_capacity, V8.StringTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
SC(inlined_copied_elements, V8.InlinedCopiedElements) \
SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
@@ -1450,14 +1470,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
#define HM(name, caption) \
Histogram* name() { return &name##_; }
HISTOGRAM_LEGACY_MEMORY_LIST(HM)
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define HM(name, caption) \
- AggregatedMemoryHistogram<Histogram>* aggregated_##name() { \
- return &aggregated_##name##_; \
- }
- HISTOGRAM_MEMORY_LIST(HM)
#undef HM
#define SC(name, caption) \
@@ -1485,7 +1497,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
#undef PERCENTAGE_ID
#define MEMORY_ID(name, caption) k_##name,
HISTOGRAM_LEGACY_MEMORY_LIST(MEMORY_ID)
- HISTOGRAM_MEMORY_LIST(MEMORY_ID)
#undef MEMORY_ID
#define COUNTER_ID(name, caption) k_##name,
STATS_COUNTER_LIST_1(COUNTER_ID)
@@ -1557,12 +1568,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
#define HM(name, caption) \
Histogram name##_;
HISTOGRAM_LEGACY_MEMORY_LIST(HM)
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-
-#define HM(name, caption) \
- AggregatedMemoryHistogram<Histogram> aggregated_##name##_;
- HISTOGRAM_MEMORY_LIST(HM)
#undef HM
#define SC(name, caption) \
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index a246347962..8feefa4634 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -383,64 +383,6 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
} // namespace tracing
-class PerIsolateData {
- public:
- explicit PerIsolateData(Isolate* isolate)
- : isolate_(isolate), realms_(nullptr) {
- isolate->SetData(0, this);
- }
-
- ~PerIsolateData() {
- isolate_->SetData(0, nullptr); // Not really needed, just to be sure...
- }
-
- inline static PerIsolateData* Get(Isolate* isolate) {
- return reinterpret_cast<PerIsolateData*>(isolate->GetData(0));
- }
-
- class RealmScope {
- public:
- explicit RealmScope(PerIsolateData* data);
- ~RealmScope();
- private:
- PerIsolateData* data_;
- };
-
- inline void SetTimeout(Local<Function> callback, Local<Context> context) {
- set_timeout_callbacks_.emplace(isolate_, callback);
- set_timeout_contexts_.emplace(isolate_, context);
- }
-
- inline MaybeLocal<Function> GetTimeoutCallback() {
- if (set_timeout_callbacks_.empty()) return MaybeLocal<Function>();
- Local<Function> result = set_timeout_callbacks_.front().Get(isolate_);
- set_timeout_callbacks_.pop();
- return result;
- }
-
- inline MaybeLocal<Context> GetTimeoutContext() {
- if (set_timeout_contexts_.empty()) return MaybeLocal<Context>();
- Local<Context> result = set_timeout_contexts_.front().Get(isolate_);
- set_timeout_contexts_.pop();
- return result;
- }
-
- private:
- friend class Shell;
- friend class RealmScope;
- Isolate* isolate_;
- int realm_count_;
- int realm_current_;
- int realm_switch_;
- Global<Context>* realms_;
- Global<Value> realm_shared_;
- std::queue<Global<Function>> set_timeout_callbacks_;
- std::queue<Global<Context>> set_timeout_contexts_;
-
- int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
- int arg_offset);
- int RealmFind(Local<Context> context);
-};
class ExternalOwningOneByteStringResource
: public String::ExternalOneByteStringResource {
@@ -483,8 +425,8 @@ base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
// Dummy external source stream which returns the whole source in one go.
class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
public:
- explicit DummySourceStream(Local<String> source) : done_(false) {
- source_length_ = source->Utf8Length();
+ DummySourceStream(Local<String> source, Isolate* isolate) : done_(false) {
+ source_length_ = source->Utf8Length(isolate);
source_buffer_.reset(new uint8_t[source_length_]);
source->WriteUtf8(reinterpret_cast<char*>(source_buffer_.get()),
source_length_);
@@ -511,7 +453,7 @@ class BackgroundCompileThread : public base::Thread {
BackgroundCompileThread(Isolate* isolate, Local<String> source)
: base::Thread(GetThreadOptions("BackgroundCompileThread")),
source_(source),
- streamed_source_(new DummySourceStream(source),
+ streamed_source_(new DummySourceStream(source, isolate),
v8::ScriptCompiler::StreamedSource::UTF8),
task_(v8::ScriptCompiler::StartStreamingScript(isolate,
&streamed_source_)) {}
@@ -990,6 +932,41 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
return true;
}
+PerIsolateData::PerIsolateData(Isolate* isolate)
+ : isolate_(isolate), realms_(nullptr) {
+ isolate->SetData(0, this);
+ if (i::FLAG_expose_async_hooks) {
+ async_hooks_wrapper_ = new AsyncHooks(isolate);
+ }
+}
+
+PerIsolateData::~PerIsolateData() {
+ isolate_->SetData(0, nullptr); // Not really needed, just to be sure...
+ if (i::FLAG_expose_async_hooks) {
+ delete async_hooks_wrapper_; // This uses the isolate
+ }
+}
+
+void PerIsolateData::SetTimeout(Local<Function> callback,
+ Local<Context> context) {
+ set_timeout_callbacks_.emplace(isolate_, callback);
+ set_timeout_contexts_.emplace(isolate_, context);
+}
+
+MaybeLocal<Function> PerIsolateData::GetTimeoutCallback() {
+ if (set_timeout_callbacks_.empty()) return MaybeLocal<Function>();
+ Local<Function> result = set_timeout_callbacks_.front().Get(isolate_);
+ set_timeout_callbacks_.pop();
+ return result;
+}
+
+MaybeLocal<Context> PerIsolateData::GetTimeoutContext() {
+ if (set_timeout_contexts_.empty()) return MaybeLocal<Context>();
+ Local<Context> result = set_timeout_contexts_.front().Get(isolate_);
+ set_timeout_contexts_.pop();
+ return result;
+}
+
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
@@ -1244,6 +1221,35 @@ void Shell::RealmSharedSet(Local<String> property,
data->realm_shared_.Reset(isolate, value);
}
+// async_hooks.createHook() registers functions to be called for different
+// lifetime events of each async operation.
+void Shell::AsyncHooksCreateHook(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Local<Object> wrap =
+ PerIsolateData::Get(args.GetIsolate())->GetAsyncHooks()->CreateHook(args);
+ args.GetReturnValue().Set(wrap);
+}
+
+// async_hooks.executionAsyncId() returns the asyncId of the current execution
+// context.
+void Shell::AsyncHooksExecutionAsyncId(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ args.GetReturnValue().Set(v8::Number::New(
+ isolate,
+ PerIsolateData::Get(isolate)->GetAsyncHooks()->GetExecutionAsyncId()));
+}
+
+void Shell::AsyncHooksTriggerAsyncId(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ args.GetReturnValue().Set(v8::Number::New(
+ isolate,
+ PerIsolateData::Get(isolate)->GetAsyncHooks()->GetTriggerAsyncId()));
+}
+
void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
@@ -1857,6 +1863,26 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
os_templ);
+ if (i::FLAG_expose_async_hooks) {
+ Local<ObjectTemplate> async_hooks_templ = ObjectTemplate::New(isolate);
+ async_hooks_templ->Set(
+ String::NewFromUtf8(isolate, "createHook", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, AsyncHooksCreateHook));
+ async_hooks_templ->Set(
+ String::NewFromUtf8(isolate, "executionAsyncId", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, AsyncHooksExecutionAsyncId));
+ async_hooks_templ->Set(
+ String::NewFromUtf8(isolate, "triggerAsyncId", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, AsyncHooksTriggerAsyncId));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "async_hooks", NewStringType::kNormal)
+ .ToLocalChecked(),
+ async_hooks_templ);
+ }
+
return global_template;
}
@@ -2061,8 +2087,7 @@ void Shell::OnExit(v8::Isolate* isolate) {
// Dump basic block profiling data.
if (i::BasicBlockProfiler* profiler =
reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
- i::OFStream os(stdout);
- os << *profiler;
+ i::StdoutStream{} << *profiler;
}
isolate->Dispose();
@@ -2351,8 +2376,6 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
.ToLocalChecked();
CHECK(context->Global()->Set(context, function_name, function).FromJust());
- v8::debug::SetLiveEditEnabled(isolate_, true);
-
context_.Reset(isolate_, context);
}
@@ -2400,6 +2423,14 @@ SourceGroup::~SourceGroup() {
thread_ = nullptr;
}
+bool ends_with(const char* input, const char* suffix) {
+ size_t input_length = strlen(input);
+ size_t suffix_length = strlen(suffix);
+ if (suffix_length <= input_length) {
+ return strcmp(input + input_length - suffix_length, suffix) == 0;
+ }
+ return false;
+}
void SourceGroup::Execute(Isolate* isolate) {
bool exception_was_thrown = false;
@@ -2423,6 +2454,13 @@ void SourceGroup::Execute(Isolate* isolate) {
}
++i;
continue;
+ } else if (ends_with(arg, ".mjs")) {
+ Shell::options.script_executed = true;
+ if (!Shell::ExecuteModule(isolate, arg)) {
+ exception_was_thrown = true;
+ break;
+ }
+ continue;
} else if (strcmp(arg, "--module") == 0 && i + 1 < end_offset_) {
// Treat the next file as a module.
arg = argv_[++i];
@@ -2882,8 +2920,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->Begin(argv, i + 1);
} else if (strcmp(str, "--module") == 0) {
// Pass on to SourceGroup, which understands this option.
- } else if (strncmp(argv[i], "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
+ } else if (strncmp(str, "--", 2) == 0) {
+ printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
options.script_executed = true;
} else if (strncmp(str, "-", 1) != 0) {
@@ -3078,6 +3116,21 @@ class Serializer : public ValueSerializer::Delegate {
return Just<uint32_t>(static_cast<uint32_t>(index));
}
+ Maybe<uint32_t> GetWasmModuleTransferId(
+ Isolate* isolate, Local<WasmCompiledModule> module) override {
+ DCHECK_NOT_NULL(data_);
+ for (size_t index = 0; index < wasm_modules_.size(); ++index) {
+ if (wasm_modules_[index] == module) {
+ return Just<uint32_t>(static_cast<uint32_t>(index));
+ }
+ }
+
+ size_t index = wasm_modules_.size();
+ wasm_modules_.emplace_back(isolate_, module);
+ data_->transferrable_modules_.push_back(module->GetTransferrableModule());
+ return Just<uint32_t>(static_cast<uint32_t>(index));
+ }
+
void* ReallocateBufferMemory(void* old_buffer, size_t size,
size_t* actual_size) override {
// Not accurate, because we don't take into account reallocated buffers,
@@ -3155,6 +3208,7 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> data_;
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
+ std::vector<Global<WasmCompiledModule>> wasm_modules_;
std::vector<ExternalizedContents> externalized_contents_;
size_t current_memory_usage_;
@@ -3198,6 +3252,16 @@ class Deserializer : public ValueDeserializer::Delegate {
return MaybeLocal<SharedArrayBuffer>();
}
+ MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(
+ Isolate* isolate, uint32_t transfer_id) override {
+ DCHECK_NOT_NULL(data_);
+ if (transfer_id < data_->transferrable_modules().size()) {
+ return WasmCompiledModule::FromTransferrableModule(
+ isolate_, data_->transferrable_modules().at(transfer_id));
+ }
+ return MaybeLocal<WasmCompiledModule>();
+ }
+
private:
Isolate* isolate_;
ValueDeserializer deserializer_;
@@ -3295,7 +3359,9 @@ int Shell::Main(int argc, char* argv[]) {
if (i::FLAG_trace_turbo_cfg_file == nullptr) {
SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
}
- SetFlagsFromString("--redirect-code-traces-to=code.asm");
+ if (i::FLAG_redirect_code_traces_to == nullptr) {
+ SetFlagsFromString("--redirect-code-traces-to=code.asm");
+ }
int result = 0;
Isolate::CreateParams create_params;
ShellArrayBufferAllocator shell_array_buffer_allocator;
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index e4a58707c2..ef0ea7d898 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -8,11 +8,13 @@
#include <iterator>
#include <map>
#include <memory>
+#include <queue>
#include <string>
#include <unordered_map>
#include <vector>
#include "src/allocation.h"
+#include "src/async-hooks-wrapper.h"
#include "src/base/platform/time.h"
#include "src/string-hasher.h"
#include "src/utils.h"
@@ -177,7 +179,10 @@ class SerializationData {
shared_array_buffer_contents() {
return shared_array_buffer_contents_;
}
-
+ const std::vector<WasmCompiledModule::TransferrableModule>&
+ transferrable_modules() {
+ return transferrable_modules_;
+ }
private:
struct DataDeleter {
@@ -188,6 +193,7 @@ class SerializationData {
size_t size_;
std::vector<ArrayBuffer::Contents> array_buffer_contents_;
std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
+ std::vector<WasmCompiledModule::TransferrableModule> transferrable_modules_;
private:
friend class Serializer;
@@ -260,6 +266,49 @@ class Worker {
base::Atomic32 running_;
};
+class PerIsolateData {
+ public:
+ explicit PerIsolateData(Isolate* isolate);
+
+ ~PerIsolateData();
+
+ inline static PerIsolateData* Get(Isolate* isolate) {
+ return reinterpret_cast<PerIsolateData*>(isolate->GetData(0));
+ }
+
+ class RealmScope {
+ public:
+ explicit RealmScope(PerIsolateData* data);
+ ~RealmScope();
+
+ private:
+ PerIsolateData* data_;
+ };
+
+ inline void SetTimeout(Local<Function> callback, Local<Context> context);
+ inline MaybeLocal<Function> GetTimeoutCallback();
+ inline MaybeLocal<Context> GetTimeoutContext();
+
+ AsyncHooks* GetAsyncHooks() { return async_hooks_wrapper_; }
+
+ private:
+ friend class Shell;
+ friend class RealmScope;
+ Isolate* isolate_;
+ int realm_count_;
+ int realm_current_;
+ int realm_switch_;
+ Global<Context>* realms_;
+ Global<Value> realm_shared_;
+ std::queue<Global<Function>> set_timeout_callbacks_;
+ std::queue<Global<Context>> set_timeout_contexts_;
+ AsyncHooks* async_hooks_wrapper_;
+
+ int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset);
+ int RealmFind(Local<Context> context);
+};
+
class ShellOptions {
public:
enum CodeCacheOptions {
@@ -396,6 +445,13 @@ class Shell : public i::AllStatic {
Local<Value> value,
const PropertyCallbackInfo<void>& info);
+ static void AsyncHooksCreateHook(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void AsyncHooksExecutionAsyncId(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void AsyncHooksTriggerAsyncId(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 26899f9114..46b472480d 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,7 +1,6 @@
set noparent
bmeurer@chromium.org
-franzih@chromium.org
jgruber@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index ca00606247..6844fe28a9 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -40,8 +40,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0,
- FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldrh(r0,
+ FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, r0);
ParameterCount dummy1(r2);
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 2ca6d32064..c130524f37 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -41,8 +41,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ Pop(fp, lr); // Frame, Return address.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x0, FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldrh(x0,
+ FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(x2, x0);
ParameterCount dummy1(x2);
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index f27e22cfbb..9e7195b1f3 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -505,7 +505,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
{
// Sort functions by start position, from outer to inner functions.
- SharedFunctionInfo::ScriptIterator infos(script_handle);
+ SharedFunctionInfo::ScriptIterator infos(isolate, *script_handle);
while (SharedFunctionInfo* info = infos.Next()) {
sorted.push_back(info);
}
@@ -575,7 +575,7 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
isolate->debug()->RemoveAllCoverageInfos();
if (!isolate->is_collecting_type_profile()) {
isolate->SetFeedbackVectorsForProfilingTools(
- isolate->heap()->undefined_value());
+ ReadOnlyRoots(isolate).undefined_value());
}
break;
case debug::Coverage::kBlockBinary:
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 505bbad2dd..0dd2303772 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -44,7 +44,8 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
context);
if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode();
MaybeHandle<Object> result = Execution::Call(
- isolate, fun, Handle<JSObject>(context->global_proxy()), 0, nullptr);
+ isolate, fun, Handle<JSObject>(context->global_proxy(), isolate), 0,
+ nullptr);
if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode();
return result;
}
@@ -72,7 +73,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
Handle<Context> context = context_builder.evaluation_context();
- Handle<JSObject> receiver(context->global_proxy());
+ Handle<JSObject> receiver(context->global_proxy(), isolate);
MaybeHandle<Object> maybe_result =
Evaluate(isolate, context_builder.outer_info(), context, receiver, source,
throw_on_side_effect);
@@ -115,7 +116,7 @@ MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
Handle<Context>(), Handle<StringSet>());
Handle<SharedFunctionInfo> outer_info(
native_context->empty_function()->shared(), isolate);
- Handle<JSObject> receiver(native_context->global_proxy());
+ Handle<JSObject> receiver(native_context->global_proxy(), isolate);
const bool throw_on_side_effect = false;
MaybeHandle<Object> maybe_result =
Evaluate(isolate, outer_info, evaluation_context, receiver, source,
@@ -159,20 +160,24 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
return result;
}
+Handle<SharedFunctionInfo> DebugEvaluate::ContextBuilder::outer_info() const {
+ return handle(frame_inspector_.GetFunction()->shared(), isolate_);
+}
DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
JavaScriptFrame* frame,
int inlined_jsframe_index)
: isolate_(isolate),
- frame_(frame),
- inlined_jsframe_index_(inlined_jsframe_index) {
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> local_function = frame_inspector.GetFunction();
- Handle<Context> outer_context(local_function->context());
+ frame_inspector_(frame, inlined_jsframe_index, isolate),
+ scope_iterator_(isolate, &frame_inspector_,
+ ScopeIterator::COLLECT_NON_LOCALS) {
+ Handle<Context> outer_context(frame_inspector_.GetFunction()->context(),
+ isolate);
evaluation_context_ = outer_context;
- outer_info_ = handle(local_function->shared());
Factory* factory = isolate->factory();
+ if (scope_iterator_.Done()) return;
+
// To evaluate as if we were running eval at the point of the debug break,
// we reconstruct the context chain as follows:
// - To make stack-allocated variables visible, we materialize them and
@@ -189,64 +194,32 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
// - Look up in the materialized stack variables.
// - Look up in the original context.
// - Check the whitelist to find out whether to skip contexts during lookup.
- const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
- for (ScopeIterator it(isolate, &frame_inspector, option); !it.Done();
- it.Next()) {
- ScopeIterator::ScopeType scope_type = it.Type();
+ for (; scope_iterator_.InInnerScope(); scope_iterator_.Next()) {
+ ScopeIterator::ScopeType scope_type = scope_iterator_.Type();
+ if (scope_type == ScopeIterator::ScopeTypeScript) break;
+ ContextChainElement context_chain_element;
+ if (scope_type == ScopeIterator::ScopeTypeLocal ||
+ scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK)) {
+ context_chain_element.materialized_object =
+ scope_iterator_.ScopeObject(ScopeIterator::Mode::STACK);
+ }
+ if (scope_iterator_.HasContext()) {
+ context_chain_element.wrapped_context = scope_iterator_.CurrentContext();
+ }
if (scope_type == ScopeIterator::ScopeTypeLocal) {
- DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
- Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
- Handle<Context> local_context =
- it.HasContext() ? it.CurrentContext() : outer_context;
- Handle<StringSet> non_locals = it.GetNonLocals();
- MaterializeReceiver(materialized, local_context, local_function,
- non_locals);
- MaterializeStackLocals(materialized, local_function, &frame_inspector);
- ContextChainElement context_chain_element;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_element.materialized_object = materialized;
- // Non-locals that are already being referenced by the current function
- // are guaranteed to be correctly resolved.
- context_chain_element.whitelist = non_locals;
- if (it.HasContext()) {
- context_chain_element.wrapped_context = it.CurrentContext();
- }
- context_chain_.push_back(context_chain_element);
- evaluation_context_ = outer_context;
- break;
- } else if (scope_type == ScopeIterator::ScopeTypeCatch ||
- scope_type == ScopeIterator::ScopeTypeWith ||
- scope_type == ScopeIterator::ScopeTypeModule) {
- ContextChainElement context_chain_element;
- Handle<Context> current_context = it.CurrentContext();
- if (!current_context->IsDebugEvaluateContext()) {
- context_chain_element.wrapped_context = current_context;
- }
- context_chain_.push_back(context_chain_element);
- } else if (scope_type == ScopeIterator::ScopeTypeBlock ||
- scope_type == ScopeIterator::ScopeTypeEval) {
- Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
- frame_inspector.MaterializeStackLocals(materialized,
- it.CurrentScopeInfo());
- ContextChainElement context_chain_element;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_element.materialized_object = materialized;
- if (it.HasContext()) {
- context_chain_element.wrapped_context = it.CurrentContext();
- }
- context_chain_.push_back(context_chain_element);
- } else {
- break;
+ context_chain_element.whitelist = scope_iterator_.GetNonLocals();
}
+ context_chain_.push_back(context_chain_element);
}
+ Handle<ScopeInfo> scope_info =
+ evaluation_context_->IsNativeContext()
+ ? Handle<ScopeInfo>::null()
+ : handle(evaluation_context_->scope_info(), isolate);
for (auto rit = context_chain_.rbegin(); rit != context_chain_.rend();
rit++) {
ContextChainElement element = *rit;
- Handle<ScopeInfo> scope_info(ScopeInfo::CreateForWithScope(
- isolate, evaluation_context_->IsNativeContext()
- ? Handle<ScopeInfo>::null()
- : Handle<ScopeInfo>(evaluation_context_->scope_info())));
+ scope_info = ScopeInfo::CreateForWithScope(isolate, scope_info);
scope_info->SetIsDebugEvaluateScope();
evaluation_context_ = factory->NewDebugEvaluateContext(
evaluation_context_, scope_info, element.materialized_object,
@@ -256,62 +229,24 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
void DebugEvaluate::ContextBuilder::UpdateValues() {
+ scope_iterator_.Restart();
for (ContextChainElement& element : context_chain_) {
if (!element.materialized_object.is_null()) {
- // Write back potential changes to materialized stack locals to the stack.
- FrameInspector(frame_, inlined_jsframe_index_, isolate_)
- .UpdateStackLocalsFromMaterializedObject(element.materialized_object,
- element.scope_info);
+ Handle<FixedArray> keys =
+ KeyAccumulator::GetKeys(element.materialized_object,
+ KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandleChecked();
+
+ for (int i = 0; i < keys->length(); i++) {
+ DCHECK(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)), isolate_);
+ Handle<Object> value =
+ JSReceiver::GetDataProperty(element.materialized_object, key);
+ scope_iterator_.SetVariableValue(key, value);
+ }
}
- }
-}
-
-
-void DebugEvaluate::ContextBuilder::MaterializeReceiver(
- Handle<JSObject> target, Handle<Context> local_context,
- Handle<JSFunction> local_function, Handle<StringSet> non_locals) {
- Handle<Object> recv = isolate_->factory()->undefined_value();
- Handle<String> name = isolate_->factory()->this_string();
- if (non_locals->Has(name)) {
- // 'this' is allocated in an outer context and is is already being
- // referenced by the current function, so it can be correctly resolved.
- return;
- } else if (local_function->shared()->scope_info()->HasReceiver() &&
- !frame_->receiver()->IsTheHole(isolate_)) {
- recv = handle(frame_->receiver(), isolate_);
- }
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
-}
-
-void DebugEvaluate::ContextBuilder::MaterializeStackLocals(
- Handle<JSObject> target, Handle<JSFunction> function,
- FrameInspector* frame_inspector) {
- bool materialize_arguments_object = true;
-
- // Do not materialize the arguments object for eval or top-level code.
- if (function->shared()->is_toplevel()) materialize_arguments_object = false;
-
- // First materialize stack locals (modulo arguments object).
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- frame_inspector->MaterializeStackLocals(target, scope_info,
- materialize_arguments_object);
-
- // Then materialize the arguments object.
- if (materialize_arguments_object) {
- // Skip if "arguments" is already taken and wasn't optimized out (which
- // causes {MaterializeStackLocals} above to skip the local variable).
- Handle<String> arguments_str = isolate_->factory()->arguments_string();
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(target, arguments_str);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) return;
-
- // FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments =
- Accessors::FunctionGetArguments(frame_, inlined_jsframe_index_);
- JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
- NONE)
- .Check();
+ scope_iterator_.Next();
}
}
@@ -332,10 +267,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(IsArray) \
V(IsDate) \
V(IsFunction) \
- V(IsJSMap) \
V(IsJSProxy) \
V(IsJSReceiver) \
- V(IsJSSet) \
V(IsJSWeakMap) \
V(IsJSWeakSet) \
V(IsRegExp) \
@@ -398,6 +331,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ObjectHasOwnProperty) \
V(ObjectValues) \
V(ObjectValuesSkipFastPath) \
+ V(ObjectGetOwnPropertyNames) \
+ V(ObjectGetOwnPropertyNamesTryFast) \
V(RegExpInitializeAndCompile) \
V(StackGuard) \
V(StringAdd) \
@@ -410,6 +345,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowRangeError) \
V(ThrowTypeError) \
V(ToName) \
+ V(TransitionElementsKind) \
/* Misc. */ \
V(Call) \
V(CompleteInobjectSlackTrackingForMap) \
@@ -588,8 +524,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
}
}
-SharedFunctionInfo::SideEffectState BuiltinGetSideEffectState(
- Builtins::Name id) {
+DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
switch (id) {
// Whitelist for builtins.
// Object builtins.
@@ -620,6 +555,8 @@ SharedFunctionInfo::SideEffectState BuiltinGetSideEffectState(
case Builtins::kArrayPrototypeEntries:
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
+ case Builtins::kArrayPrototypeFlat:
+ case Builtins::kArrayPrototypeFlatMap:
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayPrototypeSlice:
case Builtins::kArrayForEach:
@@ -630,9 +567,6 @@ SharedFunctionInfo::SideEffectState BuiltinGetSideEffectState(
case Builtins::kArrayMap:
case Builtins::kArrayReduce:
case Builtins::kArrayReduceRight:
- // Trace builtins
- case Builtins::kIsTraceCategoryEnabled:
- case Builtins::kTrace:
// TypedArray builtins.
case Builtins::kTypedArrayConstructor:
case Builtins::kTypedArrayPrototypeBuffer:
@@ -866,7 +800,7 @@ SharedFunctionInfo::SideEffectState BuiltinGetSideEffectState(
case Builtins::kMakeURIError:
// RegExp builtins.
case Builtins::kRegExpConstructor:
- return SharedFunctionInfo::kHasNoSideEffect;
+ return DebugInfo::kHasNoSideEffect;
// Set builtins.
case Builtins::kSetIteratorPrototypeNext:
case Builtins::kSetPrototypeAdd:
@@ -895,13 +829,13 @@ SharedFunctionInfo::SideEffectState BuiltinGetSideEffectState(
case Builtins::kRegExpPrototypeDotAllGetter:
case Builtins::kRegExpPrototypeUnicodeGetter:
case Builtins::kRegExpPrototypeStickyGetter:
- return SharedFunctionInfo::kRequiresRuntimeChecks;
+ return DebugInfo::kRequiresRuntimeChecks;
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] built-in %s may cause side effect.\n",
Builtins::name(id));
}
- return SharedFunctionInfo::kHasSideEffects;
+ return DebugInfo::kHasSideEffects;
}
}
@@ -923,8 +857,8 @@ bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
} // anonymous namespace
// static
-SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
- Handle<SharedFunctionInfo> info) {
+DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
+ Isolate* isolate, Handle<SharedFunctionInfo> info) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Checking function %s for side effect.\n",
info->DebugName()->ToCString().get());
@@ -933,8 +867,10 @@ SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
DCHECK(info->is_compiled());
if (info->HasBytecodeArray()) {
// Check bytecodes against whitelist.
- Handle<BytecodeArray> bytecode_array(info->GetBytecodeArray());
- if (FLAG_trace_side_effect_free_debug_evaluate) bytecode_array->Print();
+ Handle<BytecodeArray> bytecode_array(info->GetBytecodeArray(), isolate);
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ bytecode_array->Print();
+ }
bool requires_runtime_checks = false;
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
it.Advance()) {
@@ -946,7 +882,7 @@ SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
? it.GetIntrinsicIdOperand(0)
: it.GetRuntimeIdOperand(0);
if (IntrinsicHasNoSideEffect(id)) continue;
- return SharedFunctionInfo::kHasSideEffects;
+ return DebugInfo::kHasSideEffects;
}
if (BytecodeHasNoSideEffect(bytecode)) continue;
@@ -961,15 +897,15 @@ SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
}
// Did not match whitelist.
- return SharedFunctionInfo::kHasSideEffects;
+ return DebugInfo::kHasSideEffects;
}
- return requires_runtime_checks ? SharedFunctionInfo::kRequiresRuntimeChecks
- : SharedFunctionInfo::kHasNoSideEffect;
+ return requires_runtime_checks ? DebugInfo::kRequiresRuntimeChecks
+ : DebugInfo::kHasNoSideEffect;
} else if (info->IsApiFunction()) {
if (info->GetCode()->is_builtin()) {
return info->GetCode()->builtin_index() == Builtins::kHandleApiCall
- ? SharedFunctionInfo::kHasNoSideEffect
- : SharedFunctionInfo::kHasSideEffects;
+ ? DebugInfo::kHasNoSideEffect
+ : DebugInfo::kHasSideEffects;
}
} else {
// Check built-ins against whitelist.
@@ -977,12 +913,11 @@ SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
info->HasBuiltinId() ? info->builtin_id() : Builtins::kNoBuiltinId;
DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
if (!Builtins::IsBuiltinId(builtin_index))
- return SharedFunctionInfo::kHasSideEffects;
- SharedFunctionInfo::SideEffectState state =
+ return DebugInfo::kHasSideEffects;
+ DebugInfo::SideEffectState state =
BuiltinGetSideEffectState(static_cast<Builtins::Name>(builtin_index));
#ifdef DEBUG
- if (state == SharedFunctionInfo::kHasNoSideEffect) {
- Isolate* isolate = info->GetIsolate();
+ if (state == DebugInfo::kHasNoSideEffect) {
Code* code = isolate->builtins()->builtin(builtin_index);
if (code->builtin_index() == Builtins::kDeserializeLazy) {
// Target builtin is not yet deserialized. Deserialize it now.
@@ -1015,7 +950,7 @@ SharedFunctionInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
return state;
}
- return SharedFunctionInfo::kHasSideEffects;
+ return DebugInfo::kHasSideEffects;
}
// static
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index fbc9887440..420c6c208b 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,7 +7,8 @@
#include <vector>
-#include "src/frames.h"
+#include "src/debug/debug-frames.h"
+#include "src/debug/debug-scopes.h"
#include "src/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string-table.h"
@@ -38,8 +39,8 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> WithTopmostArguments(Isolate* isolate,
Handle<String> source);
- static SharedFunctionInfo::SideEffectState FunctionGetSideEffectState(
- Handle<SharedFunctionInfo> info);
+ static DebugInfo::SideEffectState FunctionGetSideEffectState(
+ Isolate* isolate, Handle<SharedFunctionInfo> info);
static bool CallbackHasNoSideEffect(Object* callback_info);
static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
@@ -70,31 +71,20 @@ class DebugEvaluate : public AllStatic {
void UpdateValues();
Handle<Context> evaluation_context() const { return evaluation_context_; }
- Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
+ Handle<SharedFunctionInfo> outer_info() const;
private:
struct ContextChainElement {
- Handle<ScopeInfo> scope_info;
Handle<Context> wrapped_context;
Handle<JSObject> materialized_object;
Handle<StringSet> whitelist;
};
- void MaterializeReceiver(Handle<JSObject> target,
- Handle<Context> local_context,
- Handle<JSFunction> local_function,
- Handle<StringSet> non_locals);
-
- void MaterializeStackLocals(Handle<JSObject> target,
- Handle<JSFunction> function,
- FrameInspector* frame_inspector);
-
- Handle<SharedFunctionInfo> outer_info_;
Handle<Context> evaluation_context_;
std::vector<ContextChainElement> context_chain_;
Isolate* isolate_;
- JavaScriptFrame* frame_;
- int inlined_jsframe_index_;
+ FrameInspector frame_inspector_;
+ ScopeIterator scope_iterator_;
};
static MaybeHandle<Object> Evaluate(Isolate* isolate,
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a7426eb96e..e474056107 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -15,6 +15,7 @@ namespace internal {
FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate)
: frame_(frame),
+ inlined_frame_index_(inlined_frame_index),
isolate_(isolate) {
// Extract the relevant information from the frame summary and discard it.
FrameSummary summary = FrameSummary::Get(frame, inlined_frame_index);
@@ -82,97 +83,6 @@ bool FrameInspector::IsWasm() { return frame_->is_wasm(); }
bool FrameInspector::IsJavaScript() { return frame_->is_java_script(); }
-// To inspect all the provided arguments the frame might need to be
-// replaced with the arguments frame.
-void FrameInspector::SetArgumentsFrame(StandardFrame* frame) {
- DCHECK(has_adapted_arguments_);
- DCHECK(frame->is_arguments_adaptor());
- frame_ = frame;
- is_optimized_ = frame_->is_optimized();
- is_interpreted_ = frame_->is_interpreted();
- DCHECK(!is_optimized_);
-}
-
-
-// Create a plain JSObject which materializes the local scope for the specified
-// frame.
-void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
- Handle<ScopeInfo> scope_info,
- bool materialize_arguments_object) {
- HandleScope scope(isolate_);
- // First fill all parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- // Do not materialize the parameter if it is shadowed by a context local.
- // TODO(yangguo): check whether this is necessary, now that we materialize
- // context locals as well.
- Handle<String> name(scope_info->ParameterName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
-
- Handle<Object> value =
- i < GetParametersCount()
- ? GetParameter(i)
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
- DCHECK(!value->IsTheHole(isolate_));
-
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
- }
-
- // Second fill all stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- Handle<String> name(scope_info->StackLocalName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
- // TODO(yangguo): We convert optimized out values to {undefined} when they
- // are passed to the debugger. Eventually we should handle them somehow.
- if (value->IsTheHole(isolate_)) {
- value = isolate_->factory()->undefined_value();
- }
- if (value->IsOptimizedOut(isolate_)) {
- if (materialize_arguments_object) {
- Handle<String> arguments_str = isolate_->factory()->arguments_string();
- if (String::Equals(name, arguments_str)) continue;
- }
- value = isolate_->factory()->undefined_value();
- }
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
- }
-}
-
-
-void FrameInspector::UpdateStackLocalsFromMaterializedObject(
- Handle<JSObject> target, Handle<ScopeInfo> scope_info) {
- // Optimized frames and wasm frames are not supported. Simply give up.
- if (is_optimized_ || frame_->is_wasm()) return;
-
- HandleScope scope(isolate_);
-
- // Parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- // Shadowed parameters were not materialized.
- Handle<String> name(scope_info->ParameterName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
-
- DCHECK(!javascript_frame()->GetParameter(i)->IsTheHole(isolate_));
- Handle<Object> value =
- Object::GetPropertyOrElement(target, name).ToHandleChecked();
- javascript_frame()->SetParameterValue(i, *value);
- }
-
- // Stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- Handle<String> name(scope_info->StackLocalName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- int index = scope_info->StackLocalIndex(i);
- if (frame_->GetExpression(index)->IsTheHole(isolate_)) continue;
- Handle<Object> value =
- Object::GetPropertyOrElement(target, name).ToHandleChecked();
- frame_->SetExpression(index, *value);
- }
-}
-
-
bool FrameInspector::ParameterIsShadowedByContextLocal(
Handle<ScopeInfo> info, Handle<String> parameter_name) {
VariableMode mode;
@@ -181,32 +91,5 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
&maybe_assigned_flag) != -1;
}
-
-SaveContext* DebugFrameHelper::FindSavedContextForFrame(Isolate* isolate,
- StandardFrame* frame) {
- SaveContext* save = isolate->save_context();
- while (save != nullptr && !save->IsBelowFrame(frame)) {
- save = save->prev();
- }
- DCHECK(save != nullptr);
- return save;
-}
-
-int DebugFrameHelper::FindIndexedNonNativeFrame(StackTraceFrameIterator* it,
- int index) {
- int count = -1;
- for (; !it->done(); it->Advance()) {
- std::vector<FrameSummary> frames;
- it->frame()->Summarize(&frames);
- for (size_t i = frames.size(); i != 0; i--) {
- // Omit functions from native and extension scripts.
- if (!frames[i - 1].is_subject_to_debugging()) continue;
- if (++count == index) return static_cast<int>(i) - 1;
- }
- }
- return -1;
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 6b4f8c23f6..6a613dbae9 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -22,7 +22,7 @@ class FrameInspector {
~FrameInspector();
int GetParametersCount();
- Handle<JSFunction> GetFunction() { return function_; }
+ Handle<JSFunction> GetFunction() const { return function_; }
Handle<Script> GetScript() { return script_; }
Handle<Object> GetParameter(int index);
Handle<Object> GetExpression(int index);
@@ -41,21 +41,14 @@ class FrameInspector {
: JavaScriptFrame::cast(frame_);
}
- JavaScriptFrame* GetArgumentsFrame() { return javascript_frame(); }
- void SetArgumentsFrame(StandardFrame* frame);
-
- void MaterializeStackLocals(Handle<JSObject> target,
- Handle<ScopeInfo> scope_info,
- bool materialize_arguments_object = false);
-
- void UpdateStackLocalsFromMaterializedObject(Handle<JSObject> object,
- Handle<ScopeInfo> scope_info);
+ int inlined_frame_index() const { return inlined_frame_index_; }
private:
bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
Handle<String> parameter_name);
StandardFrame* frame_;
+ int inlined_frame_index_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
wasm::WasmInterpreter::FramePtr wasm_interpreted_frame_;
Isolate* isolate_;
@@ -71,27 +64,6 @@ class FrameInspector {
DISALLOW_COPY_AND_ASSIGN(FrameInspector);
};
-
-
-class DebugFrameHelper : public AllStatic {
- public:
- static SaveContext* FindSavedContextForFrame(Isolate* isolate,
- StandardFrame* frame);
- // Advances the iterator to the frame that matches the index and returns the
- // inlined frame index, or -1 if not found. Skips native JS functions.
- static int FindIndexedNonNativeFrame(StackTraceFrameIterator* it, int index);
-
- // Helper functions for wrapping and unwrapping stack frame ids.
- static Smi* WrapFrameId(StackFrame::Id id) {
- DCHECK(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
- return Smi::FromInt(id >> 2);
- }
-
- static StackFrame::Id UnwrapFrameId(int wrapped) {
- return static_cast<StackFrame::Id>(wrapped << 2);
- }
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 2210b4e87f..ac8073e02c 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -21,6 +21,7 @@ struct CoverageScript;
struct TypeProfileEntry;
struct TypeProfileScript;
class Coverage;
+class PostponeInterruptsScope;
class Script;
class TypeProfile;
} // namespace internal
@@ -33,51 +34,13 @@ int GetContextId(Local<Context> context);
void SetInspector(Isolate* isolate, v8_inspector::V8Inspector*);
v8_inspector::V8Inspector* GetInspector(Isolate* isolate);
-/**
- * Debugger is running in its own context which is entered while debugger
- * messages are being dispatched. This is an explicit getter for this
- * debugger context. Note that the content of the debugger context is subject
- * to change. The Context exists only when the debugger is active, i.e. at
- * least one DebugEventListener or MessageHandler is set.
- */
-Local<Context> GetDebugContext(Isolate* isolate);
-
-/**
- * Run a JavaScript function in the debugger.
- * \param fun the function to call
- * \param data passed as second argument to the function
- * With this call the debugger is entered and the function specified is called
- * with the execution state as the first argument. This makes it possible to
- * get access to information otherwise not available during normal JavaScript
- * execution e.g. details on stack frames. Receiver of the function call will
- * be the debugger context global object, however this is a subject to change.
- * The following example shows a JavaScript function which when passed to
- * v8::Debug::Call will return the current line of JavaScript execution.
- *
- * \code
- * function frame_source_line(exec_state) {
- * return exec_state.frame(0).sourceLine();
- * }
- * \endcode
- */
-// TODO(dcarney): data arg should be a MaybeLocal
-MaybeLocal<Value> Call(Local<Context> context, v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>());
-
-/**
- * Enable/disable LiveEdit functionality for the given Isolate
- * (default Isolate if not provided). V8 will abort if LiveEdit is
- * unexpectedly used. LiveEdit is enabled by default.
- */
-V8_EXPORT_PRIVATE void SetLiveEditEnabled(Isolate* isolate, bool enable);
-
-// Schedule a debugger break to happen when JavaScript code is run
-// in the given isolate.
-void DebugBreak(Isolate* isolate);
+// Schedule a debugger break to happen when function is called inside given
+// isolate.
+void SetBreakOnNextFunctionCall(Isolate* isolate);
// Remove scheduled debugger break in given isolate if it has not
// happened yet.
-void CancelDebugBreak(Isolate* isolate);
+void ClearBreakOnNextFunctionCall(Isolate* isolate);
/**
* Returns array of internal properties specific to the value type. Result has
@@ -116,18 +79,28 @@ void BreakRightNow(Isolate* isolate);
bool AllFramesOnStackAreBlackboxed(Isolate* isolate);
-/**
- * Out-of-memory callback function.
- * The function is invoked when the heap size is close to the hard limit.
- *
- * \param data the parameter provided during callback installation.
- */
-typedef void (*OutOfMemoryCallback)(void* data);
+class Script;
-V8_DEPRECATED("Use v8::Isolate::AddNearHeapLimitCallback",
- void SetOutOfMemoryCallback(Isolate* isolate,
- OutOfMemoryCallback callback,
- void* data));
+struct LiveEditResult {
+ enum Status {
+ OK,
+ COMPILE_ERROR,
+ BLOCKED_BY_RUNNING_GENERATOR,
+ BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME,
+ BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME,
+ BLOCKED_BY_ACTIVE_FUNCTION,
+ BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME,
+ FRAME_RESTART_IS_NOT_SUPPORTED
+ };
+ Status status = OK;
+ bool stack_changed = false;
+ // Available only for OK.
+ v8::Local<v8::debug::Script> script;
+ // Fields below are available only for COMPILE_ERROR.
+ v8::Local<v8::String> message;
+ int line_number = -1;
+ int column_number = -1;
+};
/**
* Native wrapper around v8::internal::Script object.
@@ -157,7 +130,7 @@ class V8_EXPORT_PRIVATE Script {
int GetSourceOffset(const debug::Location& location) const;
v8::debug::Location GetSourceLocation(int offset) const;
bool SetScriptSource(v8::Local<v8::String> newSource, bool preview,
- bool* stack_changed) const;
+ LiveEditResult* result) const;
bool SetBreakpoint(v8::Local<v8::String> condition, debug::Location* location,
BreakpointId* id) const;
};
@@ -184,17 +157,14 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
class DebugDelegate {
public:
virtual ~DebugDelegate() {}
- virtual void PromiseEventOccurred(debug::PromiseDebugActionType type, int id,
- bool is_blackboxed) {}
virtual void ScriptCompiled(v8::Local<Script> script, bool is_live_edited,
bool has_compile_error) {}
// |inspector_break_points_hit| contains id of breakpoints installed with
// debug::Script::SetBreakpoint API.
virtual void BreakProgramRequested(
- v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Context> paused_context,
const std::vector<debug::BreakpointId>& inspector_break_points_hit) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
- v8::Local<v8::Object> exec_state,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught) {
}
@@ -207,6 +177,15 @@ class DebugDelegate {
void SetDebugDelegate(Isolate* isolate, DebugDelegate* listener);
+class AsyncEventDelegate {
+ public:
+ virtual ~AsyncEventDelegate() {}
+ virtual void AsyncEventOccurred(debug::DebugAsyncActionType type, int id,
+ bool is_blackboxed) = 0;
+};
+
+void SetAsyncEventDelegate(Isolate* isolate, AsyncEventDelegate* delegate);
+
void ResetBlackboxedStateCache(Isolate* isolate,
v8::Local<debug::Script> script);
@@ -435,7 +414,6 @@ class ScopeIterator {
virtual void Advance() = 0;
virtual ScopeType GetType() = 0;
virtual v8::Local<v8::Object> GetObject() = 0;
- virtual v8::Local<v8::Function> GetFunction() = 0;
virtual v8::Local<v8::Value> GetFunctionDebugName() = 0;
virtual int GetScriptId() = 0;
virtual bool HasLocationInfo() = 0;
@@ -515,6 +493,15 @@ bool SetFunctionBreakpoint(v8::Local<v8::Function> function,
v8::Platform* GetCurrentPlatform();
+class PostponeInterruptsScope {
+ public:
+ explicit PostponeInterruptsScope(v8::Isolate* isolate);
+ ~PostponeInterruptsScope();
+
+ private:
+ std::unique_ptr<i::PostponeInterruptsScope> scope_;
+};
+
} // namespace debug
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index f3185b149a..dbade081cb 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -75,26 +75,8 @@ void DebugScopeIterator::Advance() {
}
bool DebugScopeIterator::ShouldIgnore() {
- // Almost always Script scope will be empty, so just filter out that noise.
- // Also drop empty Block, Eval and Script scopes, should we get any.
- DCHECK(!Done());
- debug::ScopeIterator::ScopeType type = GetType();
- if (type != debug::ScopeIterator::ScopeTypeBlock &&
- type != debug::ScopeIterator::ScopeTypeScript &&
- type != debug::ScopeIterator::ScopeTypeEval &&
- type != debug::ScopeIterator::ScopeTypeModule) {
- return false;
- }
-
- // TODO(kozyatinskiy): make this function faster.
- Handle<JSObject> value;
- if (!iterator_.ScopeObject().ToHandle(&value)) return false;
- Handle<FixedArray> keys =
- KeyAccumulator::GetKeys(value, KeyCollectionMode::kOwnOnly,
- ENUMERABLE_STRINGS,
- GetKeysConversion::kConvertToString)
- .ToHandleChecked();
- return keys->length() == 0;
+ if (GetType() == debug::ScopeIterator::ScopeTypeLocal) return false;
+ return !iterator_.DeclaresLocals(i::ScopeIterator::Mode::ALL);
}
v8::debug::ScopeIterator::ScopeType DebugScopeIterator::GetType() {
@@ -104,19 +86,10 @@ v8::debug::ScopeIterator::ScopeType DebugScopeIterator::GetType() {
v8::Local<v8::Object> DebugScopeIterator::GetObject() {
DCHECK(!Done());
- Handle<JSObject> value;
- if (iterator_.ScopeObject().ToHandle(&value)) {
- return Utils::ToLocal(value);
- }
- return v8::Local<v8::Object>();
+ Handle<JSObject> value = iterator_.ScopeObject(i::ScopeIterator::Mode::ALL);
+ return Utils::ToLocal(value);
}
-v8::Local<v8::Function> DebugScopeIterator::GetFunction() {
- DCHECK(!Done());
- Handle<JSFunction> closure = iterator_.GetFunction();
- if (closure.is_null()) return v8::Local<v8::Function>();
- return Utils::ToLocal(closure);
-}
int DebugScopeIterator::GetScriptId() {
DCHECK(!Done());
return iterator_.GetScript()->id();
@@ -201,11 +174,6 @@ int DebugWasmScopeIterator::GetScriptId() {
return -1;
}
-v8::Local<v8::Function> DebugWasmScopeIterator::GetFunction() {
- DCHECK(!Done());
- return v8::Local<v8::Function>();
-}
-
v8::Local<v8::Value> DebugWasmScopeIterator::GetFunctionDebugName() {
DCHECK(!Done());
return Utils::ToLocal(isolate_->factory()->empty_string());
diff --git a/deps/v8/src/debug/debug-scope-iterator.h b/deps/v8/src/debug/debug-scope-iterator.h
index 38109e754d..912d6858fd 100644
--- a/deps/v8/src/debug/debug-scope-iterator.h
+++ b/deps/v8/src/debug/debug-scope-iterator.h
@@ -23,7 +23,6 @@ class DebugScopeIterator final : public debug::ScopeIterator {
void Advance() override;
ScopeType GetType() override;
v8::Local<v8::Object> GetObject() override;
- v8::Local<v8::Function> GetFunction() override;
v8::Local<v8::Value> GetFunctionDebugName() override;
int GetScriptId() override;
bool HasLocationInfo() override;
@@ -48,7 +47,6 @@ class DebugWasmScopeIterator final : public debug::ScopeIterator {
void Advance() override;
ScopeType GetType() override;
v8::Local<v8::Object> GetObject() override;
- v8::Local<v8::Function> GetFunction() override;
v8::Local<v8::Value> GetFunctionDebugName() override;
int GetScriptId() override;
bool HasLocationInfo() override;
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index c6e46d2167..8c6fae1d9f 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -12,6 +12,7 @@
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
+#include "src/objects/module.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
@@ -24,28 +25,28 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
: isolate_(isolate),
frame_inspector_(frame_inspector),
function_(frame_inspector_->GetFunction()),
- script_(frame_inspector_->GetScript()),
- seen_script_scope_(false) {
+ script_(frame_inspector_->GetScript()) {
if (!frame_inspector->GetContext()->IsContext()) {
// Optimized frame, context or function cannot be materialized. Give up.
return;
}
context_ = Handle<Context>::cast(frame_inspector->GetContext());
- context_ = Handle<Context>::cast(frame_inspector->GetContext());
-
// We should not instantiate a ScopeIterator for wasm frames.
- DCHECK(frame_inspector->GetScript()->type() != Script::TYPE_WASM);
+ DCHECK_NE(Script::TYPE_WASM, frame_inspector->GetScript()->type());
TryParseAndRetrieveScopes(option);
}
+ScopeIterator::~ScopeIterator() { delete info_; }
+
Handle<Object> ScopeIterator::GetFunctionDebugName() const {
- if (HasNestedScopeChain()) return JSFunction::GetDebugName(function_);
+ if (!function_.is_null()) return JSFunction::GetDebugName(function_);
+
if (!context_->IsNativeContext()) {
DisallowHeapAllocation no_gc;
ScopeInfo* closure_info = context_->closure_context()->scope_info();
- Handle<String> debug_name(closure_info->FunctionDebugName());
+ Handle<String> debug_name(closure_info->FunctionDebugName(), isolate_);
if (debug_name->length() > 0) return debug_name;
}
return isolate_->factory()->undefined_value();
@@ -53,10 +54,12 @@ Handle<Object> ScopeIterator::GetFunctionDebugName() const {
ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
- context_(function->context()),
- script_(Script::cast(function->shared()->script())),
- seen_script_scope_(false) {
- if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
+ context_(function->context(), isolate),
+ script_(Script::cast(function->shared()->script()), isolate) {
+ if (!function->shared()->IsSubjectToDebugging()) {
+ context_ = Handle<Context>();
+ return;
+ }
UnwrapEvaluationContext();
}
@@ -64,10 +67,9 @@ ScopeIterator::ScopeIterator(Isolate* isolate,
Handle<JSGeneratorObject> generator)
: isolate_(isolate),
generator_(generator),
- function_(generator->function()),
- context_(generator->context()),
- script_(Script::cast(function_->shared()->script())),
- seen_script_scope_(false) {
+ function_(generator->function(), isolate),
+ context_(generator->context(), isolate),
+ script_(Script::cast(function_->shared()->script()), isolate) {
if (!function_->shared()->IsSubjectToDebugging()) {
context_ = Handle<Context>();
return;
@@ -75,33 +77,36 @@ ScopeIterator::ScopeIterator(Isolate* isolate,
TryParseAndRetrieveScopes(DEFAULT);
}
+void ScopeIterator::Restart() {
+ DCHECK_NOT_NULL(frame_inspector_);
+ function_ = frame_inspector_->GetFunction();
+ context_ = Handle<Context>::cast(frame_inspector_->GetContext());
+ current_scope_ = start_scope_;
+ DCHECK_NOT_NULL(current_scope_);
+ UnwrapEvaluationContext();
+}
+
void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
// Catch the case when the debugger stops in an internal function.
- Handle<SharedFunctionInfo> shared_info(function_->shared());
- Handle<ScopeInfo> scope_info(shared_info->scope_info());
+ Handle<SharedFunctionInfo> shared_info(function_->shared(), isolate_);
+ Handle<ScopeInfo> scope_info(shared_info->scope_info(), isolate_);
if (shared_info->script()->IsUndefined(isolate_)) {
- context_ = handle(function_->context());
+ current_scope_ = closure_scope_ = nullptr;
+ context_ = handle(function_->context(), isolate_);
function_ = Handle<JSFunction>();
return;
}
- // Currently it takes too much time to find nested scopes due to script
- // parsing. Sometimes we want to run the ScopeIterator as fast as possible
- // (for example, while collecting async call stacks on every
- // addEventListener call), even if we drop some nested scopes.
- // Later we may optimize getting the nested scopes (cache the result?)
- // and include nested scopes into the "fast" iteration case as well.
- bool ignore_nested_scopes = (option == IGNORE_NESTED_SCOPES);
- bool collect_non_locals = (option == COLLECT_NON_LOCALS);
- if (!ignore_nested_scopes && shared_info->HasBreakInfo() &&
- frame_inspector_ != nullptr) {
+ DCHECK_NE(IGNORE_NESTED_SCOPES, option);
+ bool ignore_nested_scopes = false;
+ if (shared_info->HasBreakInfo() && frame_inspector_ != nullptr) {
// The source position at return is always the end of the function,
// which is not consistent with the current scope chain. Therefore all
// nested with, catch and block contexts are skipped, and we can only
// inspect the function scope.
// This can only happen if we set a break point inside right before the
// return, which requires a debug info to be available.
- Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared_info->GetDebugInfo(), isolate_);
// Find the break point where execution has stopped.
BreakLocation location = BreakLocation::FromFrame(debug_info, GetFrame());
@@ -109,57 +114,52 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
ignore_nested_scopes = location.IsReturn();
}
- if (ignore_nested_scopes) {
- if (scope_info->HasContext()) {
- context_ = Handle<Context>(context_->declaration_context(), isolate_);
- } else {
- context_ = handle(function_->context());
- }
- if (scope_info->scope_type() == FUNCTION_SCOPE) {
- nested_scope_chain_.emplace_back(scope_info, shared_info->StartPosition(),
- shared_info->EndPosition());
- }
- if (!collect_non_locals) return;
- }
-
// Reparse the code and analyze the scopes.
// Check whether we are in global, eval or function code.
- std::unique_ptr<ParseInfo> info;
- if (scope_info->scope_type() != FUNCTION_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
+ // Inner function.
+ info_ = new ParseInfo(isolate_, shared_info);
+ } else {
// Global or eval code.
- Handle<Script> script(Script::cast(shared_info->script()));
- info.reset(new ParseInfo(script));
+ Handle<Script> script(Script::cast(shared_info->script()), isolate_);
+ info_ = new ParseInfo(isolate_, script);
if (scope_info->scope_type() == EVAL_SCOPE) {
- info->set_eval();
- if (!function_->context()->IsNativeContext()) {
- info->set_outer_scope_info(handle(function_->context()->scope_info()));
+ info_->set_eval();
+ if (!context_->IsNativeContext()) {
+ info_->set_outer_scope_info(handle(context_->scope_info(), isolate_));
}
// Language mode may be inherited from the eval caller.
// Retrieve it from shared function info.
- info->set_language_mode(shared_info->language_mode());
+ info_->set_language_mode(shared_info->language_mode());
} else if (scope_info->scope_type() == MODULE_SCOPE) {
- DCHECK(info->is_module());
+ DCHECK(info_->is_module());
} else {
- DCHECK(scope_info->scope_type() == SCRIPT_SCOPE);
+ DCHECK_EQ(SCRIPT_SCOPE, scope_info->scope_type());
}
- } else {
- // Inner function.
- info.reset(new ParseInfo(shared_info));
- }
- if (parsing::ParseAny(info.get(), shared_info, isolate_) &&
- Rewriter::Rewrite(info.get())) {
- info->ast_value_factory()->Internalize(isolate_);
- DeclarationScope* scope = info->literal()->scope();
- if (!ignore_nested_scopes || collect_non_locals) {
- CollectNonLocals(info.get(), scope);
+ }
+
+ if (parsing::ParseAny(info_, shared_info, isolate_) &&
+ Rewriter::Rewrite(info_)) {
+ info_->ast_value_factory()->Internalize(isolate_);
+ closure_scope_ = info_->literal()->scope();
+
+ if (option == COLLECT_NON_LOCALS) {
+ DCHECK(non_locals_.is_null());
+ non_locals_ = info_->literal()->scope()->CollectNonLocals(
+ isolate_, info_, StringSet::New(isolate_));
}
- if (!ignore_nested_scopes) {
- if (DeclarationScope::Analyze(info.get())) {
- DeclarationScope::AllocateScopeInfos(info.get(), isolate_,
- AnalyzeMode::kDebugger);
- RetrieveScopeChain(scope);
+
+ CHECK(DeclarationScope::Analyze(info_));
+ if (ignore_nested_scopes) {
+ current_scope_ = closure_scope_;
+ start_scope_ = current_scope_;
+ if (closure_scope_->NeedsContext()) {
+ context_ = handle(context_->closure_context(), isolate_);
}
+ } else {
+ RetrieveScopeChain(closure_scope_);
}
+ UnwrapEvaluationContext();
} else {
// A failed reparse indicates that the preparser has diverged from the
// parser or that the preparse data given to the initial parse has been
@@ -172,32 +172,30 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
isolate_->clear_pending_exception();
context_ = Handle<Context>();
}
- UnwrapEvaluationContext();
}
void ScopeIterator::UnwrapEvaluationContext() {
- while (true) {
- if (context_.is_null()) return;
- if (!context_->IsDebugEvaluateContext()) return;
- Handle<Object> wrapped(context_->get(Context::WRAPPED_CONTEXT_INDEX),
- isolate_);
+ if (!context_->IsDebugEvaluateContext()) return;
+ Context* current = *context_;
+ do {
+ Object* wrapped = current->get(Context::WRAPPED_CONTEXT_INDEX);
if (wrapped->IsContext()) {
- context_ = Handle<Context>::cast(wrapped);
+ current = Context::cast(wrapped);
} else {
- context_ = Handle<Context>(context_->previous(), isolate_);
+ DCHECK_NOT_NULL(current->previous());
+ current = current->previous();
}
- }
+ } while (current->IsDebugEvaluateContext());
+ context_ = handle(current, isolate_);
}
-V8_WARN_UNUSED_RESULT MaybeHandle<JSObject>
-ScopeIterator::MaterializeScopeDetails() {
+Handle<JSObject> ScopeIterator::MaterializeScopeDetails() {
// Calculate the size of the result.
Handle<FixedArray> details =
isolate_->factory()->NewFixedArray(kScopeDetailsSize);
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(Type()));
- Handle<JSObject> scope_object;
- ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
+ Handle<JSObject> scope_object = ScopeObject(Mode::ALL);
details->set(kScopeDetailsObjectIndex, *scope_object);
if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript) {
return isolate_->factory()->NewJSArrayWithElements(details);
@@ -207,7 +205,7 @@ ScopeIterator::MaterializeScopeDetails() {
details->set(kScopeDetailsStartPositionIndex,
Smi::FromInt(start_position()));
details->set(kScopeDetailsEndPositionIndex, Smi::FromInt(end_position()));
- if (HasNestedScopeChain()) {
+ if (InInnerScope()) {
details->set(kScopeDetailsFunctionIndex, *function_);
}
}
@@ -215,87 +213,115 @@ ScopeIterator::MaterializeScopeDetails() {
}
bool ScopeIterator::HasPositionInfo() {
- return HasNestedScopeChain() || !context_->IsNativeContext();
+ return InInnerScope() || !context_->IsNativeContext();
}
int ScopeIterator::start_position() {
- if (HasNestedScopeChain()) {
- return LastNestedScopeChain().start_position;
- }
+ if (InInnerScope()) return current_scope_->start_position();
if (context_->IsNativeContext()) return 0;
return context_->closure_context()->scope_info()->StartPosition();
}
int ScopeIterator::end_position() {
- if (HasNestedScopeChain()) {
- return LastNestedScopeChain().end_position;
- }
+ if (InInnerScope()) return current_scope_->end_position();
if (context_->IsNativeContext()) return 0;
return context_->closure_context()->scope_info()->EndPosition();
}
+bool ScopeIterator::DeclaresLocals(Mode mode) const {
+ ScopeType type = Type();
+
+ if (type == ScopeTypeWith) return mode == Mode::ALL;
+ if (type == ScopeTypeGlobal) return mode == Mode::ALL;
+
+ bool declares_local = false;
+ auto visitor = [&](Handle<String> name, Handle<Object> value) {
+ declares_local = true;
+ return true;
+ };
+ VisitScope(visitor, mode);
+ return declares_local;
+}
+
+bool ScopeIterator::HasContext() const {
+ return !InInnerScope() || current_scope_->NeedsContext();
+}
+
void ScopeIterator::Next() {
DCHECK(!Done());
+
ScopeType scope_type = Type();
+
if (scope_type == ScopeTypeGlobal) {
// The global scope is always the last in the chain.
DCHECK(context_->IsNativeContext());
context_ = Handle<Context>();
- } else if (scope_type == ScopeTypeScript) {
+ DCHECK(Done());
+ return;
+ }
+
+ bool inner = InInnerScope();
+ if (current_scope_ == closure_scope_) function_ = Handle<JSFunction>();
+
+ if (scope_type == ScopeTypeScript) {
+ DCHECK_IMPLIES(InInnerScope(), current_scope_->is_script_scope());
seen_script_scope_ = true;
if (context_->IsScriptContext()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- if (HasNestedScopeChain()) {
- DCHECK_EQ(LastNestedScopeChain().scope_info->scope_type(), SCRIPT_SCOPE);
- nested_scope_chain_.pop_back();
- DCHECK(!HasNestedScopeChain());
+ context_ = handle(context_->previous(), isolate_);
}
- CHECK(context_->IsNativeContext());
- } else if (!HasNestedScopeChain()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
+ } else if (!inner) {
+ DCHECK(!context_->IsNativeContext());
+ context_ = handle(context_->previous(), isolate_);
} else {
+ DCHECK_NOT_NULL(current_scope_);
do {
- if (LastNestedScopeChain().scope_info->HasContext()) {
- DCHECK(context_->previous() != nullptr);
- context_ = Handle<Context>(context_->previous(), isolate_);
+ if (current_scope_->NeedsContext()) {
+ DCHECK_NOT_NULL(context_->previous());
+ context_ = handle(context_->previous(), isolate_);
}
- nested_scope_chain_.pop_back();
- if (!HasNestedScopeChain()) break;
+ DCHECK_IMPLIES(InInnerScope(), current_scope_->outer_scope() != nullptr);
+ current_scope_ = current_scope_->outer_scope();
// Repeat to skip hidden scopes.
- } while (LastNestedScopeChain().is_hidden());
+ } while (current_scope_->is_hidden());
}
- if (!HasNestedScopeChain()) function_ = Handle<JSFunction>();
+
UnwrapEvaluationContext();
}
// Return the type of the current scope.
-ScopeIterator::ScopeType ScopeIterator::Type() {
+ScopeIterator::ScopeType ScopeIterator::Type() const {
DCHECK(!Done());
- if (HasNestedScopeChain()) {
- Handle<ScopeInfo> scope_info = LastNestedScopeChain().scope_info;
- switch (scope_info->scope_type()) {
+ if (InInnerScope()) {
+ switch (current_scope_->scope_type()) {
case FUNCTION_SCOPE:
- DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
+ DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ context_->IsFunctionContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
- DCHECK(context_->IsModuleContext());
+ DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ context_->IsModuleContext());
return ScopeTypeModule;
case SCRIPT_SCOPE:
- DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
+ DCHECK_IMPLIES(
+ current_scope_->NeedsContext(),
+ context_->IsScriptContext() || context_->IsNativeContext());
return ScopeTypeScript;
case WITH_SCOPE:
- DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
+ DCHECK_IMPLIES(
+ current_scope_->NeedsContext(),
+ context_->IsWithContext() || context_->IsDebugEvaluateContext());
return ScopeTypeWith;
case CATCH_SCOPE:
DCHECK(context_->IsCatchContext());
return ScopeTypeCatch;
case BLOCK_SCOPE:
- DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
+ DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ context_->IsBlockContext());
return ScopeTypeBlock;
case EVAL_SCOPE:
- DCHECK(!scope_info->HasContext() || context_->IsEvalContext());
+ DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ context_->IsEvalContext());
return ScopeTypeEval;
}
UNREACHABLE();
@@ -325,94 +351,91 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
return ScopeTypeWith;
}
-
-MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
+Handle<JSObject> ScopeIterator::ScopeObject(Mode mode) {
DCHECK(!Done());
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global_proxy());
- case ScopeIterator::ScopeTypeScript:
- return MaterializeScriptScope();
- case ScopeIterator::ScopeTypeLocal:
- // Materialize the content of the local scope into a JSObject.
- DCHECK_EQ(1, nested_scope_chain_.size());
- return MaterializeLocalScope();
- case ScopeIterator::ScopeTypeWith:
- return WithContextExtension();
- case ScopeIterator::ScopeTypeClosure:
- // Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure();
- case ScopeIterator::ScopeTypeCatch:
- case ScopeIterator::ScopeTypeBlock:
- case ScopeIterator::ScopeTypeEval:
- return MaterializeInnerScope();
- case ScopeIterator::ScopeTypeModule:
- return MaterializeModuleScope();
- }
- UNREACHABLE();
-}
-
-bool ScopeIterator::HasContext() {
ScopeType type = Type();
- if (type == ScopeTypeBlock || type == ScopeTypeLocal ||
- type == ScopeTypeEval) {
- if (HasNestedScopeChain()) {
- return LastNestedScopeChain().scope_info->HasContext();
- }
+ if (type == ScopeTypeGlobal) {
+ DCHECK_EQ(Mode::ALL, mode);
+ return handle(context_->global_proxy(), isolate_);
+ }
+ if (type == ScopeTypeWith) {
+ DCHECK_EQ(Mode::ALL, mode);
+ return WithContextExtension();
}
- return true;
-}
+ Handle<JSObject> scope = isolate_->factory()->NewJSObjectWithNullProto();
+ auto visitor = [=](Handle<String> name, Handle<Object> value) {
+ JSObject::AddProperty(isolate_, scope, name, value, NONE);
+ return false;
+ };
-bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- DCHECK(!Done());
+ VisitScope(visitor, mode);
+ return scope;
+}
+
+void ScopeIterator::VisitScope(const Visitor& visitor, Mode mode) const {
switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- break;
- case ScopeIterator::ScopeTypeLocal:
- return SetLocalVariableValue(variable_name, new_value);
- case ScopeIterator::ScopeTypeWith:
- break;
- case ScopeIterator::ScopeTypeClosure:
- return SetClosureVariableValue(variable_name, new_value);
- case ScopeIterator::ScopeTypeScript:
- return SetScriptVariableValue(variable_name, new_value);
- case ScopeIterator::ScopeTypeCatch:
- case ScopeIterator::ScopeTypeBlock:
- case ScopeIterator::ScopeTypeEval:
- return SetInnerScopeVariableValue(variable_name, new_value);
- case ScopeIterator::ScopeTypeModule:
- return SetModuleVariableValue(variable_name, new_value);
- break;
+ case ScopeTypeLocal:
+ case ScopeTypeClosure:
+ case ScopeTypeCatch:
+ case ScopeTypeBlock:
+ case ScopeTypeEval:
+ return VisitLocalScope(visitor, mode);
+ case ScopeTypeModule:
+ if (InInnerScope()) {
+ return VisitLocalScope(visitor, mode);
+ }
+ DCHECK_EQ(Mode::ALL, mode);
+ return VisitModuleScope(visitor);
+ case ScopeTypeScript:
+ DCHECK_EQ(Mode::ALL, mode);
+ return VisitScriptScope(visitor);
+ case ScopeTypeWith:
+ case ScopeTypeGlobal:
+ UNREACHABLE();
}
- return false;
}
-
-Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
+bool ScopeIterator::SetVariableValue(Handle<String> name,
+ Handle<Object> value) {
DCHECK(!Done());
- if (HasNestedScopeChain()) {
- return LastNestedScopeChain().scope_info;
- } else if (context_->IsBlockContext() || context_->IsFunctionContext() ||
- context_->IsEvalContext() || context_->IsCatchContext()) {
- return Handle<ScopeInfo>(context_->scope_info());
- }
- return Handle<ScopeInfo>::null();
-}
+ name = isolate_->factory()->InternalizeString(name);
+ switch (Type()) {
+ case ScopeTypeGlobal:
+ case ScopeTypeWith:
+ break;
+ case ScopeTypeEval:
+ case ScopeTypeBlock:
+ case ScopeTypeCatch:
+ case ScopeTypeModule:
+ if (InInnerScope()) return SetLocalVariableValue(name, value);
+ if (Type() == ScopeTypeModule && SetModuleVariableValue(name, value)) {
+ return true;
+ }
+ return SetContextVariableValue(name, value);
+
+ case ScopeTypeLocal:
+ case ScopeTypeClosure:
+ if (InInnerScope()) {
+ DCHECK_EQ(ScopeTypeLocal, Type());
+ if (SetLocalVariableValue(name, value)) return true;
+ // There may not be an associated context since we're InInnerScope().
+ if (!current_scope_->NeedsContext()) return false;
+ } else {
+ DCHECK_EQ(ScopeTypeClosure, Type());
+ if (SetContextVariableValue(name, value)) return true;
+ }
+ // The above functions only set variables statically declared in the
+ // function. There may be eval-introduced variables. Check them in
+ // SetContextExtensionValue.
+ return SetContextExtensionValue(name, value);
-Handle<Context> ScopeIterator::CurrentContext() {
- DCHECK(!Done());
- if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
- !HasNestedScopeChain()) {
- return context_;
- } else if (LastNestedScopeChain().scope_info->HasContext()) {
- return context_;
- } else {
- return Handle<Context>::null();
+ case ScopeTypeScript:
+ return SetScriptVariableValue(name, value);
}
+ return false;
}
Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
@@ -420,24 +443,22 @@ Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
#ifdef DEBUG
// Debug print of the content of the current scope.
void ScopeIterator::DebugPrint() {
- OFStream os(stdout);
+ StdoutStream os;
DCHECK(!Done());
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
os << "Global:\n";
- CurrentContext()->Print(os);
+ context_->Print(os);
break;
case ScopeIterator::ScopeTypeLocal: {
os << "Local:\n";
- function_->shared()->scope_info()->Print();
- if (!CurrentContext().is_null()) {
- CurrentContext()->Print(os);
- if (CurrentContext()->has_extension()) {
- Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print(os);
- }
+ if (current_scope_->NeedsContext()) {
+ context_->Print(os);
+ if (context_->has_extension()) {
+ Handle<HeapObject> extension(context_->extension(), isolate_);
+ DCHECK(extension->IsJSContextExtensionObject());
+ extension->Print(os);
}
}
break;
@@ -445,30 +466,28 @@ void ScopeIterator::DebugPrint() {
case ScopeIterator::ScopeTypeWith:
os << "With:\n";
- CurrentContext()->extension()->Print(os);
+ context_->extension()->Print(os);
break;
case ScopeIterator::ScopeTypeCatch:
os << "Catch:\n";
- CurrentContext()->extension()->Print(os);
- CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(os);
+ context_->extension()->Print(os);
+ context_->get(Context::THROWN_OBJECT_INDEX)->Print(os);
break;
case ScopeIterator::ScopeTypeClosure:
os << "Closure:\n";
- CurrentContext()->Print(os);
- if (CurrentContext()->has_extension()) {
- Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print(os);
- }
+ context_->Print(os);
+ if (context_->has_extension()) {
+ Handle<HeapObject> extension(context_->extension(), isolate_);
+ DCHECK(extension->IsJSContextExtensionObject());
+ extension->Print(os);
}
break;
case ScopeIterator::ScopeTypeScript:
os << "Script:\n";
- CurrentContext()
- ->global_object()
+ context_->global_object()
->native_context()
->script_context_table()
->Print(os);
@@ -492,345 +511,392 @@ int ScopeIterator::GetSourcePosition() {
void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
DCHECK_NOT_NULL(scope);
- GetNestedScopeChain(isolate_, scope, GetSourcePosition());
-}
-void ScopeIterator::CollectNonLocals(ParseInfo* info, DeclarationScope* scope) {
- DCHECK_NOT_NULL(scope);
- DCHECK(non_locals_.is_null());
- non_locals_ = scope->CollectNonLocals(info, StringSet::New(isolate_));
-}
+ const int position = GetSourcePosition();
+
+ Scope* parent = nullptr;
+ Scope* current = scope;
+ while (parent != current) {
+ parent = current;
+ for (Scope* inner_scope = current->inner_scope(); inner_scope != nullptr;
+ inner_scope = inner_scope->sibling()) {
+ int beg_pos = inner_scope->start_position();
+ int end_pos = inner_scope->end_position();
+ DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
+ if (beg_pos <= position && position < end_pos) {
+ // Don't walk into inner functions.
+ if (!inner_scope->is_function_scope()) {
+ current = inner_scope;
+ }
+ break;
+ }
+ }
+ }
+ start_scope_ = current;
+ current_scope_ = current;
+}
-MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
- Handle<JSGlobalObject> global(CurrentContext()->global_object());
+void ScopeIterator::VisitScriptScope(const Visitor& visitor) const {
+ Handle<JSGlobalObject> global(context_->global_object(), isolate_);
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
-
- Handle<JSObject> script_scope =
- isolate_->factory()->NewJSObjectWithNullProto();
+ global->native_context()->script_context_table(), isolate_);
- for (int context_index = 0; context_index < script_contexts->used();
+ // Skip the first script since that just declares 'this'.
+ for (int context_index = 1; context_index < script_contexts->used();
context_index++) {
- Handle<Context> context =
- ScriptContextTable::GetContext(script_contexts, context_index);
- Handle<ScopeInfo> scope_info(context->scope_info());
- CopyContextLocalsToScopeObject(scope_info, context, script_scope);
+ Handle<Context> context = ScriptContextTable::GetContext(
+ isolate_, script_contexts, context_index);
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ if (VisitContextLocals(visitor, scope_info, context)) return;
}
- return script_scope;
}
-void ScopeIterator::MaterializeStackLocals(Handle<JSObject> local_scope,
- Handle<ScopeInfo> scope_info) {
- if (frame_inspector_) {
- return frame_inspector_->MaterializeStackLocals(local_scope, scope_info);
- }
+void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
+ DCHECK(context_->IsModuleContext());
- DCHECK(!generator_.is_null());
- // Fill all stack locals.
- Handle<FixedArray> register_file(generator_->register_file());
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- Handle<String> name = handle(scope_info->StackLocalName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- Handle<Object> value(register_file->get(scope_info->StackLocalIndex(i)),
- isolate_);
- // TODO(yangguo): We convert optimized out values to {undefined} when they
- // are passed to the debugger. Eventually we should handle them somehow.
- if (value->IsTheHole(isolate_) || value->IsOptimizedOut(isolate_)) {
- DCHECK(!value.is_identical_to(isolate_->factory()->stale_register()));
- value = isolate_->factory()->undefined_value();
- }
- JSObject::SetOwnPropertyIgnoreAttributes(local_scope, name, value, NONE)
- .Check();
- }
-}
+ Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
+ if (VisitContextLocals(visitor, scope_info, context_)) return;
-MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
- DCHECK(HasNestedScopeChain());
- Handle<SharedFunctionInfo> shared(function_->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
+ int count_index = scope_info->ModuleVariableCountIndex();
+ int module_variable_count = Smi::cast(scope_info->get(count_index))->value();
- Handle<JSObject> local_scope =
- isolate_->factory()->NewJSObjectWithNullProto();
- MaterializeStackLocals(local_scope, scope_info);
+ Handle<Module> module(context_->module(), isolate_);
- if (!scope_info->HasContext()) return local_scope;
-
- // Fill all context locals.
- Handle<Context> function_context(context_->closure_context());
- DCHECK_EQ(context_->scope_info(), *scope_info);
- CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
+ for (int i = 0; i < module_variable_count; ++i) {
+ int index;
+ Handle<String> name;
+ {
+ String* raw_name;
+ scope_info->ModuleVariable(i, &raw_name, &index);
+ CHECK(!ScopeInfo::VariableIsSynthetic(raw_name));
+ name = handle(raw_name, isolate_);
+ }
+ Handle<Object> value = Module::LoadVariable(isolate_, module, index);
- // Finally copy any properties from the function context extension.
- // These will be variables introduced by eval.
- if (!function_context->IsNativeContext()) {
- CopyContextExtensionToScopeObject(function_context, local_scope,
- KeyCollectionMode::kIncludePrototypes);
+ // Reflect variables under TDZ as undeclared in scope object.
+ if (value->IsTheHole(isolate_)) continue;
+ if (visitor(name, value)) return;
}
-
- return local_scope;
}
+bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
+ Handle<ScopeInfo> scope_info,
+ Handle<Context> context) const {
+ // Fill all context locals to the context extension.
+ for (int i = 0; i < scope_info->ContextLocalCount(); ++i) {
+ Handle<String> name(scope_info->ContextLocalName(i), isolate_);
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
+ int context_index = Context::MIN_CONTEXT_SLOTS + i;
+ Handle<Object> value(context->get(context_index), isolate_);
+ // Reflect variables under TDZ as undefined in scope object.
+ if (value->IsTheHole(isolate_)) continue;
+ if (visitor(name, value)) return true;
+ }
+ return false;
+}
-// Create a plain JSObject which materializes the closure content for the
-// context.
-Handle<JSObject> ScopeIterator::MaterializeClosure() {
- Handle<Context> context = CurrentContext();
- DCHECK(context->IsFunctionContext() || context->IsEvalContext());
-
- Handle<ScopeInfo> scope_info(context_->scope_info());
+bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
+ for (Variable* var : *current_scope_->locals()) {
+ if (!var->is_this() && ScopeInfo::VariableIsSynthetic(*var->name())) {
+ continue;
+ }
- // Allocate and initialize a JSObject with all the content of this function
- // closure.
- Handle<JSObject> closure_scope =
- isolate_->factory()->NewJSObjectWithNullProto();
+ int index = var->index();
+ Handle<Object> value;
+ switch (var->location()) {
+ case VariableLocation::LOOKUP:
+ UNREACHABLE();
+ break;
+
+ case VariableLocation::UNALLOCATED:
+ if (!var->is_this()) continue;
+ // No idea why we only add it sometimes.
+ if (mode == Mode::ALL) continue;
+ // No idea why this diverges...
+ value = frame_inspector_->GetReceiver();
+ break;
+
+ case VariableLocation::PARAMETER: {
+ if (frame_inspector_ == nullptr) {
+ // Get the variable from the suspended generator.
+ DCHECK(!generator_.is_null());
+ if (var->is_this()) {
+ value = handle(generator_->receiver(), isolate_);
+ } else {
+ FixedArray* parameters_and_registers =
+ generator_->parameters_and_registers();
+ DCHECK_LT(index, parameters_and_registers->length());
+ value = handle(parameters_and_registers->get(index), isolate_);
+ }
+ } else {
+ value = var->is_this() ? frame_inspector_->GetReceiver()
+ : frame_inspector_->GetParameter(index);
+
+ if (value->IsOptimizedOut(isolate_)) {
+ value = isolate_->factory()->undefined_value();
+ } else if (var->is_this() && value->IsTheHole(isolate_)) {
+ value = isolate_->factory()->undefined_value();
+ }
+ }
+ break;
+ }
- // Fill all context locals to the context extension.
- CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
+ case VariableLocation::LOCAL:
+ if (frame_inspector_ == nullptr) {
+ // Get the variable from the suspended generator.
+ DCHECK(!generator_.is_null());
+ FixedArray* parameters_and_registers =
+ generator_->parameters_and_registers();
+ int parameter_count =
+ function_->shared()->scope_info()->ParameterCount();
+ index += parameter_count;
+ DCHECK_LT(index, parameters_and_registers->length());
+ value = handle(parameters_and_registers->get(index), isolate_);
+ if (value->IsTheHole(isolate_)) {
+ value = isolate_->factory()->undefined_value();
+ }
+ } else {
+ value = frame_inspector_->GetExpression(index);
+ if (value->IsOptimizedOut(isolate_)) {
+ // We'll rematerialize this later.
+ if (current_scope_->is_declaration_scope() &&
+ current_scope_->AsDeclarationScope()->arguments() == var) {
+ continue;
+ }
+ value = isolate_->factory()->undefined_value();
+ } else if (value->IsTheHole(isolate_)) {
+ // Reflect variables under TDZ as undeclared in scope object.
+ continue;
+ }
+ }
+ break;
+
+ case VariableLocation::CONTEXT:
+ if (mode == Mode::STACK) continue;
+ // TODO(verwaest): Why don't we want to show it if it's there?...
+ if (var->is_this()) continue;
+ DCHECK(var->IsContextSlot());
+ value = handle(context_->get(index), isolate_);
+ // Reflect variables under TDZ as undeclared in scope object.
+ if (value->IsTheHole(isolate_)) continue;
+ break;
+
+ case VariableLocation::MODULE: {
+ if (mode == Mode::STACK) continue;
+ // if (var->IsExport()) continue;
+ Handle<Module> module(context_->module(), isolate_);
+ value = Module::LoadVariable(isolate_, module, var->index());
+ // Reflect variables under TDZ as undeclared in scope object.
+ if (value->IsTheHole(isolate_)) continue;
+ break;
+ }
+ }
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- CopyContextExtensionToScopeObject(context, closure_scope,
- KeyCollectionMode::kOwnOnly);
- return closure_scope;
+ if (visitor(var->name(), value)) return true;
+ }
+ return false;
}
-
// Retrieve the with-context extension object. If the extension object is
// a proxy, return an empty object.
Handle<JSObject> ScopeIterator::WithContextExtension() {
- Handle<Context> context = CurrentContext();
- DCHECK(context->IsWithContext());
- if (context->extension_receiver()->IsJSProxy()) {
+ DCHECK(context_->IsWithContext());
+ if (context_->extension_receiver()->IsJSProxy()) {
return isolate_->factory()->NewJSObjectWithNullProto();
}
- return handle(JSObject::cast(context->extension_receiver()));
+ return handle(JSObject::cast(context_->extension_receiver()), isolate_);
}
// Create a plain JSObject which materializes the block scope for the specified
// block context.
-Handle<JSObject> ScopeIterator::MaterializeInnerScope() {
- Handle<JSObject> inner_scope =
- isolate_->factory()->NewJSObjectWithNullProto();
-
- Handle<Context> context = Handle<Context>::null();
- if (HasNestedScopeChain()) {
- Handle<ScopeInfo> scope_info = LastNestedScopeChain().scope_info;
- MaterializeStackLocals(inner_scope, scope_info);
- if (scope_info->HasContext()) context = CurrentContext();
- } else {
- context = CurrentContext();
- }
-
- if (!context.is_null()) {
- // Fill all context locals.
- CopyContextLocalsToScopeObject(CurrentScopeInfo(), context, inner_scope);
- CopyContextExtensionToScopeObject(context, inner_scope,
- KeyCollectionMode::kOwnOnly);
- }
- return inner_scope;
-}
-
-
-// Create a plain JSObject which materializes the module scope for the specified
-// module context.
-MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
- Handle<Context> context = CurrentContext();
- DCHECK(context->IsModuleContext());
- Handle<ScopeInfo> scope_info(context->scope_info());
- Handle<JSObject> module_scope =
- isolate_->factory()->NewJSObjectWithNullProto();
- CopyContextLocalsToScopeObject(scope_info, context, module_scope);
- CopyModuleVarsToScopeObject(scope_info, context, module_scope);
- return module_scope;
-}
-
-bool ScopeIterator::SetParameterValue(Handle<ScopeInfo> scope_info,
- Handle<String> parameter_name,
- Handle<Object> new_value) {
- // Setting stack locals of optimized frames is not supported.
- HandleScope scope(isolate_);
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- if (String::Equals(handle(scope_info->ParameterName(i)), parameter_name)) {
- // Suspended generators should not get here because all parameters should
- // be context-allocated.
- DCHECK_NOT_NULL(frame_inspector_);
- JavaScriptFrame* frame = GetFrame();
- if (frame->is_optimized()) {
- return false;
+void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
+ if (InInnerScope()) {
+ if (VisitLocals(visitor, mode)) return;
+ if (mode == Mode::STACK && Type() == ScopeTypeLocal) {
+ // Hide |this| in arrow functions that may be embedded in other functions
+ // but don't force |this| to be context-allocated. Otherwise we'd find the
+ // wrong |this| value.
+ if (!closure_scope_->has_this_declaration() &&
+ !non_locals_->Has(isolate_, isolate_->factory()->this_string())) {
+ if (visitor(isolate_->factory()->this_string(),
+ isolate_->factory()->undefined_value()))
+ return;
}
- frame->SetParameterValue(i, *new_value);
- return true;
- }
- }
- return false;
-}
-
-bool ScopeIterator::SetStackVariableValue(Handle<ScopeInfo> scope_info,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- // Setting stack locals of optimized frames is not supported. Suspended
- // generators are supported.
- HandleScope scope(isolate_);
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
- int stack_local_index = scope_info->StackLocalIndex(i);
- if (frame_inspector_ != nullptr) {
- // Set the variable on the stack.
+ // Add |arguments| to the function scope even if it wasn't used.
+ // Currently we don't yet support materializing the arguments object of
+ // suspended generators. We'd need to read the arguments out from the
+ // suspended generator rather than from an activation as
+ // FunctionGetArguments does.
+ if (frame_inspector_ != nullptr && !closure_scope_->is_arrow_scope() &&
+ (closure_scope_->arguments() == nullptr ||
+ frame_inspector_->GetExpression(closure_scope_->arguments()->index())
+ ->IsOptimizedOut(isolate_))) {
JavaScriptFrame* frame = GetFrame();
- if (frame->is_optimized()) return false;
- frame->SetExpression(stack_local_index, *new_value);
- } else {
- // Set the variable in the suspended generator.
- DCHECK(!generator_.is_null());
- Handle<FixedArray> register_file(generator_->register_file());
- DCHECK_LT(stack_local_index, register_file->length());
- register_file->set(stack_local_index, *new_value);
+ Handle<JSObject> arguments = Accessors::FunctionGetArguments(
+ frame, frame_inspector_->inlined_frame_index());
+ if (visitor(isolate_->factory()->arguments_string(), arguments)) return;
}
- return true;
+ }
+ } else {
+ DCHECK_EQ(Mode::ALL, mode);
+ Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
+ if (VisitContextLocals(visitor, scope_info, context_)) return;
+ }
+
+ if (mode == Mode::ALL && HasContext()) {
+ DCHECK(!context_->IsScriptContext());
+ DCHECK(!context_->IsNativeContext());
+ DCHECK(!context_->IsWithContext());
+ if (!context_->scope_info()->CallsSloppyEval()) return;
+ if (context_->extension_object() == nullptr) return;
+ Handle<JSObject> extension(context_->extension_object(), isolate_);
+ Handle<FixedArray> keys =
+ KeyAccumulator::GetKeys(extension, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandleChecked();
+
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ DCHECK(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)), isolate_);
+ Handle<Object> value = JSReceiver::GetDataProperty(extension, key);
+ if (visitor(key, value)) return;
}
}
- return false;
}
-bool ScopeIterator::SetContextVariableValue(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- HandleScope scope(isolate_);
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- Handle<String> next_name(scope_info->ContextLocalName(i));
- if (String::Equals(variable_name, next_name)) {
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- int context_index = ScopeInfo::ContextSlotIndex(
- scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
- context->set(context_index, *new_value);
- return true;
- }
- }
+bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ // TODO(verwaest): Walk parameters backwards, not forwards.
+ // TODO(verwaest): Use VariableMap rather than locals() list for lookup.
+ for (Variable* var : *current_scope_->locals()) {
+ if (String::Equals(isolate_, var->name(), variable_name)) {
+ int index = var->index();
+ switch (var->location()) {
+ case VariableLocation::LOOKUP:
+ case VariableLocation::UNALLOCATED:
+ // Drop assignments to unallocated locals.
+ DCHECK(var->is_this() ||
+ *variable_name == ReadOnlyRoots(isolate_).arguments_string());
+ return false;
+
+ case VariableLocation::PARAMETER: {
+ if (var->is_this()) return false;
+ if (frame_inspector_ == nullptr) {
+ // Set the variable in the suspended generator.
+ DCHECK(!generator_.is_null());
+ Handle<FixedArray> parameters_and_registers(
+ generator_->parameters_and_registers(), isolate_);
+ DCHECK_LT(index, parameters_and_registers->length());
+ parameters_and_registers->set(index, *new_value);
+ } else {
+ JavaScriptFrame* frame = GetFrame();
+ if (frame->is_optimized()) return false;
+
+ frame->SetParameterValue(index, *new_value);
+ }
+ return true;
+ }
- // TODO(neis): Clean up context "extension" mess.
- if (!context->IsModuleContext() && context->has_extension()) {
- Handle<JSObject> ext(context->extension_object());
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing property value.
- JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
- NONE)
- .Check();
- return true;
+ case VariableLocation::LOCAL:
+ if (frame_inspector_ == nullptr) {
+ // Set the variable in the suspended generator.
+ DCHECK(!generator_.is_null());
+ int parameter_count =
+ function_->shared()->scope_info()->ParameterCount();
+ index += parameter_count;
+ Handle<FixedArray> parameters_and_registers(
+ generator_->parameters_and_registers(), isolate_);
+ DCHECK_LT(index, parameters_and_registers->length());
+ parameters_and_registers->set(index, *new_value);
+ } else {
+ // Set the variable on the stack.
+ JavaScriptFrame* frame = GetFrame();
+ if (frame->is_optimized()) return false;
+
+ frame->SetExpression(index, *new_value);
+ }
+ return true;
+
+ case VariableLocation::CONTEXT:
+ DCHECK(var->IsContextSlot());
+ context_->set(index, *new_value);
+ return true;
+
+ case VariableLocation::MODULE:
+ if (!var->IsExport()) return false;
+ Handle<Module> module(context_->module(), isolate_);
+ Module::StoreVariable(module, var->index(), new_value);
+ return true;
+ }
+ UNREACHABLE();
}
}
return false;
}
-bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- Handle<ScopeInfo> scope_info;
- if (HasNestedScopeChain()) {
- scope_info = handle(function_->shared()->scope_info());
- DCHECK_IMPLIES(scope_info->HasContext(),
- context_->scope_info() == *scope_info);
- } else {
- scope_info = handle(context_->scope_info());
- }
+bool ScopeIterator::SetContextExtensionValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ if (!context_->has_extension()) return false;
- // Parameter might be shadowed in context. Don't stop here.
- bool result = SetParameterValue(scope_info, variable_name, new_value);
+ DCHECK(context_->extension_object()->IsJSContextExtensionObject());
+ Handle<JSObject> ext(context_->extension_object(), isolate_);
+ LookupIterator it(isolate_, ext, variable_name, LookupIterator::OWN);
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
+ DCHECK(maybe.IsJust());
+ if (!maybe.FromJust()) return false;
- // Stack locals.
- if (SetStackVariableValue(scope_info, variable_name, new_value)) {
- return true;
- }
+ CHECK(Object::SetDataProperty(&it, new_value).ToChecked());
+ return true;
+}
- if (scope_info->HasContext() &&
- SetContextVariableValue(scope_info, context_, variable_name, new_value)) {
- return true;
- }
+bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
- return result;
+ VariableMode mode;
+ InitializationFlag flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int slot_index = ScopeInfo::ContextSlotIndex(scope_info, variable_name, &mode,
+ &flag, &maybe_assigned_flag);
+ if (slot_index < 0) return false;
+
+ context_->set(slot_index, *new_value);
+ return true;
}
bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- DCHECK_NOT_NULL(frame_inspector_);
-
- // Get module context and its scope info.
- Handle<Context> context = CurrentContext();
- while (!context->IsModuleContext()) {
- context = handle(context->previous(), isolate_);
- }
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
- DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
-
- if (SetContextVariableValue(scope_info, context, variable_name, new_value)) {
- return true;
- }
-
int cell_index;
- {
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- cell_index = scope_info->ModuleIndex(variable_name, &mode, &init_flag,
- &maybe_assigned_flag);
- }
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ cell_index = context_->scope_info()->ModuleIndex(
+ variable_name, &mode, &init_flag, &maybe_assigned_flag);
// Setting imports is currently not supported.
- bool found = ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport;
- if (found) {
- Module::StoreVariable(handle(context->module(), isolate_), cell_index,
- new_value);
+ if (ModuleDescriptor::GetCellIndexKind(cell_index) !=
+ ModuleDescriptor::kExport) {
+ return false;
}
- return found;
-}
-bool ScopeIterator::SetInnerScopeVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- Handle<ScopeInfo> scope_info = CurrentScopeInfo();
- DCHECK(scope_info->scope_type() == BLOCK_SCOPE ||
- scope_info->scope_type() == EVAL_SCOPE ||
- scope_info->scope_type() == CATCH_SCOPE);
-
- // Setting stack locals of optimized frames is not supported.
- if (SetStackVariableValue(scope_info, variable_name, new_value)) {
- return true;
- }
-
- if (HasContext() && SetContextVariableValue(scope_info, CurrentContext(),
- variable_name, new_value)) {
- return true;
- }
-
- return false;
-}
-
-// This method copies structure of MaterializeClosure method above.
-bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- DCHECK(CurrentContext()->IsFunctionContext() ||
- CurrentContext()->IsEvalContext());
- return SetContextVariableValue(CurrentScopeInfo(), CurrentContext(),
- variable_name, new_value);
+ Handle<Module> module(context_->module(), isolate_);
+ Module::StoreVariable(module, cell_index, new_value);
+ return true;
}
bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- Handle<String> internalized_variable_name =
- isolate_->factory()->InternalizeString(variable_name);
- Handle<Context> context = CurrentContext();
Handle<ScriptContextTable> script_contexts(
- context->global_object()->native_context()->script_context_table());
+ context_->global_object()->native_context()->script_context_table(),
+ isolate_);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, internalized_variable_name,
+ if (ScriptContextTable::Lookup(isolate_, script_contexts, variable_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index);
+ isolate_, script_contexts, lookup_result.context_index);
script_context->set(lookup_result.slot_index, *new_value);
return true;
}
@@ -838,116 +904,5 @@ bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
return false;
}
-void ScopeIterator::CopyContextLocalsToScopeObject(
- Handle<ScopeInfo> scope_info, Handle<Context> context,
- Handle<JSObject> scope_object) {
- Isolate* isolate = scope_info->GetIsolate();
- int local_count = scope_info->ContextLocalCount();
- if (local_count == 0) return;
- // Fill all context locals to the context extension.
- for (int i = 0; i < local_count; ++i) {
- Handle<String> name(scope_info->ContextLocalName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- int context_index = Context::MIN_CONTEXT_SLOTS + i;
- Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
- // Reflect variables under TDZ as undefined in scope object.
- if (value->IsTheHole(isolate)) continue;
- // This should always succeed.
- // TODO(verwaest): Use AddDataProperty instead.
- JSObject::SetOwnPropertyIgnoreAttributes(scope_object, name, value, NONE)
- .Check();
- }
-}
-
-void ScopeIterator::CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- Isolate* isolate = scope_info->GetIsolate();
-
- int module_variable_count =
- Smi::cast(scope_info->get(scope_info->ModuleVariableCountIndex()))
- ->value();
- for (int i = 0; i < module_variable_count; ++i) {
- Handle<String> local_name;
- Handle<Object> value;
- {
- String* name;
- int index;
- scope_info->ModuleVariable(i, &name, &index);
- CHECK(!ScopeInfo::VariableIsSynthetic(name));
- local_name = handle(name, isolate);
- value = Module::LoadVariable(handle(context->module(), isolate), index);
- }
-
- // Reflect variables under TDZ as undefined in scope object.
- if (value->IsTheHole(isolate)) continue;
- // This should always succeed.
- // TODO(verwaest): Use AddDataProperty instead.
- JSObject::SetOwnPropertyIgnoreAttributes(scope_object, local_name, value,
- NONE)
- .Check();
- }
-}
-
-void ScopeIterator::CopyContextExtensionToScopeObject(
- Handle<Context> context, Handle<JSObject> scope_object,
- KeyCollectionMode mode) {
- if (context->extension_object() == nullptr) return;
- Handle<JSObject> extension(context->extension_object());
- Handle<FixedArray> keys =
- KeyAccumulator::GetKeys(extension, mode, ENUMERABLE_STRINGS)
- .ToHandleChecked();
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value =
- Object::GetPropertyOrElement(extension, key).ToHandleChecked();
- JSObject::SetOwnPropertyIgnoreAttributes(scope_object, key, value, NONE)
- .Check();
- }
-}
-
-void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
- int position) {
- if (scope->is_function_scope()) {
- // Do not collect scopes of nested inner functions inside the current one.
- // Nested arrow functions could have the same end positions.
- Handle<JSFunction> function = GetFunction();
- if (scope->start_position() > function->shared()->StartPosition() &&
- scope->end_position() <= function->shared()->EndPosition()) {
- return;
- }
- }
- if (scope->is_hidden()) {
- // We need to add this chain element in case the scope has a context
- // associated. We need to keep the scope chain and context chain in sync.
- nested_scope_chain_.emplace_back(scope->scope_info());
- } else {
- nested_scope_chain_.emplace_back(
- scope->scope_info(), scope->start_position(), scope->end_position());
- }
- for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
- inner_scope = inner_scope->sibling()) {
- int beg_pos = inner_scope->start_position();
- int end_pos = inner_scope->end_position();
- DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
- if (beg_pos <= position && position < end_pos) {
- GetNestedScopeChain(isolate, inner_scope, position);
- return;
- }
- }
-}
-
-bool ScopeIterator::HasNestedScopeChain() const {
- return !nested_scope_chain_.empty();
-}
-
-ScopeIterator::ExtendedScopeInfo& ScopeIterator::LastNestedScopeChain() {
- DCHECK(HasNestedScopeChain());
- return nested_scope_chain_.back();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index c61cf73cc5..f598f3e994 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -48,37 +48,38 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
ScopeIterator(Isolate* isolate, Handle<JSGeneratorObject> generator);
+ ~ScopeIterator();
- V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
+ Handle<JSObject> MaterializeScopeDetails();
// More scopes?
- bool Done() { return context_.is_null(); }
+ bool Done() const { return context_.is_null(); }
// Move to the next scope.
void Next();
+ // Restart to the first scope and context.
+ void Restart();
+
// Return the type of the current scope.
- ScopeType Type();
+ ScopeType Type() const;
+
+ // Indicates which variables should be visited. Either only variables from the
+ // scope that are available on the stack, or all variables.
+ enum class Mode { STACK, ALL };
// Return the JavaScript object with the content of the current scope.
- MaybeHandle<JSObject> ScopeObject();
+ Handle<JSObject> ScopeObject(Mode mode);
- bool HasContext();
+ // Returns whether the current scope declares any variables.
+ bool DeclaresLocals(Mode mode) const;
// Set variable value and return true on success.
bool SetVariableValue(Handle<String> variable_name, Handle<Object> new_value);
- Handle<ScopeInfo> CurrentScopeInfo();
-
- // Return the context for this scope. For the local context there might not
- // be an actual context.
- Handle<Context> CurrentContext();
-
// Populate the set with collected non-local variable names.
Handle<StringSet> GetNonLocals();
- // Return function which represents closure for current scope.
- Handle<JSFunction> GetFunction() { return function_; }
// Similar to JSFunction::GetName return the function's name or it's inferred
// name.
Handle<Object> GetFunctionDebugName() const;
@@ -94,96 +95,63 @@ class ScopeIterator {
void DebugPrint();
#endif
- private:
- struct ExtendedScopeInfo {
- ExtendedScopeInfo(Handle<ScopeInfo> info, int start, int end)
- : scope_info(info), start_position(start), end_position(end) {}
- explicit ExtendedScopeInfo(Handle<ScopeInfo> info)
- : scope_info(info), start_position(-1), end_position(-1) {}
- Handle<ScopeInfo> scope_info;
- int start_position;
- int end_position;
- bool is_hidden() { return start_position == -1 && end_position == -1; }
- };
+ bool InInnerScope() const { return !function_.is_null(); }
+ bool HasContext() const;
+ Handle<Context> CurrentContext() const {
+ DCHECK(HasContext());
+ return context_;
+ }
+ private:
Isolate* isolate_;
+ ParseInfo* info_ = nullptr;
FrameInspector* const frame_inspector_ = nullptr;
Handle<JSGeneratorObject> generator_;
Handle<JSFunction> function_;
- Handle<ScopeInfo> function_scope_info_;
Handle<Context> context_;
Handle<Script> script_;
- std::vector<ExtendedScopeInfo> nested_scope_chain_;
Handle<StringSet> non_locals_;
- bool seen_script_scope_;
+ DeclarationScope* closure_scope_ = nullptr;
+ Scope* start_scope_ = nullptr;
+ Scope* current_scope_ = nullptr;
+ bool seen_script_scope_ = false;
- inline JavaScriptFrame* GetFrame() {
- return frame_inspector_->GetArgumentsFrame();
+ inline JavaScriptFrame* GetFrame() const {
+ return frame_inspector_->javascript_frame();
}
- Handle<Context> GetContext();
int GetSourcePosition();
- void MaterializeStackLocals(Handle<JSObject> local_scope,
- Handle<ScopeInfo> scope_info);
-
void TryParseAndRetrieveScopes(ScopeIterator::Option option);
void RetrieveScopeChain(DeclarationScope* scope);
- void CollectNonLocals(ParseInfo* info, DeclarationScope* scope);
-
void UnwrapEvaluationContext();
- V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
- V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
- V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
- Handle<JSObject> MaterializeClosure();
- Handle<JSObject> MaterializeInnerScope();
+ typedef std::function<bool(Handle<String> name, Handle<Object> value)>
+ Visitor;
+
Handle<JSObject> WithContextExtension();
bool SetLocalVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
- bool SetInnerScopeVariableValue(Handle<String> variable_name,
- Handle<Object> new_value);
- bool SetClosureVariableValue(Handle<String> variable_name,
+ bool SetContextVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
+ bool SetContextExtensionValue(Handle<String> variable_name,
+ Handle<Object> new_value);
bool SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
bool SetModuleVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
// Helper functions.
- bool SetParameterValue(Handle<ScopeInfo> scope_info,
- Handle<String> parameter_name,
- Handle<Object> new_value);
- bool SetStackVariableValue(Handle<ScopeInfo> scope_info,
- Handle<String> variable_name,
- Handle<Object> new_value);
- bool SetContextVariableValue(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value);
-
- void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object);
- void CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object);
- void CopyContextExtensionToScopeObject(Handle<Context> context,
- Handle<JSObject> scope_object,
- KeyCollectionMode mode);
-
- // Get the chain of nested scopes within this scope for the source statement
- // position. The scopes will be added to the list from the outermost scope to
- // the innermost scope. Only nested block, catch or with scopes are tracked
- // and will be returned, but no inner function scopes.
- void GetNestedScopeChain(Isolate* isolate, Scope* scope,
- int statement_position);
-
- bool HasNestedScopeChain() const;
- ExtendedScopeInfo& LastNestedScopeChain();
+ void VisitScope(const Visitor& visitor, Mode mode) const;
+ void VisitLocalScope(const Visitor& visitor, Mode mode) const;
+ void VisitScriptScope(const Visitor& visitor) const;
+ void VisitModuleScope(const Visitor& visitor) const;
+ bool VisitLocals(const Visitor& visitor, Mode mode) const;
+ bool VisitContextLocals(const Visitor& visitor, Handle<ScopeInfo> scope_info,
+ Handle<Context> context) const;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index ab703f1f0e..bf1e1b623b 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -84,7 +84,7 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
// So let's try to fetch it using same logic as is used to retrieve 'this'
// during DebugEvaluate::Local.
Handle<JSFunction> function = frame_inspector_->GetFunction();
- Handle<Context> context(function->context());
+ Handle<Context> context(function->context(), isolate_);
// Arrow function defined in top level function without references to
// variables may have NativeContext as context.
if (!context->IsFunctionContext()) return v8::MaybeLocal<v8::Value>();
@@ -92,10 +92,11 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
ScopeIterator::COLLECT_NON_LOCALS);
// We lookup this variable in function context only when it is used in arrow
// function otherwise V8 can optimize it out.
- if (!scope_iterator.GetNonLocals()->Has(isolate_->factory()->this_string()))
+ if (!scope_iterator.GetNonLocals()->Has(isolate_,
+ isolate_->factory()->this_string()))
return v8::MaybeLocal<v8::Value>();
- Handle<ScopeInfo> scope_info(context->scope_info());
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index cc30ddee61..794087115f 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -100,7 +100,7 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
// coverage.
if (isolate->is_best_effort_code_coverage()) {
isolate->SetFeedbackVectorsForProfilingTools(
- isolate->heap()->undefined_value());
+ ReadOnlyRoots(isolate).undefined_value());
}
}
} else {
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index d25c259846..47de9523a5 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -30,6 +30,7 @@
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/js-promise-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -78,11 +79,9 @@ class Debug::TemporaryObjectsTracker : public HeapObjectAllocationTracker {
};
Debug::Debug(Isolate* isolate)
- : debug_context_(Handle<Context>()),
- is_active_(false),
+ : is_active_(false),
hook_on_function_call_(false),
is_suppressed_(false),
- live_edit_enabled_(false),
break_disabled_(false),
break_points_active_(true),
break_on_exception_(false),
@@ -161,9 +160,10 @@ int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
return closest_break;
}
-bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
+bool BreakLocation::HasBreakPoint(Isolate* isolate,
+ Handle<DebugInfo> debug_info) const {
// First check whether there is a break point with the same source position.
- if (!debug_info->HasBreakPoint(position_)) return false;
+ if (!debug_info->HasBreakPoint(isolate, position_)) return false;
if (debug_info->CanBreakAtEntry()) {
DCHECK_EQ(Debug::kBreakAtEntryPosition, position_);
return debug_info->BreakAtEntry();
@@ -300,7 +300,7 @@ void BreakIterator::ClearDebugBreak() {
BreakLocation BreakIterator::GetBreakLocation() {
Handle<AbstractCode> code(
- AbstractCode::cast(debug_info_->DebugBytecodeArray()));
+ AbstractCode::cast(debug_info_->DebugBytecodeArray()), isolate());
DebugBreakType type = GetDebugBreakType();
int generator_object_reg_index = -1;
if (type == DEBUG_BREAK_SLOT_AT_SUSPEND) {
@@ -310,8 +310,8 @@ BreakLocation BreakIterator::GetBreakLocation() {
// bytecode array, and we'll read the actual generator object off the
// interpreter stack frame in GetGeneratorObjectForSuspendedFrame.
BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
- interpreter::BytecodeArrayAccessor accessor(handle(bytecode_array),
- code_offset());
+ interpreter::BytecodeArrayAccessor accessor(
+ handle(bytecode_array, isolate()), code_offset());
DCHECK_EQ(accessor.current_bytecode(),
interpreter::Bytecode::kSuspendGenerator);
@@ -344,47 +344,30 @@ void Debug::ThreadInit() {
thread_local_.ignore_step_into_function_ = Smi::kZero;
thread_local_.target_frame_count_ = -1;
thread_local_.return_value_ = Smi::kZero;
- thread_local_.async_task_count_ = 0;
thread_local_.last_breakpoint_id_ = 0;
clear_suspended_generator();
thread_local_.restart_fp_ = kNullAddress;
base::Relaxed_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
+ thread_local_.break_on_next_function_call_ = false;
UpdateHookOnFunctionCall();
}
char* Debug::ArchiveDebug(char* storage) {
- MemCopy(storage, reinterpret_cast<char*>(&thread_local_),
- ArchiveSpacePerThread());
+ // Simply reset state. Don't archive anything.
+ ThreadInit();
return storage + ArchiveSpacePerThread();
}
-char* Debug::RestoreDebug(char* storage) {
- MemCopy(reinterpret_cast<char*>(&thread_local_), storage,
- ArchiveSpacePerThread());
-
- if (in_debug_scope()) {
- // If this thread was in a DebugScope when we archived it, restore the
- // previous debugging state now. Note that in_debug_scope() returns
- // true when thread_local_.current_debug_scope_ (restored by MemCopy
- // above) is non-null.
-
- // Clear any one-shot breakpoints that may have been set by the other
- // thread, and reapply breakpoints for this thread.
- HandleScope scope(isolate_);
- ClearOneShot();
-
- if (thread_local_.last_step_action_ != StepNone) {
- // Reset the previous step action for this thread.
- PrepareStep(thread_local_.last_step_action_);
- }
- }
+char* Debug::RestoreDebug(char* storage) {
+ // Simply reset state. Don't restore anything.
+ ThreadInit();
return storage + ArchiveSpacePerThread();
}
-int Debug::ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+int Debug::ArchiveSpacePerThread() { return 0; }
void Debug::Iterate(RootVisitor* v) {
v->VisitRootPointer(Root::kDebug, nullptr, &thread_local_.return_value_);
@@ -394,9 +377,10 @@ void Debug::Iterate(RootVisitor* v) {
&thread_local_.ignore_step_into_function_);
}
-DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info) : next_(nullptr) {
+DebugInfoListNode::DebugInfoListNode(Isolate* isolate, DebugInfo* debug_info)
+ : next_(nullptr) {
// Globalize the request debug info object and make it weak.
- GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
+ GlobalHandles* global_handles = isolate->global_handles();
debug_info_ = global_handles->Create(debug_info).location();
}
@@ -407,59 +391,12 @@ DebugInfoListNode::~DebugInfoListNode() {
debug_info_ = nullptr;
}
-
-bool Debug::Load() {
- // Return if debugger is already loaded.
- if (is_loaded()) return true;
-
- // Bail out if we're already in the process of compiling the native
- // JavaScript source code for the debugger.
- if (is_suppressed_) return false;
- SuppressDebug while_loading(this);
-
- // Disable breakpoints and interrupts while compiling and running the
- // debugger scripts including the context creation code.
- DisableBreak disable(this);
- PostponeInterruptsScope postpone(isolate_);
-
- // Create the debugger context.
- HandleScope scope(isolate_);
- ExtensionConfiguration no_extensions;
- // TODO(yangguo): we rely on the fact that first context snapshot is usable
- // as debug context. This dependency is gone once we remove
- // debug context completely.
- static const int kFirstContextSnapshotIndex = 0;
- Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
- kFirstContextSnapshotIndex, v8::DeserializeEmbedderFieldsCallback(),
- DEBUG_CONTEXT);
-
- // Fail if no context could be created.
- if (context.is_null()) return false;
-
- debug_context_ = isolate_->global_handles()->Create(*context);
- GlobalHandles::AnnotateStrongRetainer(
- Handle<Object>::cast(debug_context_).location(),
- "v8::internal::Debug::debug_context_");
-
- feature_tracker()->Track(DebugFeatureTracker::kActive);
-
- return true;
-}
-
-
void Debug::Unload() {
ClearAllBreakPoints();
ClearStepping();
RemoveAllCoverageInfos();
- RemoveDebugDelegate();
-
- // Return debugger is not loaded.
- if (!is_loaded()) return;
-
- // Clear debugger context global handle.
- GlobalHandles::Destroy(Handle<Object>::cast(debug_context_).location());
- debug_context_ = Handle<Context>();
+ ClearAllDebuggerHints();
+ debug_delegate_ = nullptr;
}
void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
@@ -471,14 +408,13 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Enter the debugger.
DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
// Postpone interrupt during breakpoint processing.
PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
// Return if we fail to retrieve debug info.
- Handle<SharedFunctionInfo> shared(break_target->shared());
+ Handle<SharedFunctionInfo> shared(break_target->shared(), isolate_);
if (!EnsureBreakInfo(shared)) return;
PrepareFunctionForDebugExecution(shared);
@@ -490,11 +426,13 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Find actual break points, if any, and trigger debug break event.
MaybeHandle<FixedArray> break_points_hit =
CheckBreakPoints(debug_info, &location);
- if (!break_points_hit.is_null()) {
+ if (!break_points_hit.is_null() || break_on_next_function_call()) {
// Clear all current stepping setup.
ClearStepping();
// Notify the debug event listeners.
- OnDebugBreak(break_points_hit.ToHandleChecked());
+ OnDebugBreak(!break_points_hit.is_null()
+ ? break_points_hit.ToHandleChecked()
+ : isolate_->factory()->empty_fixed_array());
return;
}
@@ -575,7 +513,7 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
BreakLocation* location,
bool* has_break_points) {
bool has_break_points_to_check =
- break_points_active_ && location->HasBreakPoint(debug_info);
+ break_points_active_ && location->HasBreakPoint(isolate_, debug_info);
if (has_break_points) *has_break_points = has_break_points_to_check;
if (!has_break_points_to_check) return {};
@@ -594,10 +532,9 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
DCHECK(!summary.IsWasm());
Handle<JSFunction> function = summary.AsJavaScript().function();
if (!function->shared()->HasBreakInfo()) return false;
- Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
+ Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo(), isolate_);
// Enter the debugger.
DebugScope debug_scope(this);
- if (debug_scope.failed()) return false;
std::vector<BreakLocation> break_locations;
BreakLocation::AllAtCurrentStatement(debug_info, frame, &break_locations);
bool has_break_points_at_all = false;
@@ -611,35 +548,13 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
return has_break_points_at_all;
}
-MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
- Handle<Object> args[],
- bool catch_exceptions) {
- AllowJavascriptExecutionDebugOnly allow_script(isolate_);
- PostponeInterruptsScope no_interrupts(isolate_);
- AssertDebugContext();
- Handle<JSReceiver> holder =
- Handle<JSReceiver>::cast(isolate_->natives_utils_object());
- Handle<JSFunction> fun = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(isolate_, holder, name).ToHandleChecked());
- Handle<Object> undefined = isolate_->factory()->undefined_value();
- if (catch_exceptions) {
- MaybeHandle<Object> maybe_exception;
- return Execution::TryCall(isolate_, fun, undefined, argc, args,
- Execution::MessageHandling::kReport,
- &maybe_exception);
- } else {
- return Execution::Call(isolate_, fun, undefined, argc, args);
- }
-}
-
-
// Check whether a single break point object is triggered.
bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
bool is_break_at_entry) {
HandleScope scope(isolate_);
if (!break_point->condition()->length()) return true;
- Handle<String> condition(break_point->condition());
+ Handle<String> condition(break_point->condition(), isolate_);
MaybeHandle<Object> maybe_result;
Handle<Object> result;
@@ -661,7 +576,7 @@ bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
}
return false;
}
- return result->BooleanValue();
+ return result->BooleanValue(isolate_);
}
bool Debug::SetBreakPoint(Handle<JSFunction> function,
@@ -670,19 +585,19 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
HandleScope scope(isolate_);
// Make sure the function is compiled and has set up the debug info.
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
if (!EnsureBreakInfo(shared)) return false;
PrepareFunctionForDebugExecution(shared);
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Source positions starts with zero.
DCHECK_LE(0, *source_position);
// Find the break point and change it.
*source_position = FindBreakablePosition(debug_info, *source_position);
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
+ DebugInfo::SetBreakPoint(isolate_, debug_info, *source_position, break_point);
// At least one active break point now.
- DCHECK_LT(0, debug_info->GetBreakPointCount());
+ DCHECK_LT(0, debug_info->GetBreakPointCount(isolate_));
ClearBreakPoints(debug_info);
ApplyBreakPoints(debug_info);
@@ -722,7 +637,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
*source_position = shared->StartPosition();
}
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Find breakable position returns first breakable position after
// *source_position, it can return 0 if no break location is found after
@@ -731,9 +646,9 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
if (breakable_position < *source_position) return false;
*source_position = breakable_position;
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
+ DebugInfo::SetBreakPoint(isolate_, debug_info, *source_position, break_point);
// At least one active break point now.
- DCHECK_LT(0, debug_info->GetBreakPointCount());
+ DCHECK_LT(0, debug_info->GetBreakPointCount(isolate_));
ClearBreakPoints(debug_info);
ApplyBreakPoints(debug_info);
@@ -747,38 +662,25 @@ int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
if (debug_info->CanBreakAtEntry()) {
return kBreakAtEntryPosition;
} else {
- DCHECK(debug_info->HasDebugBytecodeArray());
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
BreakIterator it(debug_info);
it.SkipToPosition(source_position);
return it.position();
}
}
-void Debug::ApplyInstrumentation(Handle<SharedFunctionInfo> shared) {
- DCHECK(shared->HasBytecodeArray());
- Handle<DebugInfo> debug_info(GetOrCreateDebugInfo(shared));
- DCHECK_NE(debug_info->DebugExecutionMode(), isolate_->debug_execution_mode());
- if (isolate_->debug_execution_mode() == DebugInfo::kBreakpoints) {
- ClearSideEffectChecks(debug_info);
- ApplyBreakPoints(debug_info);
- } else {
- ClearBreakPoints(debug_info);
- ApplySideEffectChecks(debug_info);
- }
-}
-
void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
DisallowHeapAllocation no_gc;
if (debug_info->CanBreakAtEntry()) {
debug_info->SetBreakAtEntry();
} else {
- if (!debug_info->HasDebugBytecodeArray()) return;
+ if (!debug_info->HasInstrumentedBytecodeArray()) return;
FixedArray* break_points = debug_info->break_points();
for (int i = 0; i < break_points->length(); i++) {
if (break_points->get(i)->IsUndefined(isolate_)) continue;
BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
- if (info->GetBreakPointCount() == 0) continue;
- DCHECK(debug_info->HasDebugBytecodeArray());
+ if (info->GetBreakPointCount(isolate_) == 0) continue;
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
BreakIterator it(debug_info);
it.SkipToPosition(info->source_position());
it.SetDebugBreak();
@@ -793,7 +695,8 @@ void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
} else {
// If we attempt to clear breakpoints but none exist, simply return. This
// can happen e.g. CoverageInfos exist but no breakpoints are set.
- if (!debug_info->HasDebugBytecodeArray() || !debug_info->HasBreakInfo()) {
+ if (!debug_info->HasInstrumentedBytecodeArray() ||
+ !debug_info->HasBreakInfo()) {
return;
}
@@ -810,13 +713,13 @@ void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
if (!node->debug_info()->HasBreakInfo()) continue;
- Handle<Object> result =
- DebugInfo::FindBreakPointInfo(node->debug_info(), break_point);
+ Handle<Object> result = DebugInfo::FindBreakPointInfo(
+ isolate_, node->debug_info(), break_point);
if (result->IsUndefined(isolate_)) continue;
Handle<DebugInfo> debug_info = node->debug_info();
- if (DebugInfo::ClearBreakPoint(debug_info, break_point)) {
+ if (DebugInfo::ClearBreakPoint(isolate_, debug_info, break_point)) {
ClearBreakPoints(debug_info);
- if (debug_info->GetBreakPointCount() == 0) {
+ if (debug_info->GetBreakPointCount(isolate_) == 0) {
RemoveBreakInfoAndMaybeFree(debug_info);
} else {
ApplyBreakPoints(debug_info);
@@ -826,6 +729,17 @@ void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
}
}
+int Debug::GetFunctionDebuggingId(Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared = handle(function->shared(), isolate_);
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
+ int id = debug_info->debugging_id();
+ if (id == DebugInfo::kNoDebuggingId) {
+ id = isolate_->heap()->NextDebuggingId();
+ debug_info->set_debugging_id(id);
+ }
+ return id;
+}
+
bool Debug::SetBreakpointForFunction(Handle<JSFunction> function,
Handle<String> condition, int* id) {
*id = ++thread_local_.last_breakpoint_id_;
@@ -845,7 +759,7 @@ void Debug::RemoveBreakpoint(int id) {
void Debug::ClearAllBreakPoints() {
ClearAllDebugInfos([=](Handle<DebugInfo> info) {
ClearBreakPoints(info);
- return info->ClearBreakInfo();
+ info->ClearBreakInfo(isolate_);
});
}
@@ -856,9 +770,9 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
if (!EnsureBreakInfo(shared)) return;
PrepareFunctionForDebugExecution(shared);
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Flood the function with break points.
- DCHECK(debug_info->HasDebugBytecodeArray());
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
if (returns_only && !it.GetBreakLocation().IsReturnOrSuspend()) continue;
it.SetDebugBreak();
@@ -884,7 +798,7 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
int position) {
- Handle<Object> break_points = debug_info->GetBreakPoints(position);
+ Handle<Object> break_points = debug_info->GetBreakPoints(isolate_, position);
bool is_break_at_entry = debug_info->BreakAtEntry();
DCHECK(!break_points->IsUndefined(isolate_));
if (!break_points->IsFixedArray()) {
@@ -897,7 +811,7 @@ MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
return break_points_hit;
}
- Handle<FixedArray> array(FixedArray::cast(*break_points));
+ Handle<FixedArray> array(FixedArray::cast(*break_points), isolate_);
int num_objects = array->length();
Handle<FixedArray> break_points_hit =
isolate_->factory()->NewFixedArray(num_objects);
@@ -910,16 +824,32 @@ MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
}
}
if (break_points_hit_count == 0) return {};
- break_points_hit->Shrink(break_points_hit_count);
+ break_points_hit->Shrink(isolate_, break_points_hit_count);
return break_points_hit;
}
+void Debug::SetBreakOnNextFunctionCall() {
+ // This method forces V8 to break on next function call regardless current
+ // last_step_action_. If any break happens between SetBreakOnNextFunctionCall
+ // and ClearBreakOnNextFunctionCall, we will clear this flag and stepping. If
+ // break does not happen, e.g. all called functions are blackboxed or no
+ // function is called, then we will clear this flag and let stepping continue
+ // its normal business.
+ thread_local_.break_on_next_function_call_ = true;
+ UpdateHookOnFunctionCall();
+}
+
+void Debug::ClearBreakOnNextFunctionCall() {
+ thread_local_.break_on_next_function_call_ = false;
+ UpdateHookOnFunctionCall();
+}
+
void Debug::PrepareStepIn(Handle<JSFunction> function) {
- CHECK(last_step_action() >= StepIn);
+ CHECK(last_step_action() >= StepIn || break_on_next_function_call());
if (ignore_events()) return;
if (in_debug_scope()) return;
if (break_disabled()) return;
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
if (IsBlackboxed(shared)) return;
if (*function == thread_local_.ignore_step_into_function_) return;
thread_local_.ignore_step_into_function_ = Smi::kZero;
@@ -934,7 +864,8 @@ void Debug::PrepareStepInSuspendedGenerator() {
thread_local_.last_step_action_ = StepIn;
UpdateHookOnFunctionCall();
Handle<JSFunction> function(
- JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
+ JSGeneratorObject::cast(thread_local_.suspended_generator_)->function(),
+ isolate_);
FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
clear_suspended_generator();
}
@@ -1001,7 +932,7 @@ void Debug::PrepareStepOnThrow() {
continue;
}
Handle<SharedFunctionInfo> info(
- summary.AsJavaScript().function()->shared());
+ summary.AsJavaScript().function()->shared(), isolate_);
if (IsBlackboxed(info)) continue;
FloodWithOneShot(info);
return;
@@ -1047,11 +978,11 @@ void Debug::PrepareStep(StepAction step_action) {
// Get the debug info (create it if it does not exist).
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
Handle<JSFunction> function(summary.function());
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
if (!EnsureBreakInfo(shared)) return;
PrepareFunctionForDebugExecution(shared);
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
@@ -1138,23 +1069,23 @@ void Debug::PrepareStep(StepAction step_action) {
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
if (!shared->HasBreakInfo()) {
return isolate->factory()->undefined_value();
}
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- if (debug_info->GetBreakPointCount() == 0) {
+
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate);
+ if (debug_info->GetBreakPointCount(isolate) == 0) {
return isolate->factory()->undefined_value();
}
- Handle<FixedArray> locations =
- isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
+ Handle<FixedArray> locations = isolate->factory()->NewFixedArray(
+ debug_info->GetBreakPointCount(isolate));
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); ++i) {
if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(debug_info->break_points()->get(i));
- int break_points = break_point_info->GetBreakPointCount();
+ int break_points = break_point_info->GetBreakPointCount(isolate);
if (break_points == 0) continue;
for (int j = 0; j < break_points; ++j) {
locations->set(count++,
@@ -1175,6 +1106,7 @@ void Debug::ClearStepping() {
thread_local_.fast_forward_to_return_ = false;
thread_local_.last_frame_count_ = -1;
thread_local_.target_frame_count_ = -1;
+ thread_local_.break_on_next_function_call_ = false;
UpdateHookOnFunctionCall();
}
@@ -1254,14 +1186,17 @@ void Debug::PrepareFunctionForDebugExecution(
if (debug_info->flags() & DebugInfo::kPreparedForDebugExecution) return;
// Make a copy of the bytecode array if available.
- Handle<Object> maybe_debug_bytecode_array =
+ Handle<Object> maybe_original_bytecode_array =
isolate_->factory()->undefined_value();
if (shared->HasBytecodeArray()) {
- Handle<BytecodeArray> original(shared->GetBytecodeArray());
- maybe_debug_bytecode_array =
- isolate_->factory()->CopyBytecodeArray(original);
+ Handle<BytecodeArray> original_bytecode_array =
+ handle(shared->GetBytecodeArray(), isolate_);
+ Handle<BytecodeArray> debug_bytecode_array =
+ isolate_->factory()->CopyBytecodeArray(original_bytecode_array);
+ shared->SetDebugBytecodeArray(*debug_bytecode_array);
+ maybe_original_bytecode_array = original_bytecode_array;
}
- debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
+ debug_info->set_original_bytecode_array(*maybe_original_bytecode_array);
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
@@ -1314,7 +1249,7 @@ void Debug::InstallDebugBreakTrampoline() {
if (!shared->HasDebugInfo()) continue;
if (!shared->GetDebugInfo()->CanBreakAtEntry()) continue;
if (!fun->is_compiled()) {
- needs_compile.push_back(handle(fun));
+ needs_compile.push_back(handle(fun, isolate_));
} else {
fun->set_code(*trampoline);
}
@@ -1344,7 +1279,7 @@ void GetBreakablePositions(Iterator* it, int start_position, int end_position,
void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
int end_position,
std::vector<BreakLocation>* locations) {
- DCHECK(debug_info->HasDebugBytecodeArray());
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
BreakIterator it(debug_info);
GetBreakablePositions(&it, start_position, end_position, locations);
}
@@ -1364,7 +1299,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
if (!EnsureBreakInfo(shared)) return false;
PrepareFunctionForDebugExecution(shared);
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
FindBreakablePositions(debug_info, start_position, end_position, locations);
return true;
}
@@ -1372,7 +1307,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
while (true) {
HandleScope scope(isolate_);
std::vector<Handle<SharedFunctionInfo>> candidates;
- SharedFunctionInfo::ScriptIterator iterator(script);
+ SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
info = iterator.Next()) {
if (info->EndPosition() < start_position ||
@@ -1381,7 +1316,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
}
if (!info->IsSubjectToDebugging()) continue;
if (!info->is_compiled() && !info->allows_lazy_compilation()) continue;
- candidates.push_back(i::handle(info));
+ candidates.push_back(i::handle(info, isolate_));
}
bool was_compiled = false;
@@ -1402,7 +1337,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
for (const auto& candidate : candidates) {
CHECK(candidate->HasBreakInfo());
- Handle<DebugInfo> debug_info(candidate->GetDebugInfo());
+ Handle<DebugInfo> debug_info(candidate->GetDebugInfo(), isolate_);
FindBreakablePositions(debug_info, start_position, end_position,
locations);
}
@@ -1479,7 +1414,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
SharedFunctionInfo* shared;
{
SharedFunctionInfoFinder finder(position);
- SharedFunctionInfo::ScriptIterator iterator(script);
+ SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
info = iterator.Next()) {
finder.NewCandidate(info);
@@ -1488,7 +1423,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
if (shared == nullptr) break;
// We found it if it's already compiled.
if (shared->is_compiled()) {
- Handle<SharedFunctionInfo> shared_handle(shared);
+ Handle<SharedFunctionInfo> shared_handle(shared, isolate_);
// If the iteration count is larger than 1, we had to compile the outer
// function in order to create this shared function info. So there can
// be no JSFunction referencing it. We can anticipate creating a debug
@@ -1504,7 +1439,8 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
HandleScope scope(isolate_);
// Code that cannot be compiled lazily are internal and not debuggable.
DCHECK(shared->allows_lazy_compilation());
- if (!Compiler::Compile(handle(shared), Compiler::CLEAR_EXCEPTION)) break;
+ if (!Compiler::Compile(handle(shared, isolate_), Compiler::CLEAR_EXCEPTION))
+ break;
}
return isolate_->factory()->undefined_value();
}
@@ -1550,11 +1486,11 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> Debug::GetOrCreateDebugInfo(
Handle<SharedFunctionInfo> shared) {
- if (shared->HasDebugInfo()) return handle(shared->GetDebugInfo());
+ if (shared->HasDebugInfo()) return handle(shared->GetDebugInfo(), isolate_);
// Create debug info and add it to the list.
Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
- DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
+ DebugInfoListNode* node = new DebugInfoListNode(isolate_, *debug_info);
node->set_next(debug_info_list_);
debug_info_list_ = node;
@@ -1575,7 +1511,12 @@ void Debug::InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
void Debug::RemoveAllCoverageInfos() {
ClearAllDebugInfos(
- [=](Handle<DebugInfo> info) { return info->ClearCoverageInfo(); });
+ [=](Handle<DebugInfo> info) { info->ClearCoverageInfo(isolate_); });
+}
+
+void Debug::ClearAllDebuggerHints() {
+ ClearAllDebugInfos(
+ [=](Handle<DebugInfo> info) { info->set_debugger_hints(0); });
}
void Debug::FindDebugInfo(Handle<DebugInfo> debug_info,
@@ -1598,7 +1539,8 @@ void Debug::ClearAllDebugInfos(DebugInfoClearFunction clear_function) {
while (current != nullptr) {
DebugInfoListNode* next = current->next();
Handle<DebugInfo> debug_info = current->debug_info();
- if (clear_function(debug_info)) {
+ clear_function(debug_info);
+ if (debug_info->IsEmpty()) {
FreeDebugInfoListNode(prev, current);
current = next;
} else {
@@ -1609,8 +1551,8 @@ void Debug::ClearAllDebugInfos(DebugInfoClearFunction clear_function) {
}
void Debug::RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info) {
- bool should_unlink = debug_info->ClearBreakInfo();
- if (should_unlink) {
+ debug_info->ClearBreakInfo(isolate_);
+ if (debug_info->IsEmpty()) {
DebugInfoListNode* prev;
DebugInfoListNode* node;
FindDebugInfo(debug_info, &prev, &node);
@@ -1629,10 +1571,11 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
prev->set_next(node->next());
}
- // Pack debugger hints back into the SFI::debug_info field.
+ // Pack function_identifier back into the
+ // SFI::function_identifier_or_debug_info field.
Handle<DebugInfo> debug_info(node->debug_info());
- debug_info->shared()->set_debug_info(
- Smi::FromInt(debug_info->debugger_hints()));
+ debug_info->shared()->set_function_identifier_or_debug_info(
+ debug_info->function_identifier());
delete node;
}
@@ -1641,13 +1584,13 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared(frame->function()->shared());
+ Handle<SharedFunctionInfo> shared(frame->function()->shared(), isolate_);
// With no debug info there are no break points, so we can't be at a return.
if (!shared->HasBreakInfo()) return false;
DCHECK(!frame->is_optimized());
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
return location.IsReturn();
}
@@ -1673,12 +1616,6 @@ void Debug::ScheduleFrameRestart(StackFrame* frame) {
}
}
-
-bool Debug::IsDebugGlobal(JSGlobalObject* global) {
- return is_loaded() && global == debug_context()->global_object();
-}
-
-
Handle<FixedArray> Debug::GetLoadedScripts() {
isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kDebugger);
@@ -1697,48 +1634,9 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
if (script->HasValidSource()) results->set(length++, script);
}
}
- results->Shrink(length);
- return results;
-}
-
-
-MaybeHandle<Object> Debug::MakeExecutionState() {
- // Create the execution state object.
- Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
- return CallFunction("MakeExecutionState", arraysize(argv), argv);
-}
-
-
-MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
- bool uncaught,
- Handle<Object> promise) {
- // Create the new exception event object.
- Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
- exception,
- isolate_->factory()->ToBoolean(uncaught),
- promise };
- return CallFunction("MakeExceptionEvent", arraysize(argv), argv);
+ return FixedArray::ShrinkOrEmpty(isolate_, results, length);
}
-
-MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
- v8::DebugEvent type) {
- // Create the compile event object.
- Handle<Object> script_wrapper = Script::GetWrapper(script);
- Handle<Object> argv[] = { script_wrapper,
- isolate_->factory()->NewNumberFromInt(type) };
- return CallFunction("MakeCompileEvent", arraysize(argv), argv);
-}
-
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(
- v8::debug::PromiseDebugActionType type, int id) {
- // Create the async task event object.
- Handle<Object> argv[] = {Handle<Smi>(Smi::FromInt(type), isolate_),
- Handle<Smi>(Smi::FromInt(id), isolate_)};
- return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
-}
-
-
void Debug::OnThrow(Handle<Object> exception) {
if (in_debug_scope() || ignore_events()) return;
// Temporarily clear any scheduled_exception to allow evaluating
@@ -1768,17 +1666,6 @@ void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
}
}
-namespace {
-v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
- Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been nullptr when "script collected" event
- // occurred.
- if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> native_context(context->native_context());
- return v8::Utils::ToLocal(native_context);
-}
-} // anonymous namespace
-
bool Debug::IsExceptionBlackboxed(bool uncaught) {
// Uncaught exception is blackboxed if all current frames are blackboxed,
// caught exception if top frame is blackboxed.
@@ -1801,9 +1688,7 @@ bool Debug::IsFrameBlackboxed(JavaScriptFrame* frame) {
}
void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
- // We cannot generate debug events when JS execution is disallowed.
- // TODO(5530): Reenable debug events within DisallowJSScopes once relevant
- // code (MakeExceptionEvent and ProcessDebugEvent) have been moved to C++.
+ // TODO(kozyatinskiy): regress-662674.js test fails on arm without this.
if (!AllowJavascriptExecution::IsAllowed(isolate_)) return;
Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
@@ -1816,7 +1701,8 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- JSObject::SetProperty(jspromise, key, key, LanguageMode::kStrict).Assert();
+ JSObject::SetProperty(isolate_, jspromise, key, key, LanguageMode::kStrict)
+ .Assert();
// Check whether the promise reject is considered an uncaught exception.
uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
}
@@ -1843,20 +1729,14 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
}
DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
HandleScope scope(isolate_);
PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
- // Create the execution state.
- Handle<Object> exec_state;
- // Bail out and don't call debugger if exception.
- if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
- debug_delegate_->ExceptionThrown(
- GetDebugEventContext(isolate_),
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
- v8::Utils::ToLocal(exception), v8::Utils::ToLocal(promise), uncaught);
+ Handle<Context> native_context(isolate_->native_context());
+ debug_delegate_->ExceptionThrown(v8::Utils::ToLocal(native_context),
+ v8::Utils::ToLocal(exception),
+ v8::Utils::ToLocal(promise), uncaught);
}
void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
@@ -1875,11 +1755,6 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
- // Create the execution state.
- Handle<Object> exec_state;
- // Bail out and don't call debugger if exception.
- if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
std::vector<int> inspector_break_points_hit;
int inspector_break_points_count = 0;
// This array contains breakpoints installed using JS debug API.
@@ -1889,122 +1764,9 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
++inspector_break_points_count;
}
- debug_delegate_->BreakProgramRequested(
- GetDebugEventContext(isolate_),
- v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
- inspector_break_points_hit);
-}
-
-
-void Debug::OnCompileError(Handle<Script> script) {
- ProcessCompileEvent(v8::CompileError, script);
-}
-
-
-// Handle debugger actions when a new script is compiled.
-void Debug::OnAfterCompile(Handle<Script> script) {
- ProcessCompileEvent(v8::AfterCompile, script);
-}
-
-namespace {
-// In an async function, reuse the existing stack related to the outer
-// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
-// Promises with multiple reactions with one or more of them being async
-// functions will not get a good stack trace, as async functions require
-// different stacks from direct Promise use, but we save and restore a
-// stack once for all reactions.
-//
-// If this isn't a case of async function, we return false, otherwise
-// we set the correct id and return true.
-//
-// TODO(littledan): Improve this case.
-int GetReferenceAsyncTaskId(Isolate* isolate, Handle<JSPromise> promise) {
- Handle<Symbol> handled_by_symbol =
- isolate->factory()->promise_handled_by_symbol();
- Handle<Object> handled_by_promise =
- JSObject::GetDataProperty(promise, handled_by_symbol);
- if (!handled_by_promise->IsJSPromise()) {
- return isolate->debug()->NextAsyncTaskId(promise);
- }
- Handle<JSPromise> handled_by_promise_js =
- Handle<JSPromise>::cast(handled_by_promise);
- Handle<Symbol> async_stack_id_symbol =
- isolate->factory()->promise_async_stack_id_symbol();
- Handle<Object> async_task_id =
- JSObject::GetDataProperty(handled_by_promise_js, async_stack_id_symbol);
- if (!async_task_id->IsSmi()) {
- return isolate->debug()->NextAsyncTaskId(promise);
- }
- return Handle<Smi>::cast(async_task_id)->value();
-}
-} // namespace
-
-void Debug::RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
- Handle<Object> parent) {
- if (hook_type == PromiseHookType::kResolve) return;
- if (in_debug_scope() || ignore_events()) return;
- if (!debug_delegate_) return;
- PostponeInterruptsScope no_interrupts(isolate_);
-
- int id = GetReferenceAsyncTaskId(isolate_, promise);
- if (hook_type == PromiseHookType::kBefore) {
- debug_delegate_->PromiseEventOccurred(debug::kDebugWillHandle, id, false);
- } else if (hook_type == PromiseHookType::kAfter) {
- debug_delegate_->PromiseEventOccurred(debug::kDebugDidHandle, id, false);
- } else {
- DCHECK(hook_type == PromiseHookType::kInit);
- debug::PromiseDebugActionType type = debug::kDebugPromiseThen;
- bool last_frame_was_promise_builtin = false;
- JavaScriptFrameIterator it(isolate_);
- while (!it.done()) {
- std::vector<Handle<SharedFunctionInfo>> infos;
- it.frame()->GetFunctions(&infos);
- for (size_t i = 1; i <= infos.size(); ++i) {
- Handle<SharedFunctionInfo> info = infos[infos.size() - i];
- if (info->IsUserJavaScript()) {
- // We should not report PromiseThen and PromiseCatch which is called
- // indirectly, e.g. Promise.all calls Promise.then internally.
- if (type == debug::kDebugAsyncFunctionPromiseCreated ||
- last_frame_was_promise_builtin) {
- debug_delegate_->PromiseEventOccurred(type, id, IsBlackboxed(info));
- }
- return;
- }
- last_frame_was_promise_builtin = false;
- if (info->HasBuiltinId()) {
- if (info->builtin_id() == Builtins::kAsyncFunctionPromiseCreate) {
- type = debug::kDebugAsyncFunctionPromiseCreated;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
- type = debug::kDebugPromiseThen;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
- type = debug::kDebugPromiseCatch;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtins::kPromisePrototypeFinally) {
- type = debug::kDebugPromiseFinally;
- last_frame_was_promise_builtin = true;
- }
- }
- }
- it.Advance();
- }
- }
-}
-
-int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
- LookupIterator it(promise, isolate_->factory()->promise_async_id_symbol());
- Maybe<bool> maybe = JSReceiver::HasProperty(&it);
- if (maybe.ToChecked()) {
- MaybeHandle<Object> result = Object::GetProperty(&it);
- return Handle<Smi>::cast(result.ToHandleChecked())->value();
- }
- Handle<Smi> async_id =
- handle(Smi::FromInt(++thread_local_.async_task_count_), isolate_);
- Object::SetProperty(&it, async_id, LanguageMode::kSloppy,
- Object::MAY_BE_STORE_FROM_KEYED)
- .ToChecked();
- return async_id->value();
+ Handle<Context> native_context(isolate_->native_context());
+ debug_delegate_->BreakProgramRequested(v8::Utils::ToLocal(native_context),
+ inspector_break_points_hit);
}
namespace {
@@ -2023,7 +1785,8 @@ debug::Location GetDebugLocation(Handle<Script> script, int source_position) {
bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
if (!debug_delegate_) return !shared->IsSubjectToDebugging();
- if (!shared->computed_debug_is_blackboxed()) {
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
+ if (!debug_info->computed_debug_is_blackboxed()) {
bool is_blackboxed =
!shared->IsSubjectToDebugging() || !shared->script()->IsScript();
if (!is_blackboxed) {
@@ -2032,17 +1795,17 @@ bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
DCHECK(shared->script()->IsScript());
- Handle<Script> script(Script::cast(shared->script()));
+ Handle<Script> script(Script::cast(shared->script()), isolate_);
DCHECK(script->IsUserJavaScript());
debug::Location start = GetDebugLocation(script, shared->StartPosition());
debug::Location end = GetDebugLocation(script, shared->EndPosition());
is_blackboxed = debug_delegate_->IsFunctionBlackboxed(
ToApiHandle<debug::Script>(script), start, end);
}
- shared->set_debug_is_blackboxed(is_blackboxed);
- shared->set_computed_debug_is_blackboxed(true);
+ debug_info->set_debug_is_blackboxed(is_blackboxed);
+ debug_info->set_computed_debug_is_blackboxed(true);
}
- return shared->debug_is_blackboxed();
+ return debug_info->debug_is_blackboxed();
}
bool Debug::AllFramesOnStackAreBlackboxed() {
@@ -2064,30 +1827,26 @@ bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) {
}
bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source,
- bool preview, bool* stack_changed) {
+ bool preview, debug::LiveEditResult* result) {
DebugScope debug_scope(this);
- set_live_edit_enabled(true);
- Handle<Object> script_wrapper = Script::GetWrapper(script);
- Handle<Object> argv[] = {script_wrapper, source,
- isolate_->factory()->ToBoolean(preview),
- isolate_->factory()->NewJSArray(0)};
- Handle<Object> result;
- if (!CallFunction("SetScriptSource", arraysize(argv), argv, false)
- .ToHandle(&result)) {
- isolate_->OptionalRescheduleException(false);
- set_live_edit_enabled(false);
- return false;
- }
- set_live_edit_enabled(false);
- Handle<Object> stack_changed_value =
- JSReceiver::GetProperty(isolate_, Handle<JSObject>::cast(result),
- "stack_modified")
- .ToHandleChecked();
- *stack_changed = stack_changed_value->IsTrue(isolate_);
- return true;
+ running_live_edit_ = true;
+ LiveEdit::PatchScript(isolate_, script, source, preview, result);
+ running_live_edit_ = false;
+ return result->status == debug::LiveEditResult::OK;
+}
+
+void Debug::OnCompileError(Handle<Script> script) {
+ ProcessCompileEvent(true, script);
}
-void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
+void Debug::OnAfterCompile(Handle<Script> script) {
+ ProcessCompileEvent(false, script);
+}
+
+void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
+ // TODO(kozyatinskiy): teach devtools to work with liveedit scripts better
+ // first and then remove this fast return.
+ if (running_live_edit_) return;
// Attach the correct debug id to the script. The debug id is used by the
// inspector to filter scripts by native context.
script->set_context_data(isolate_->native_context()->debug_context_id());
@@ -2098,23 +1857,12 @@ void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
if (!debug_delegate_) return;
SuppressDebug while_processing(this);
DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
HandleScope scope(isolate_);
PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
AllowJavascriptExecution allow_script(isolate_);
debug_delegate_->ScriptCompiled(ToApiHandle<debug::Script>(script),
- live_edit_enabled(),
- event != v8::AfterCompile);
-}
-
-
-Handle<Context> Debug::GetDebugContext() {
- if (!is_loaded()) return Handle<Context>();
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return Handle<Context>();
- // The global handle may be destroyed soon after. Return it reboxed.
- return handle(*debug_context(), isolate_);
+ running_live_edit_, has_compile_error);
}
int Debug::CurrentFrameCount() {
@@ -2138,63 +1886,36 @@ int Debug::CurrentFrameCount() {
return counter;
}
-void Debug::SetDebugDelegate(debug::DebugDelegate* delegate,
- bool pass_ownership) {
- RemoveDebugDelegate();
+void Debug::SetDebugDelegate(debug::DebugDelegate* delegate) {
debug_delegate_ = delegate;
- owns_debug_delegate_ = pass_ownership;
UpdateState();
}
-void Debug::RemoveDebugDelegate() {
- if (debug_delegate_ == nullptr) return;
- if (owns_debug_delegate_) {
- owns_debug_delegate_ = false;
- delete debug_delegate_;
- }
- debug_delegate_ = nullptr;
-}
-
void Debug::UpdateState() {
bool is_active = debug_delegate_ != nullptr;
- if (is_active || in_debug_scope()) {
+ if (is_active == is_active_) return;
+ if (is_active) {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
isolate_->compilation_cache()->Disable();
- is_active = Load();
- } else if (is_loaded()) {
+ is_active = true;
+ feature_tracker()->Track(DebugFeatureTracker::kActive);
+ } else {
isolate_->compilation_cache()->Enable();
Unload();
}
is_active_ = is_active;
- isolate_->DebugStateUpdated();
+ if (is_active && isolate_->IsPromiseHookProtectorIntact()) {
+ isolate_->InvalidatePromiseHookProtector();
+ }
}
void Debug::UpdateHookOnFunctionCall() {
STATIC_ASSERT(LastStepAction == StepIn);
hook_on_function_call_ =
thread_local_.last_step_action_ == StepIn ||
- isolate_->debug_execution_mode() == DebugInfo::kSideEffects;
-}
-
-MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
- AllowJavascriptExecutionDebugOnly allow_script(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return isolate_->factory()->undefined_value();
-
- // Create the execution state.
- Handle<Object> exec_state;
- if (!MakeExecutionState().ToHandle(&exec_state)) {
- return isolate_->factory()->undefined_value();
- }
-
- Handle<Object> argv[] = { exec_state, data };
- return Execution::Call(
- isolate_,
- fun,
- Handle<Object>(debug_context()->global_proxy(), isolate_),
- arraysize(argv),
- argv);
+ isolate_->debug_execution_mode() == DebugInfo::kSideEffects ||
+ thread_local_.break_on_next_function_call_;
}
void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
@@ -2221,35 +1942,17 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
bool ignore_break = ignore_break_mode == kIgnoreIfTopFrameBlackboxed
? IsBlackboxed(shared)
: AllFramesOnStackAreBlackboxed();
- if (ignore_break) {
- // Inspector uses pause on next statement for asynchronous breakpoints.
- // When breakpoint is fired we try to break on first not blackboxed
- // statement. To achieve this goal we need to deoptimize current
- // function and don't clear requested DebugBreak even if it's blackboxed
- // to be able to break on not blackboxed function call.
- // TODO(yangguo): introduce break_on_function_entry since current
- // implementation is slow.
- if (isolate_->stack_guard()->CheckDebugBreak()) {
- Deoptimizer::DeoptimizeFunction(*function);
- }
- return;
- }
- JSGlobalObject* global = function->context()->global_object();
- // Don't stop in debugger functions.
- if (IsDebugGlobal(global)) return;
+ if (ignore_break) return;
// Don't stop if the break location is muted.
if (IsMutedAtCurrentLocation(it.frame())) return;
}
}
- isolate_->stack_guard()->ClearDebugBreak();
-
// Clear stepping to avoid duplicate breaks.
ClearStepping();
HandleScope scope(isolate_);
DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
OnDebugBreak(isolate_->factory()->empty_fixed_array());
}
@@ -2269,13 +1972,14 @@ void Debug::PrintBreakLocation() {
PrintF("'.\n");
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
- Handle<String> source(String::cast(script->source()));
+ Handle<String> source(String::cast(script->source()), isolate_);
Script::InitLineEnds(script);
int line =
Script::GetLineNumber(script, source_position) - script->line_offset();
int column = Script::GetColumnNumber(script, source_position) -
(line == 0 ? script->column_offset() : 0);
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()),
+ isolate_);
int line_start = line == 0 ? 0 : Smi::ToInt(line_ends->get(line - 1)) + 1;
int line_end = Smi::ToInt(line_ends->get(line));
DisallowHeapAllocation no_gc;
@@ -2296,7 +2000,6 @@ void Debug::PrintBreakLocation() {
DebugScope::DebugScope(Debug* debug)
: debug_(debug),
prev_(debug->debugger_entry()),
- save_(debug_->isolate_),
no_termination_exceptons_(debug_->isolate_,
StackGuard::TERMINATE_EXECUTION) {
// Link recursive debugger entry.
@@ -2316,10 +2019,6 @@ DebugScope::DebugScope(Debug* debug)
debug_->SetNextBreakId();
debug_->UpdateState();
- // Make sure that debugger is loaded and enter the debugger context.
- // The previous context is kept in save_.
- failed_ = !debug_->is_loaded();
- if (!failed_) isolate()->set_context(*debug->debug_context());
}
@@ -2343,6 +2042,27 @@ ReturnValueScope::~ReturnValueScope() {
debug_->set_return_value(*return_value_);
}
+void Debug::UpdateDebugInfosForExecutionMode() {
+ // Walk all debug infos and update their execution mode if it is different
+ // from the isolate execution mode.
+ DebugInfoListNode* current = debug_info_list_;
+ while (current != nullptr) {
+ Handle<DebugInfo> debug_info = current->debug_info();
+ if (debug_info->HasInstrumentedBytecodeArray() &&
+ debug_info->DebugExecutionMode() != isolate_->debug_execution_mode()) {
+ DCHECK(debug_info->shared()->HasBytecodeArray());
+ if (isolate_->debug_execution_mode() == DebugInfo::kBreakpoints) {
+ ClearSideEffectChecks(debug_info);
+ ApplyBreakPoints(debug_info);
+ } else {
+ ClearBreakPoints(debug_info);
+ ApplySideEffectChecks(debug_info);
+ }
+ }
+ current = current->next();
+ }
+}
+
void Debug::StartSideEffectCheckMode() {
DCHECK(isolate_->debug_execution_mode() != DebugInfo::kSideEffects);
isolate_->set_debug_execution_mode(DebugInfo::kSideEffects);
@@ -2352,17 +2072,20 @@ void Debug::StartSideEffectCheckMode() {
DCHECK(!temporary_objects_);
temporary_objects_.reset(new TemporaryObjectsTracker());
isolate_->heap()->AddHeapObjectAllocationTracker(temporary_objects_.get());
- Handle<FixedArray> array(
- isolate_->native_context()->regexp_last_match_info());
+ Handle<FixedArray> array(isolate_->native_context()->regexp_last_match_info(),
+ isolate_);
regexp_match_info_ =
Handle<RegExpMatchInfo>::cast(isolate_->factory()->CopyFixedArray(array));
+
+ // Update debug infos to have correct execution mode.
+ UpdateDebugInfosForExecutionMode();
}
void Debug::StopSideEffectCheckMode() {
DCHECK(isolate_->debug_execution_mode() == DebugInfo::kSideEffects);
if (side_effect_check_failed_) {
DCHECK(isolate_->has_pending_exception());
- DCHECK_EQ(isolate_->heap()->termination_exception(),
+ DCHECK_EQ(ReadOnlyRoots(isolate_).termination_exception(),
isolate_->pending_exception());
// Convert the termination exception into a regular exception.
isolate_->CancelTerminateExecution();
@@ -2378,19 +2101,24 @@ void Debug::StopSideEffectCheckMode() {
temporary_objects_.reset();
isolate_->native_context()->set_regexp_last_match_info(*regexp_match_info_);
regexp_match_info_ = Handle<RegExpMatchInfo>::null();
+
+ // Update debug infos to have correct execution mode.
+ UpdateDebugInfosForExecutionMode();
}
void Debug::ApplySideEffectChecks(Handle<DebugInfo> debug_info) {
- DCHECK(debug_info->HasDebugBytecodeArray());
- Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray());
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
+ Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray(),
+ isolate_);
DebugEvaluate::ApplySideEffectChecks(debug_bytecode);
debug_info->SetDebugExecutionMode(DebugInfo::kSideEffects);
}
void Debug::ClearSideEffectChecks(Handle<DebugInfo> debug_info) {
- DCHECK(debug_info->HasDebugBytecodeArray());
- Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray());
- Handle<BytecodeArray> original(debug_info->OriginalBytecodeArray());
+ DCHECK(debug_info->HasInstrumentedBytecodeArray());
+ Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray(),
+ isolate_);
+ Handle<BytecodeArray> original(debug_info->OriginalBytecodeArray(), isolate_);
for (interpreter::BytecodeArrayIterator it(debug_bytecode); !it.done();
it.Advance()) {
debug_bytecode->set(it.current_offset(),
@@ -2406,10 +2134,12 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
return false;
}
- SharedFunctionInfo::SideEffectState side_effect_state =
- SharedFunctionInfo::GetSideEffectState(handle(function->shared()));
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
+ DebugInfo::SideEffectState side_effect_state =
+ debug_info->GetSideEffectState(isolate_);
switch (side_effect_state) {
- case SharedFunctionInfo::kHasSideEffects:
+ case DebugInfo::kHasSideEffects:
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
function->shared()->DebugName()->ToCString().get());
@@ -2418,8 +2148,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
// Throw an uncatchable termination exception.
isolate_->TerminateExecution();
return false;
- case SharedFunctionInfo::kRequiresRuntimeChecks: {
- Handle<SharedFunctionInfo> shared(function->shared());
+ case DebugInfo::kRequiresRuntimeChecks: {
if (!shared->HasBytecodeArray()) {
return PerformSideEffectCheckForObject(receiver);
}
@@ -2430,13 +2159,13 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
isolate_->builtins()->builtin(Builtins::kDeserializeLazy)) {
Snapshot::EnsureBuiltinIsDeserialized(isolate_, shared);
}
- GetOrCreateDebugInfo(shared);
PrepareFunctionForDebugExecution(shared);
+ ApplySideEffectChecks(debug_info);
return true;
}
- case SharedFunctionInfo::kHasNoSideEffect:
+ case DebugInfo::kHasNoSideEffect:
return true;
- case SharedFunctionInfo::kNotComputed:
+ case DebugInfo::kNotComputed:
UNREACHABLE();
return false;
}
@@ -2469,8 +2198,8 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
SharedFunctionInfo* shared = frame->function()->shared();
BytecodeArray* bytecode_array = shared->GetBytecodeArray();
int offset = frame->GetBytecodeOffset();
- interpreter::BytecodeArrayAccessor bytecode_accessor(handle(bytecode_array),
- offset);
+ interpreter::BytecodeArrayAccessor bytecode_accessor(
+ handle(bytecode_array, isolate_), offset);
Bytecode bytecode = bytecode_accessor.current_bytecode();
interpreter::Register reg;
@@ -2503,116 +2232,5 @@ bool Debug::PerformSideEffectCheckForObject(Handle<Object> object) {
isolate_->TerminateExecution();
return false;
}
-
-void LegacyDebugDelegate::PromiseEventOccurred(
- v8::debug::PromiseDebugActionType type, int id, bool is_blackboxed) {
- DebugScope debug_scope(isolate_->debug());
- if (debug_scope.failed()) return;
- HandleScope scope(isolate_);
- Handle<Object> event_data;
- if (isolate_->debug()->MakeAsyncTaskEvent(type, id).ToHandle(&event_data)) {
- ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data));
- }
-}
-
-void LegacyDebugDelegate::ScriptCompiled(v8::Local<v8::debug::Script> script,
- bool is_live_edited,
- bool is_compile_error) {
- Handle<Object> event_data;
- v8::DebugEvent event = is_compile_error ? v8::CompileError : v8::AfterCompile;
- if (isolate_->debug()
- ->MakeCompileEvent(v8::Utils::OpenHandle(*script), event)
- .ToHandle(&event_data)) {
- ProcessDebugEvent(event, Handle<JSObject>::cast(event_data));
- }
-}
-
-void LegacyDebugDelegate::BreakProgramRequested(
- v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
- const std::vector<debug::BreakpointId>&) {
- ProcessDebugEvent(v8::Break, isolate_->factory()->NewJSObjectWithNullProto(),
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
-}
-
-void LegacyDebugDelegate::ExceptionThrown(v8::Local<v8::Context> paused_context,
- v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise,
- bool is_uncaught) {
- Handle<Object> event_data;
- if (isolate_->debug()
- ->MakeExceptionEvent(v8::Utils::OpenHandle(*exception), is_uncaught,
- v8::Utils::OpenHandle(*promise))
- .ToHandle(&event_data)) {
- ProcessDebugEvent(
- v8::Exception, Handle<JSObject>::cast(event_data),
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
- }
-}
-
-void LegacyDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data) {
- Handle<Object> exec_state;
- if (isolate_->debug()->MakeExecutionState().ToHandle(&exec_state)) {
- ProcessDebugEvent(event, event_data, Handle<JSObject>::cast(exec_state));
- }
-}
-
-NativeDebugDelegate::NativeDebugDelegate(Isolate* isolate,
- v8::Debug::EventCallback callback,
- Handle<Object> data)
- : LegacyDebugDelegate(isolate), callback_(callback) {
- data_ = isolate->global_handles()->Create(*data);
-}
-
-NativeDebugDelegate::~NativeDebugDelegate() {
- GlobalHandles::Destroy(data_.location());
-}
-
-NativeDebugDelegate::EventDetails::EventDetails(DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<Object> callback_data)
- : event_(event),
- exec_state_(exec_state),
- event_data_(event_data),
- callback_data_(callback_data) {}
-
-DebugEvent NativeDebugDelegate::EventDetails::GetEvent() const {
- return event_;
-}
-
-v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetExecutionState()
- const {
- return v8::Utils::ToLocal(exec_state_);
-}
-
-v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetEventData() const {
- return v8::Utils::ToLocal(event_data_);
-}
-
-v8::Local<v8::Context> NativeDebugDelegate::EventDetails::GetEventContext()
- const {
- return GetDebugEventContext(exec_state_->GetIsolate());
-}
-
-v8::Local<v8::Value> NativeDebugDelegate::EventDetails::GetCallbackData()
- const {
- return v8::Utils::ToLocal(callback_data_);
-}
-
-v8::Isolate* NativeDebugDelegate::EventDetails::GetIsolate() const {
- return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
-}
-
-void NativeDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- Handle<JSObject> exec_state) {
- EventDetails event_details(event, exec_state, event_data, data_);
- Isolate* isolate = isolate_;
- callback_(event_details);
- CHECK(!isolate->has_scheduled_exception());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 17bdfc0368..31881fe106 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -31,7 +31,6 @@ namespace internal {
// Forward declarations.
class DebugScope;
-
// Step actions. NOTE: These values are in macros.py as well.
enum StepAction : int8_t {
StepNone = -1, // Stepping not prepared.
@@ -87,7 +86,7 @@ class BreakLocation {
return result;
}
- bool HasBreakPoint(Handle<DebugInfo> debug_info) const;
+ bool HasBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info) const;
inline int position() const { return position_; }
@@ -171,7 +170,7 @@ class BreakIterator {
// weak handles to avoid a debug info object to keep a function alive.
class DebugInfoListNode {
public:
- explicit DebugInfoListNode(DebugInfo* debug_info);
+ DebugInfoListNode(Isolate* isolate, DebugInfo* debug_info);
~DebugInfoListNode();
DebugInfoListNode* next() { return next_; }
@@ -225,13 +224,8 @@ class Debug {
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
- Handle<Object> data);
- Handle<Context> GetDebugContext();
void HandleDebugBreak(IgnoreBreakMode ignore_break_mode);
- // Internal logic
- bool Load();
// The break target may not be the top-most frame, since we may be
// breaking before entering a function that cannot contain break points.
void Break(JavaScriptFrame* frame, Handle<JSFunction> break_target);
@@ -264,7 +258,9 @@ class Debug {
void PrepareStepInSuspendedGenerator();
void PrepareStepOnThrow();
void ClearStepping();
- void ClearStepOut();
+
+ void SetBreakOnNextFunctionCall();
+ void ClearBreakOnNextFunctionCall();
void DeoptimizeFunction(Handle<SharedFunctionInfo> shared);
void PrepareFunctionForDebugExecution(Handle<SharedFunctionInfo> shared);
@@ -273,16 +269,11 @@ class Debug {
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
- void RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
- Handle<Object> parent);
-
- int NextAsyncTaskId(Handle<JSObject> promise);
-
bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
bool CanBreakAtEntry(Handle<SharedFunctionInfo> shared);
- void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
+ void SetDebugDelegate(debug::DebugDelegate* delegate);
// Returns whether the operation succeeded.
bool EnsureBreakInfo(Handle<SharedFunctionInfo> shared);
@@ -293,18 +284,12 @@ class Debug {
Handle<CoverageInfo> coverage_info);
void RemoveAllCoverageInfos();
- template <typename C>
- bool CompileToRevealInnerFunctions(C* compilable);
-
// This function is used in FunctionNameUsing* tests.
Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
int position);
static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared);
-
- // Check whether a global object is the debug global object.
- bool IsDebugGlobal(JSGlobalObject* global);
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
// Check whether this frame is just about to return.
bool IsBreakAtReturn(JavaScriptFrame* frame);
@@ -319,7 +304,9 @@ class Debug {
// change. stack_changed is true if after editing script on pause stack is
// changed and client should request stack trace again.
bool SetScriptSource(Handle<Script> script, Handle<String> source,
- bool preview, bool* stack_changed);
+ bool preview, debug::LiveEditResult* result);
+
+ int GetFunctionDebuggingId(Handle<JSFunction> function);
// Threading support.
char* ArchiveDebug(char* to);
@@ -327,18 +314,8 @@ class Debug {
static int ArchiveSpacePerThread();
void FreeThreadResources() { }
void Iterate(RootVisitor* v);
- void InitThread(const ExecutionAccess& lock) { ThreadInit(); }
-
- bool CheckExecutionState(int id) {
- return CheckExecutionState() && break_id() == id;
- }
-
- bool CheckExecutionState() {
- return is_active() && !debug_context().is_null() && break_id() != 0;
- }
- // Apply proper instrumentation depends on debug_execution_mode.
- void ApplyInstrumentation(Handle<SharedFunctionInfo> shared);
+ bool CheckExecutionState() { return is_active() && break_id() != 0; }
void StartSideEffectCheckMode();
void StopSideEffectCheckMode();
@@ -357,15 +334,8 @@ class Debug {
return reinterpret_cast<DebugScope*>(
base::Relaxed_Load(&thread_local_.current_debug_scope_));
}
- inline Handle<Context> debug_context() { return debug_context_; }
-
- void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
- bool live_edit_enabled() const {
- return FLAG_enable_liveedit && live_edit_enabled_;
- }
inline bool is_active() const { return is_active_; }
- inline bool is_loaded() const { return !debug_context_.is_null(); }
inline bool in_debug_scope() const {
return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
}
@@ -394,10 +364,6 @@ class Debug {
return reinterpret_cast<Address>(&hook_on_function_call_);
}
- Address last_step_action_address() {
- return reinterpret_cast<Address>(&thread_local_.last_step_action_);
- }
-
Address suspended_generator_address() {
return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
}
@@ -407,6 +373,9 @@ class Debug {
}
StepAction last_step_action() { return thread_local_.last_step_action_; }
+ bool break_on_next_function_call() const {
+ return thread_local_.break_on_next_function_call_;
+ }
DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
@@ -414,13 +383,15 @@ class Debug {
// source position for break points.
static const int kBreakAtEntryPosition = 0;
+ void RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info);
+
private:
explicit Debug(Isolate* isolate);
~Debug();
+ void UpdateDebugInfosForExecutionMode();
void UpdateState();
void UpdateHookOnFunctionCall();
- void RemoveDebugDelegate();
void Unload();
void SetNextBreakId() {
thread_local_.break_id_ = ++thread_local_.break_count_;
@@ -447,17 +418,7 @@ class Debug {
void OnException(Handle<Object> exception, Handle<Object> promise);
- // Constructors for debug event objects.
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeExecutionState();
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeExceptionEvent(
- Handle<Object> exception, bool uncaught, Handle<Object> promise);
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeCompileEvent(
- Handle<Script> script, v8::DebugEvent type);
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
- v8::debug::PromiseDebugActionType type, int id);
-
- void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
+ void ProcessCompileEvent(bool has_compile_error, Handle<Script> script);
// Find the closest source position for a break point for a given position.
int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position);
@@ -483,12 +444,8 @@ class Debug {
// Check whether a BreakPoint object is hit. Evaluate condition depending
// on whether this is a regular break location or a break at function entry.
bool CheckBreakPoint(Handle<BreakPoint> break_point, bool is_break_at_entry);
- MaybeHandle<Object> CallFunction(const char* name, int argc,
- Handle<Object> args[],
- bool catch_exceptions = true);
inline void AssertDebugContext() {
- DCHECK(isolate_->context() == *debug_context());
DCHECK(in_debug_scope());
}
@@ -496,20 +453,17 @@ class Debug {
void PrintBreakLocation();
+ void ClearAllDebuggerHints();
+
// Wraps logic for clearing and maybe freeing all debug infos.
- typedef std::function<bool(Handle<DebugInfo>)> DebugInfoClearFunction;
+ typedef std::function<void(Handle<DebugInfo>)> DebugInfoClearFunction;
void ClearAllDebugInfos(DebugInfoClearFunction clear_function);
- void RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info);
void FindDebugInfo(Handle<DebugInfo> debug_info, DebugInfoListNode** prev,
DebugInfoListNode** curr);
void FreeDebugInfoListNode(DebugInfoListNode* prev, DebugInfoListNode* node);
- // Global handles.
- Handle<Context> debug_context_;
-
debug::DebugDelegate* debug_delegate_ = nullptr;
- bool owns_debug_delegate_ = false;
// Debugger is active, i.e. there is a debug event listener attached.
bool is_active_;
@@ -518,8 +472,8 @@ class Debug {
bool hook_on_function_call_;
// Suppress debug events.
bool is_suppressed_;
- // LiveEdit is enabled.
- bool live_edit_enabled_;
+ // Running liveedit.
+ bool running_live_edit_ = false;
// Do not trigger debug break events.
bool break_disabled_;
// Do not break on break points.
@@ -586,10 +540,12 @@ class Debug {
// The new frame pointer to drop to when restarting a frame.
Address restart_fp_;
- int async_task_count_;
-
// Last used inspector breakpoint id.
int last_breakpoint_id_;
+
+ // This flag is true when SetBreakOnNextFunctionCall is called and it forces
+ // debugger to break on next function call.
+ bool break_on_next_function_call_;
};
// Storage location for registers when handling debug break calls
@@ -602,8 +558,6 @@ class Debug {
friend class DisableBreak;
friend class LiveEdit;
friend class SuppressDebug;
- friend class NoSideEffectScope;
- friend class LegacyDebugDelegate;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
friend void CheckDebuggerUnloaded(); // In test-debug.cc
@@ -611,84 +565,13 @@ class Debug {
DISALLOW_COPY_AND_ASSIGN(Debug);
};
-class LegacyDebugDelegate : public v8::debug::DebugDelegate {
- public:
- explicit LegacyDebugDelegate(Isolate* isolate) : isolate_(isolate) {}
- void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
- bool is_blackboxed) override;
- void ScriptCompiled(v8::Local<v8::debug::Script> script, bool is_live_edited,
- bool has_compile_error) override;
- void BreakProgramRequested(v8::Local<v8::Context> paused_context,
- v8::Local<v8::Object> exec_state,
- const std::vector<debug::BreakpointId>&) override;
- void ExceptionThrown(v8::Local<v8::Context> paused_context,
- v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise, bool is_uncaught) override;
- bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
- const v8::debug::Location& start,
- const v8::debug::Location& end) override {
- return false;
- }
-
- protected:
- Isolate* isolate_;
-
- private:
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
- virtual void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- Handle<JSObject> exec_state) = 0;
-};
-
-class NativeDebugDelegate : public LegacyDebugDelegate {
- public:
- NativeDebugDelegate(Isolate* isolate, v8::Debug::EventCallback callback,
- Handle<Object> data);
- virtual ~NativeDebugDelegate();
-
- private:
- // Details of the debug event delivered to the debug event listener.
- class EventDetails : public v8::Debug::EventDetails {
- public:
- EventDetails(DebugEvent event, Handle<JSObject> exec_state,
- Handle<JSObject> event_data, Handle<Object> callback_data);
- virtual DebugEvent GetEvent() const;
- virtual v8::Local<v8::Object> GetExecutionState() const;
- virtual v8::Local<v8::Object> GetEventData() const;
- virtual v8::Local<v8::Context> GetEventContext() const;
- virtual v8::Local<v8::Value> GetCallbackData() const;
- virtual v8::Isolate* GetIsolate() const;
-
- private:
- DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback
- // when it was registered.
- };
-
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
- Handle<JSObject> exec_state) override;
-
- v8::Debug::EventCallback callback_;
- Handle<Object> data_;
-};
-
// This scope is used to load and enter the debug context and create a new
// break state. Leaving the scope will restore the previous state.
-// On failure to load, FailedToEnter returns true.
class DebugScope BASE_EMBEDDED {
public:
explicit DebugScope(Debug* debug);
~DebugScope();
- // Check whether loading was successful.
- inline bool failed() { return failed_; }
-
- // Get the active context from before entering the debugger.
- inline Handle<Context> GetContext() { return save_.context(); }
-
private:
Isolate* isolate() { return debug_->isolate_; }
@@ -696,8 +579,6 @@ class DebugScope BASE_EMBEDDED {
DebugScope* prev_; // Previous scope if entered recursively.
StackFrame::Id break_frame_id_; // Previous break frame id.
int break_id_; // Previous break id.
- bool failed_; // Did the debug context fail to load?
- SaveContext save_; // Saves previous context.
PostponeInterruptsScope no_termination_exceptons_;
};
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
deleted file mode 100644
index 97a0886ca7..0000000000
--- a/deps/v8/src/debug/debug.js
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function (global, utils) {
-"use strict";
-
-// ----------------------------------------------------------------------------
-// Imports
-
-var FrameMirror = global.FrameMirror;
-var GlobalArray = global.Array;
-var GlobalRegExp = global.RegExp;
-var IsNaN = global.isNaN;
-var MakeMirror = global.MakeMirror;
-var MathMin = global.Math.min;
-var Mirror = global.Mirror;
-var ValueMirror = global.ValueMirror;
-
-//----------------------------------------------------------------------------
-
-var Debug = {};
-
-// Debug events which can occur in the V8 JavaScript engine. These originate
-// from the API include file debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- AfterCompile: 3,
- CompileError: 4,
- AsyncTaskEvent: 5 };
-
-// Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { Caught : 0,
- Uncaught: 1 };
-
-// The different types of steps.
-Debug.StepAction = { StepOut: 0,
- StepNext: 1,
- StepIn: 2 };
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2,
- Wasm: 3};
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-// Globals.
-var debugger_flags = {
- breakOnCaughtException: {
- getValue: function() { return Debug.isBreakOnException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnException();
- } else {
- Debug.clearBreakOnException();
- }
- }
- },
- breakOnUncaughtException: {
- getValue: function() { return Debug.isBreakOnUncaughtException(); },
- setValue: function(value) {
- if (value) {
- Debug.setBreakOnUncaughtException();
- } else {
- Debug.clearBreakOnUncaughtException();
- }
- }
- },
-};
-
-
-// Returns a Script object. If the parameter is a function the return value
-// is the script in which the function is defined. If the parameter is a string
-// the return value is the script for which the script name has that string
-// value. If it is a regexp and there is a unique script whose name matches
-// we return that, otherwise undefined.
-Debug.findScript = function(func_or_script_name) {
- if (IS_FUNCTION(func_or_script_name)) {
- return %FunctionGetScript(func_or_script_name);
- } else if (%IsRegExp(func_or_script_name)) {
- var scripts = this.scripts();
- var last_result = null;
- var result_count = 0;
- for (var i in scripts) {
- var script = scripts[i];
- if (func_or_script_name.test(script.name)) {
- last_result = script;
- result_count++;
- }
- }
- // Return the unique script matching the regexp. If there are more
- // than one we don't return a value since there is no good way to
- // decide which one to return. Returning a "random" one, say the
- // first, would introduce nondeterminism (or something close to it)
- // because the order is the heap iteration order.
- if (result_count == 1) {
- return last_result;
- } else {
- return UNDEFINED;
- }
- } else {
- return %GetScript(func_or_script_name);
- }
-};
-
-// Returns the script source. If the parameter is a function the return value
-// is the script source for the script in which the function is defined. If the
-// parameter is a string the return value is the script for which the script
-// name has that string value.
-Debug.scriptSource = function(func_or_script_name) {
- return this.findScript(func_or_script_name).source;
-};
-
-
-Debug.source = function(f) {
- if (!IS_FUNCTION(f)) throw %make_type_error(kDebuggerType);
- return %FunctionGetSourceCode(f);
-};
-
-
-Debug.sourcePosition = function(f) {
- if (!IS_FUNCTION(f)) throw %make_type_error(kDebuggerType);
- return %FunctionGetScriptSourcePosition(f);
-};
-
-
-Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
- var script = %FunctionGetScript(func);
- var script_offset = %FunctionGetScriptSourcePosition(func);
- return %ScriptLocationFromLine(script, opt_line, opt_column, script_offset);
-};
-
-
-// Returns the character position in a script based on a line number and an
-// optional position within that line.
-Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = %ScriptLocationFromLine(script, opt_line, opt_column, 0);
- return location ? location.position : null;
-};
-
-Debug.clearStepping = function() {
- %ClearStepping();
-};
-
-Debug.setBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
-};
-
-Debug.clearBreakOnException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
-};
-
-Debug.isBreakOnException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
-};
-
-Debug.setBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
-};
-
-Debug.clearBreakOnUncaughtException = function() {
- return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-Debug.isBreakOnUncaughtException = function() {
- return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
-};
-
-// Get all the scripts currently loaded. Locating all the scripts is based on
-// scanning the heap.
-Debug.scripts = function() {
- // Collect all scripts in the heap.
- return %DebugGetLoadedScripts();
-};
-
-
-// Get a specific script currently loaded. This is based on scanning the heap.
-// TODO(clemensh): Create a runtime function for this.
-function scriptById(scriptId) {
- var scripts = Debug.scripts();
- for (var script of scripts) {
- if (script.id == scriptId) return script;
- }
- return UNDEFINED;
-};
-
-
-Debug.debuggerFlags = function() {
- return debugger_flags;
-};
-
-Debug.MakeMirror = MakeMirror;
-
-function MakeExecutionState(break_id) {
- return new ExecutionState(break_id);
-}
-
-function ExecutionState(break_id) {
- this.break_id = break_id;
- this.selected_frame = 0;
-}
-
-ExecutionState.prototype.prepareStep = function(action) {
- if (action === Debug.StepAction.StepIn ||
- action === Debug.StepAction.StepOut ||
- action === Debug.StepAction.StepNext) {
- return %PrepareStep(this.break_id, action);
- }
- throw %make_type_error(kDebuggerType);
-};
-
-ExecutionState.prototype.evaluateGlobal = function(source) {
- return MakeMirror(%DebugEvaluateGlobal(this.break_id, source));
-};
-
-ExecutionState.prototype.frameCount = function() {
- return %GetFrameCount(this.break_id);
-};
-
-ExecutionState.prototype.frame = function(opt_index) {
- // If no index supplied return the selected frame.
- if (opt_index == null) opt_index = this.selected_frame;
- if (opt_index < 0 || opt_index >= this.frameCount()) {
- throw %make_type_error(kDebuggerFrame);
- }
- return new FrameMirror(this.break_id, opt_index);
-};
-
-ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = TO_NUMBER(index);
- if (i < 0 || i >= this.frameCount()) {
- throw %make_type_error(kDebuggerFrame);
- }
- this.selected_frame = i;
-};
-
-ExecutionState.prototype.selectedFrame = function() {
- return this.selected_frame;
-};
-
-
-function MakeExceptionEvent(break_id, exception, uncaught, promise) {
- return new ExceptionEvent(break_id, exception, uncaught, promise);
-}
-
-
-function ExceptionEvent(break_id, exception, uncaught, promise) {
- this.exec_state_ = new ExecutionState(break_id);
- this.exception_ = exception;
- this.uncaught_ = uncaught;
- this.promise_ = promise;
-}
-
-
-ExceptionEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Exception;
-};
-
-
-ExceptionEvent.prototype.exception = function() {
- return this.exception_;
-};
-
-
-ExceptionEvent.prototype.uncaught = function() {
- return this.uncaught_;
-};
-
-
-ExceptionEvent.prototype.promise = function() {
- return this.promise_;
-};
-
-
-ExceptionEvent.prototype.func = function() {
- return this.exec_state_.frame(0).func();
-};
-
-
-ExceptionEvent.prototype.sourceLine = function() {
- return this.exec_state_.frame(0).sourceLine();
-};
-
-
-ExceptionEvent.prototype.sourceColumn = function() {
- return this.exec_state_.frame(0).sourceColumn();
-};
-
-
-ExceptionEvent.prototype.sourceLineText = function() {
- return this.exec_state_.frame(0).sourceLineText();
-};
-
-
-function MakeCompileEvent(script, type) {
- return new CompileEvent(script, type);
-}
-
-
-function CompileEvent(script, type) {
- this.script_ = MakeMirror(script);
- this.type_ = type;
-}
-
-
-CompileEvent.prototype.eventType = function() {
- return this.type_;
-};
-
-
-CompileEvent.prototype.script = function() {
- return this.script_;
-};
-
-
-function MakeScriptObject_(script, include_source) {
- var o = { id: script.id(),
- name: script.name(),
- lineOffset: script.lineOffset(),
- columnOffset: script.columnOffset(),
- lineCount: script.lineCount(),
- };
- if (!IS_UNDEFINED(script.data())) {
- o.data = script.data();
- }
- if (include_source) {
- o.source = script.source();
- }
- return o;
-}
-
-
-function MakeAsyncTaskEvent(type, id) {
- return new AsyncTaskEvent(type, id);
-}
-
-
-function AsyncTaskEvent(type, id) {
- this.type_ = type;
- this.id_ = id;
-}
-
-
-AsyncTaskEvent.prototype.type = function() {
- return this.type_;
-}
-
-
-AsyncTaskEvent.prototype.id = function() {
- return this.id_;
-}
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.InstallConstants(global, [
- "Debug", Debug,
- "CompileEvent", CompileEvent,
-]);
-
-// Functions needed by the debugger runtime.
-utils.InstallConstants(utils, [
- "MakeExecutionState", MakeExecutionState,
- "MakeExceptionEvent", MakeExceptionEvent,
- "MakeCompileEvent", MakeCompileEvent,
- "MakeAsyncTaskEvent", MakeAsyncTaskEvent,
-]);
-
-})
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index d8dcb0e5d5..03a60d269e 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -37,8 +37,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ leave();
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movzx_w(
+ ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount dummy(ebx);
__ InvokeFunction(edi, dummy, dummy, JUMP_FUNCTION);
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 219ed6a060..f0b47e6238 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -69,13 +69,14 @@ struct WasmDisassembly {
OffsetTable offset_table;
};
-enum PromiseDebugActionType {
- kDebugAsyncFunctionPromiseCreated,
+enum DebugAsyncActionType {
kDebugPromiseThen,
kDebugPromiseCatch,
kDebugPromiseFinally,
kDebugWillHandle,
kDebugDidHandle,
+ kAsyncFunctionSuspended,
+ kAsyncFunctionFinished
};
enum BreakLocationType {
@@ -154,16 +155,10 @@ class ConsoleDelegate {
const ConsoleContext& context) {}
virtual void Assert(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
- virtual void MarkTimeline(const ConsoleCallArguments& args,
- const ConsoleContext& context) {}
virtual void Profile(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
virtual void ProfileEnd(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
- virtual void Timeline(const ConsoleCallArguments& args,
- const ConsoleContext& context) {}
- virtual void TimelineEnd(const ConsoleCallArguments& args,
- const ConsoleContext& context) {}
virtual void Time(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
virtual void TimeEnd(const ConsoleCallArguments& args,
@@ -178,34 +173,4 @@ typedef int BreakpointId;
} // namespace debug
} // namespace v8
-// TODO(yangguo): this is legacy left over from removing v8-debug.h, and still
-// used in cctests. Let's get rid of these soon.
-namespace v8 {
-enum DebugEvent {
- Break = 1,
- Exception = 2,
- AfterCompile = 3,
- CompileError = 4,
- AsyncTaskEvent = 5,
-};
-
-class Debug {
- public:
- class EventDetails {
- public:
- virtual DebugEvent GetEvent() const = 0;
- virtual Local<Object> GetExecutionState() const = 0;
- virtual Local<Object> GetEventData() const = 0;
- virtual Local<Context> GetEventContext() const = 0;
- virtual Local<Value> GetCallbackData() const = 0;
-
- virtual Isolate* GetIsolate() const = 0;
-
- virtual ~EventDetails() {}
- };
-
- typedef void (*EventCallback)(const EventDetails& event_details);
-};
-} // namespace v8
-
#endif // V8_DEBUG_INTERFACE_TYPES_H_
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 68be1a39dd..7193d0abd1 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,36 +4,56 @@
#include "src/debug/liveedit.h"
-#include "src/assembler-inl.h"
+#include "src/api.h"
+#include "src/ast/ast-traversal-visitor.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
+#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parsing.h"
#include "src/source-position-table.h"
#include "src/v8.h"
-#include "src/v8memory.h"
namespace v8 {
namespace internal {
+namespace {
+// A general-purpose comparator between 2 arrays.
+class Comparator {
+ public:
+ // Holds 2 arrays of some elements allowing to compare any pair of
+ // element from the first array and element from the second array.
+ class Input {
+ public:
+ virtual int GetLength1() = 0;
+ virtual int GetLength2() = 0;
+ virtual bool Equals(int index1, int index2) = 0;
-void SetElementSloppy(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
- // Ignore return value from SetElement. It can only be a failure if there
- // are element setters causing exceptions and the debugger context has none
- // of these.
- Object::SetElement(object->GetIsolate(), object, index, value,
- LanguageMode::kSloppy)
- .Assert();
-}
+ protected:
+ virtual ~Input() = default;
+ };
+ // Receives compare result as a series of chunks.
+ class Output {
+ public:
+ // Puts another chunk in result list. Note that technically speaking
+ // only 3 arguments actually needed with 4th being derivable.
+ virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
+
+ protected:
+ virtual ~Output() = default;
+ };
+
+ // Finds the difference between 2 arrays of elements.
+ static void CalculateDifference(Input* input, Output* result_writer);
+};
// A simple implementation of dynamic programming algorithm. It solves
// the problem of finding the difference of 2 arrays. It uses a table of results
@@ -234,7 +254,6 @@ class Differencer {
};
};
-
void Comparator::CalculateDifference(Comparator::Input* input,
Comparator::Output* result_writer) {
Differencer differencer(input);
@@ -243,18 +262,14 @@ void Comparator::CalculateDifference(Comparator::Input* input,
differencer.SaveResult(result_writer);
}
-
-static bool CompareSubstrings(Handle<String> s1, int pos1,
- Handle<String> s2, int pos2, int len) {
+bool CompareSubstrings(Handle<String> s1, int pos1, Handle<String> s2, int pos2,
+ int len) {
for (int i = 0; i < len; i++) {
- if (s1->Get(i + pos1) != s2->Get(i + pos2)) {
- return false;
- }
+ if (s1->Get(i + pos1) != s2->Get(i + pos2)) return false;
}
return true;
}
-
// Additional to Input interface. Lets switch Input range to subrange.
// More elegant way would be to wrap one Input as another Input object
// and translate positions there, but that would cost us additional virtual
@@ -272,16 +287,9 @@ class SubrangableOutput : public Comparator::Output {
virtual void SetSubrange2(int offset, int len) = 0;
};
-
-static int min(int a, int b) {
- return a < b ? a : b;
-}
-
-
// Finds common prefix and suffix in input. This parts shouldn't take space in
// linear programming table. Enable subranging in input and output.
-static void NarrowDownInput(SubrangableInput* input,
- SubrangableOutput* output) {
+void NarrowDownInput(SubrangableInput* input, SubrangableOutput* output) {
const int len1 = input->GetLength1();
const int len2 = input->GetLength2();
@@ -290,14 +298,15 @@ static void NarrowDownInput(SubrangableInput* input,
{
common_prefix_len = 0;
- int prefix_limit = min(len1, len2);
+ int prefix_limit = std::min(len1, len2);
while (common_prefix_len < prefix_limit &&
input->Equals(common_prefix_len, common_prefix_len)) {
common_prefix_len++;
}
common_suffix_len = 0;
- int suffix_limit = min(len1 - common_prefix_len, len2 - common_prefix_len);
+ int suffix_limit =
+ std::min(len1 - common_prefix_len, len2 - common_prefix_len);
while (common_suffix_len < suffix_limit &&
input->Equals(len1 - common_suffix_len - 1,
@@ -318,40 +327,6 @@ static void NarrowDownInput(SubrangableInput* input,
}
}
-
-// A helper class that writes chunk numbers into JSArray.
-// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
-class CompareOutputArrayWriter {
- public:
- explicit CompareOutputArrayWriter(Isolate* isolate)
- : array_(isolate->factory()->NewJSArray(10)), current_size_(0) {}
-
- Handle<JSArray> GetResult() {
- return array_;
- }
-
- void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
- Isolate* isolate = array_->GetIsolate();
- SetElementSloppy(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1), isolate));
- SetElementSloppy(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
- isolate));
- SetElementSloppy(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
- isolate));
- current_size_ += 3;
- }
-
- private:
- Handle<JSArray> array_;
- int current_size_;
-};
-
-
// Represents 2 strings as 2 arrays of tokens.
// TODO(LiveEdit): Currently it's actually an array of charactres.
// Make array of tokens instead.
@@ -362,13 +337,9 @@ class TokensCompareInput : public Comparator::Input {
: s1_(s1), offset1_(offset1), len1_(len1),
s2_(s2), offset2_(offset2), len2_(len2) {
}
- virtual int GetLength1() {
- return len1_;
- }
- virtual int GetLength2() {
- return len2_;
- }
- bool Equals(int index1, int index2) {
+ int GetLength1() override { return len1_; }
+ int GetLength2() override { return len2_; }
+ bool Equals(int index1, int index2) override {
return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
}
@@ -381,47 +352,39 @@ class TokensCompareInput : public Comparator::Input {
int len2_;
};
-
-// Stores compare result in JSArray. Converts substring positions
+// Stores compare result in std::vector. Converts substring positions
// to absolute positions.
class TokensCompareOutput : public Comparator::Output {
public:
- TokensCompareOutput(CompareOutputArrayWriter* array_writer,
- int offset1, int offset2)
- : array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
- }
+ TokensCompareOutput(int offset1, int offset2,
+ std::vector<SourceChangeRange>* output)
+ : output_(output), offset1_(offset1), offset2_(offset2) {}
- void AddChunk(int pos1, int pos2, int len1, int len2) {
- array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
+ void AddChunk(int pos1, int pos2, int len1, int len2) override {
+ output_->emplace_back(
+ SourceChangeRange{pos1 + offset1_, pos1 + len1 + offset1_,
+ pos2 + offset2_, pos2 + offset2_ + len2});
}
private:
- CompareOutputArrayWriter* array_writer_;
+ std::vector<SourceChangeRange>* output_;
int offset1_;
int offset2_;
};
-
// Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
// never has terminating new line character.
class LineEndsWrapper {
public:
- explicit LineEndsWrapper(Handle<String> string)
- : ends_array_(String::CalculateLineEnds(string, false)),
- string_len_(string->length()) {
- }
+ explicit LineEndsWrapper(Isolate* isolate, Handle<String> string)
+ : ends_array_(String::CalculateLineEnds(isolate, string, false)),
+ string_len_(string->length()) {}
int length() {
return ends_array_->length() + 1;
}
// Returns start for any line including start of the imaginary line after
// the last line.
- int GetLineStart(int index) {
- if (index == 0) {
- return 0;
- } else {
- return GetLineEnd(index - 1);
- }
- }
+ int GetLineStart(int index) { return index == 0 ? 0 : GetLineEnd(index - 1); }
int GetLineEnd(int index) {
if (index == ends_array_->length()) {
// End of the last line is always an end of the whole string.
@@ -442,7 +405,6 @@ class LineEndsWrapper {
}
};
-
// Represents 2 strings as 2 arrays of lines.
class LineArrayCompareInput : public SubrangableInput {
public:
@@ -454,13 +416,9 @@ class LineArrayCompareInput : public SubrangableInput {
subrange_len1_(line_ends1_.length()),
subrange_len2_(line_ends2_.length()) {
}
- int GetLength1() {
- return subrange_len1_;
- }
- int GetLength2() {
- return subrange_len2_;
- }
- bool Equals(int index1, int index2) {
+ int GetLength1() override { return subrange_len1_; }
+ int GetLength2() override { return subrange_len2_; }
+ bool Equals(int index1, int index2) override {
index1 += subrange_offset1_;
index2 += subrange_offset2_;
@@ -476,11 +434,11 @@ class LineArrayCompareInput : public SubrangableInput {
return CompareSubstrings(s1_, line_start1, s2_, line_start2,
len1);
}
- void SetSubrange1(int offset, int len) {
+ void SetSubrange1(int offset, int len) override {
subrange_offset1_ = offset;
subrange_len1_ = len;
}
- void SetSubrange2(int offset, int len) {
+ void SetSubrange2(int offset, int len) override {
subrange_offset2_ = offset;
subrange_len2_ = len;
}
@@ -496,20 +454,25 @@ class LineArrayCompareInput : public SubrangableInput {
int subrange_len2_;
};
-
-// Stores compare result in JSArray. For each chunk tries to conduct
+// Stores compare result in std::vector. For each chunk tries to conduct
// a fine-grained nested diff token-wise.
class TokenizingLineArrayCompareOutput : public SubrangableOutput {
public:
- TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
+ TokenizingLineArrayCompareOutput(Isolate* isolate, LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
- Handle<String> s1, Handle<String> s2)
- : array_writer_(s1->GetIsolate()),
- line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
- subrange_offset1_(0), subrange_offset2_(0) {
- }
-
- void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
+ Handle<String> s1, Handle<String> s2,
+ std::vector<SourceChangeRange>* output)
+ : isolate_(isolate),
+ line_ends1_(line_ends1),
+ line_ends2_(line_ends2),
+ s1_(s1),
+ s2_(s2),
+ subrange_offset1_(0),
+ subrange_offset2_(0),
+ output_(output) {}
+
+ void AddChunk(int line_pos1, int line_pos2, int line_len1,
+ int line_len2) override {
line_pos1 += subrange_offset1_;
line_pos2 += subrange_offset2_;
@@ -520,443 +483,478 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
// Chunk is small enough to conduct a nested token-level diff.
- HandleScope subTaskScope(s1_->GetIsolate());
+ HandleScope subTaskScope(isolate_);
TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
s2_, char_pos2, char_len2);
- TokensCompareOutput tokens_output(&array_writer_, char_pos1,
- char_pos2);
+ TokensCompareOutput tokens_output(char_pos1, char_pos2, output_);
Comparator::CalculateDifference(&tokens_input, &tokens_output);
} else {
- array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
+ output_->emplace_back(SourceChangeRange{
+ char_pos1, char_pos1 + char_len1, char_pos2, char_pos2 + char_len2});
}
}
- void SetSubrange1(int offset, int len) {
+ void SetSubrange1(int offset, int len) override {
subrange_offset1_ = offset;
}
- void SetSubrange2(int offset, int len) {
+ void SetSubrange2(int offset, int len) override {
subrange_offset2_ = offset;
}
- Handle<JSArray> GetResult() {
- return array_writer_.GetResult();
- }
-
private:
static const int CHUNK_LEN_LIMIT = 800;
- CompareOutputArrayWriter array_writer_;
+ Isolate* isolate_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
Handle<String> s1_;
Handle<String> s2_;
int subrange_offset1_;
int subrange_offset2_;
+ std::vector<SourceChangeRange>* output_;
};
+struct SourcePositionEvent {
+ enum Type { LITERAL_STARTS, LITERAL_ENDS, DIFF_STARTS, DIFF_ENDS };
-Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
- Handle<String> s2) {
- s1 = String::Flatten(s1);
- s2 = String::Flatten(s2);
-
- LineEndsWrapper line_ends1(s1);
- LineEndsWrapper line_ends2(s2);
-
- LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
- TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
-
- NarrowDownInput(&input, &output);
-
- Comparator::CalculateDifference(&input, &output);
-
- return output.GetResult();
-}
-
-
-// Unwraps JSValue object, returning its field "value"
-static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
- return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
-}
+ int position;
+ Type type;
+ union {
+ FunctionLiteral* literal;
+ int pos_diff;
+ };
-// Wraps any object into a OpaqueReference, that will hide the object
-// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Handle<HeapObject> object) {
- Isolate* isolate = object->GetIsolate();
- Handle<JSFunction> constructor = isolate->opaque_reference_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
- result->set_value(*object);
- return result;
-}
-
-
-static Handle<SharedFunctionInfo> UnwrapSharedFunctionInfoFromJSValue(
- Handle<JSValue> jsValue) {
- Object* shared = jsValue->value();
- CHECK(shared->IsSharedFunctionInfo());
- return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(shared));
-}
-
-
-static int GetArrayLength(Handle<JSArray> array) {
- Object* length = array->length();
- CHECK(length->IsSmi());
- return Smi::ToInt(length);
-}
-
-void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
- int start_position,
- int end_position, int param_num,
- int parent_index,
- int function_literal_id) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
- this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kParentIndexOffset_, parent_index);
- this->SetSmiValueField(kFunctionLiteralIdOffset_, function_literal_id);
-}
-
-void FunctionInfoWrapper::SetSharedFunctionInfo(
- Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedFunctionInfoOffset_, info_holder);
-}
-
-Handle<SharedFunctionInfo> FunctionInfoWrapper::GetSharedFunctionInfo() {
- Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
- Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- CHECK(raw_result->IsSharedFunctionInfo());
- return Handle<SharedFunctionInfo>::cast(raw_result);
-}
-
-void SharedInfoWrapper::SetProperties(Handle<String> name,
- int start_position,
- int end_position,
- Handle<SharedFunctionInfo> info) {
- HandleScope scope(isolate());
- this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(info);
- this->SetField(kSharedInfoOffset_, info_holder);
- this->SetSmiValueField(kStartPositionOffset_, start_position);
- this->SetSmiValueField(kEndPositionOffset_, end_position);
-}
-
-
-Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() {
- Handle<Object> element = this->GetField(kSharedInfoOffset_);
- Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
- return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
-}
-
-
-void LiveEdit::InitializeThreadLocal(Debug* debug) {
- debug->thread_local_.restart_fp_ = 0;
-}
-
-
-MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
- Handle<String> source) {
- Isolate* isolate = script->GetIsolate();
-
- MaybeHandle<JSArray> infos;
- Handle<Object> original_source =
- Handle<Object>(script->source(), isolate);
- script->set_source(*source);
-
- {
- // Creating verbose TryCatch from public API is currently the only way to
- // force code save location. We do not use this the object directly.
- v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- try_catch.SetVerbose(true);
-
- // A logical 'try' section.
- infos = Compiler::CompileForLiveEdit(script);
- }
-
- // A logical 'catch' section.
- Handle<JSObject> rethrow_exception;
- if (isolate->has_pending_exception()) {
- Handle<Object> exception(isolate->pending_exception(), isolate);
- MessageLocation message_location = isolate->GetMessageLocation();
-
- isolate->clear_pending_message();
- isolate->clear_pending_exception();
-
- // If possible, copy positions from message object to exception object.
- if (exception->IsJSObject() && !message_location.script().is_null()) {
- rethrow_exception = Handle<JSObject>::cast(exception);
-
- Factory* factory = isolate->factory();
- Handle<String> start_pos_key = factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("startPosition"));
- Handle<String> end_pos_key =
- factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("endPosition"));
- Handle<String> script_obj_key =
- factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptObject"));
- Handle<Smi> start_pos(
- Smi::FromInt(message_location.start_pos()), isolate);
- Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
- Handle<JSObject> script_obj =
- Script::GetWrapper(message_location.script());
- Object::SetProperty(rethrow_exception, start_pos_key, start_pos,
- LanguageMode::kSloppy)
- .Assert();
- Object::SetProperty(rethrow_exception, end_pos_key, end_pos,
- LanguageMode::kSloppy)
- .Assert();
- Object::SetProperty(rethrow_exception, script_obj_key, script_obj,
- LanguageMode::kSloppy)
- .Assert();
+ SourcePositionEvent(FunctionLiteral* literal, bool is_start)
+ : position(is_start ? literal->start_position()
+ : literal->end_position()),
+ type(is_start ? LITERAL_STARTS : LITERAL_ENDS),
+ literal(literal) {}
+ SourcePositionEvent(const SourceChangeRange& change, bool is_start)
+ : position(is_start ? change.start_position : change.end_position),
+ type(is_start ? DIFF_STARTS : DIFF_ENDS),
+ pos_diff((change.new_end_position - change.new_start_position) -
+ (change.end_position - change.start_position)) {}
+
+ static bool LessThan(const SourcePositionEvent& a,
+ const SourcePositionEvent& b) {
+ if (a.position != b.position) return a.position < b.position;
+ if (a.type != b.type) return a.type < b.type;
+ if (a.type == LITERAL_STARTS && b.type == LITERAL_STARTS) {
+ return a.literal->end_position() < b.literal->end_position();
+ } else if (a.type == LITERAL_ENDS && b.type == LITERAL_ENDS) {
+ return a.literal->start_position() > b.literal->start_position();
+ } else {
+ return a.pos_diff < b.pos_diff;
}
}
+};
- // A logical 'finally' section.
- script->set_source(*original_source);
+struct FunctionLiteralChange {
+ // If any of start/end position is kNoSourcePosition, this literal is
+ // considered damaged and will not be mapped and edited at all.
+ int new_start_position;
+ int new_end_position;
+ bool has_changes;
+ FunctionLiteral* outer_literal;
+
+ explicit FunctionLiteralChange(int new_start_position, FunctionLiteral* outer)
+ : new_start_position(new_start_position),
+ new_end_position(kNoSourcePosition),
+ has_changes(false),
+ outer_literal(outer) {}
+};
- if (rethrow_exception.is_null()) {
- return infos.ToHandleChecked();
- } else {
- return isolate->Throw<JSArray>(rethrow_exception);
+using FunctionLiteralChanges =
+ std::unordered_map<FunctionLiteral*, FunctionLiteralChange>;
+void CalculateFunctionLiteralChanges(
+ const std::vector<FunctionLiteral*>& literals,
+ const std::vector<SourceChangeRange>& diffs,
+ FunctionLiteralChanges* result) {
+ std::vector<SourcePositionEvent> events;
+ events.reserve(literals.size() * 2 + diffs.size() * 2);
+ for (FunctionLiteral* literal : literals) {
+ events.emplace_back(literal, true);
+ events.emplace_back(literal, false);
+ }
+ for (const SourceChangeRange& diff : diffs) {
+ events.emplace_back(diff, true);
+ events.emplace_back(diff, false);
+ }
+ std::sort(events.begin(), events.end(), SourcePositionEvent::LessThan);
+ bool inside_diff = false;
+ int delta = 0;
+ std::stack<std::pair<FunctionLiteral*, FunctionLiteralChange>> literal_stack;
+ for (const SourcePositionEvent& event : events) {
+ switch (event.type) {
+ case SourcePositionEvent::DIFF_ENDS:
+ DCHECK(inside_diff);
+ inside_diff = false;
+ delta += event.pos_diff;
+ break;
+ case SourcePositionEvent::LITERAL_ENDS: {
+ DCHECK_EQ(literal_stack.top().first, event.literal);
+ FunctionLiteralChange& change = literal_stack.top().second;
+ change.new_end_position = inside_diff
+ ? kNoSourcePosition
+ : event.literal->end_position() + delta;
+ result->insert(literal_stack.top());
+ literal_stack.pop();
+ break;
+ }
+ case SourcePositionEvent::LITERAL_STARTS:
+ literal_stack.push(std::make_pair(
+ event.literal,
+ FunctionLiteralChange(
+ inside_diff ? kNoSourcePosition
+ : event.literal->start_position() + delta,
+ literal_stack.empty() ? nullptr : literal_stack.top().first)));
+ break;
+ case SourcePositionEvent::DIFF_STARTS:
+ DCHECK(!inside_diff);
+ inside_diff = true;
+ if (!literal_stack.empty()) {
+ // Note that outer literal has not necessarily changed, unless the
+ // diff goes past the end of this literal. In this case, we'll mark
+ // this function as damaged and parent as changed later in
+ // MapLiterals.
+ literal_stack.top().second.has_changes = true;
+ }
+ break;
+ }
}
}
-// Patch function feedback vector.
-// The feedback vector is a cache for complex object boilerplates and for a
-// native context. We must clean cached values, or if the structure of the
-// vector itself changes we need to allocate a new one.
-class FeedbackVectorFixer {
- public:
- static void PatchFeedbackVector(FunctionInfoWrapper* compile_info_wrapper,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- // When feedback metadata changes, we have to create new array instances.
- // Since we cannot create instances when iterating heap, we should first
- // collect all functions and fix their literal arrays.
- Handle<FixedArray> function_instances =
- CollectJSFunctions(shared_info, isolate);
-
- for (int i = 0; i < function_instances->length(); i++) {
- Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FeedbackCell> feedback_cell =
- isolate->factory()->NewManyClosuresCell(
- isolate->factory()->undefined_value());
- fun->set_feedback_cell(*feedback_cell);
- // Only create feedback vectors if we already have the metadata.
- if (shared_info->is_compiled()) JSFunction::EnsureFeedbackVector(fun);
+// Function which has not changed itself, but if any variable in its
+// outer context has been added/removed, we must consider this function
+// as damaged and not update references to it.
+// This is because old compiled function has hardcoded references to
+// it's outer context.
+bool HasChangedScope(FunctionLiteral* a, FunctionLiteral* b) {
+ Scope* scope_a = a->scope()->outer_scope();
+ Scope* scope_b = b->scope()->outer_scope();
+ while (scope_a && scope_b) {
+ std::unordered_map<int, Handle<String>> vars;
+ for (Variable* var : *scope_a->locals()) {
+ if (!var->IsContextSlot()) continue;
+ vars[var->index()] = var->name();
+ }
+ for (Variable* var : *scope_b->locals()) {
+ if (!var->IsContextSlot()) continue;
+ auto it = vars.find(var->index());
+ if (it == vars.end()) return true;
+ if (*it->second != *var->name()) return true;
}
+ scope_a = scope_a->outer_scope();
+ scope_b = scope_b->outer_scope();
}
+ return scope_a != scope_b;
+}
- private:
- // Iterates all function instances in the HEAP that refers to the
- // provided shared_info.
- template<typename Visitor>
- static void IterateJSFunctions(Handle<SharedFunctionInfo> shared_info,
- Visitor* visitor) {
- HeapIterator iterator(shared_info->GetHeap());
- for (HeapObject* obj = iterator.next(); obj != nullptr;
- obj = iterator.next()) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- if (function->shared() == *shared_info) {
- visitor->visit(function);
- }
+enum ChangeState { UNCHANGED, CHANGED, DAMAGED };
+
+using LiteralMap = std::unordered_map<FunctionLiteral*, FunctionLiteral*>;
+void MapLiterals(const FunctionLiteralChanges& changes,
+ const std::vector<FunctionLiteral*>& new_literals,
+ LiteralMap* unchanged, LiteralMap* changed) {
+ std::map<std::pair<int, int>, FunctionLiteral*> position_to_new_literal;
+ for (FunctionLiteral* literal : new_literals) {
+ DCHECK(literal->start_position() != kNoSourcePosition);
+ DCHECK(literal->end_position() != kNoSourcePosition);
+ position_to_new_literal[std::make_pair(literal->start_position(),
+ literal->end_position())] = literal;
+ }
+ LiteralMap mappings;
+ std::unordered_map<FunctionLiteral*, ChangeState> change_state;
+ for (const auto& change_pair : changes) {
+ FunctionLiteral* literal = change_pair.first;
+ const FunctionLiteralChange& change = change_pair.second;
+ auto it = position_to_new_literal.find(
+ std::make_pair(change.new_start_position, change.new_end_position));
+ if (it == position_to_new_literal.end() ||
+ HasChangedScope(literal, it->second)) {
+ change_state[literal] = ChangeState::DAMAGED;
+ if (!change.outer_literal) continue;
+ if (change_state[change.outer_literal] != ChangeState::DAMAGED) {
+ change_state[change.outer_literal] = ChangeState::CHANGED;
+ }
+ } else {
+ mappings[literal] = it->second;
+ if (change_state.find(literal) == change_state.end()) {
+ change_state[literal] =
+ change.has_changes ? ChangeState::CHANGED : ChangeState::UNCHANGED;
}
}
}
-
- // Finds all instances of JSFunction that refers to the provided shared_info
- // and returns array with them.
- static Handle<FixedArray> CollectJSFunctions(
- Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
- CountVisitor count_visitor;
- count_visitor.count = 0;
- IterateJSFunctions(shared_info, &count_visitor);
- int size = count_visitor.count;
-
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
- if (size > 0) {
- CollectVisitor collect_visitor(result);
- IterateJSFunctions(shared_info, &collect_visitor);
+ for (const auto& mapping : mappings) {
+ if (change_state[mapping.first] == ChangeState::UNCHANGED) {
+ (*unchanged)[mapping.first] = mapping.second;
+ } else if (change_state[mapping.first] == ChangeState::CHANGED) {
+ (*changed)[mapping.first] = mapping.second;
}
- return result;
}
+}
- class CountVisitor {
- public:
- void visit(JSFunction* fun) {
- count++;
- }
- int count;
- };
+class CollectFunctionLiterals final
+ : public AstTraversalVisitor<CollectFunctionLiterals> {
+ public:
+ CollectFunctionLiterals(Isolate* isolate, AstNode* root)
+ : AstTraversalVisitor<CollectFunctionLiterals>(isolate, root) {}
+ void VisitFunctionLiteral(FunctionLiteral* lit) {
+ AstTraversalVisitor::VisitFunctionLiteral(lit);
+ literals_->push_back(lit);
+ }
+ void Run(std::vector<FunctionLiteral*>* literals) {
+ literals_ = literals;
+ AstTraversalVisitor::Run();
+ literals_ = nullptr;
+ }
- class CollectVisitor {
- public:
- explicit CollectVisitor(Handle<FixedArray> output)
- : m_output(output), m_pos(0) {}
+ private:
+ std::vector<FunctionLiteral*>* literals_;
+};
- void visit(JSFunction* fun) {
- m_output->set(m_pos, fun);
- m_pos++;
+bool ParseScript(Isolate* isolate, ParseInfo* parse_info, bool compile_as_well,
+ std::vector<FunctionLiteral*>* literals,
+ debug::LiveEditResult* result) {
+ parse_info->set_eager();
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ Handle<SharedFunctionInfo> shared;
+ bool success = false;
+ if (compile_as_well) {
+ success =
+ Compiler::CompileForLiveEdit(parse_info, isolate).ToHandle(&shared);
+ } else {
+ success = parsing::ParseProgram(parse_info, isolate);
+ if (success) {
+ success = Compiler::Analyze(parse_info);
+ parse_info->ast_value_factory()->Internalize(isolate);
}
- private:
- Handle<FixedArray> m_output;
- int m_pos;
+ }
+ if (!success) {
+ isolate->OptionalRescheduleException(false);
+ DCHECK(try_catch.HasCaught());
+ result->message = try_catch.Message()->Get();
+ auto self = Utils::OpenHandle(*try_catch.Message());
+ auto msg = i::Handle<i::JSMessageObject>::cast(self);
+ result->line_number = msg->GetLineNumber();
+ result->column_number = msg->GetColumnNumber();
+ result->status = debug::LiveEditResult::COMPILE_ERROR;
+ return false;
+ }
+ CollectFunctionLiterals(isolate, parse_info->literal()).Run(literals);
+ return true;
+}
+
+struct FunctionData {
+ FunctionData(FunctionLiteral* literal, bool should_restart)
+ : literal(literal),
+ stack_position(NOT_ON_STACK),
+ should_restart(should_restart) {}
+
+ FunctionLiteral* literal;
+ MaybeHandle<SharedFunctionInfo> shared;
+ std::vector<Handle<JSFunction>> js_functions;
+ std::vector<Handle<JSGeneratorObject>> running_generators;
+ // In case of multiple functions with different stack position, the latest
+ // one (in the order below) is used, since it is the most restrictive.
+ // This is important only for functions to be restarted.
+ enum StackPosition {
+ NOT_ON_STACK,
+ ABOVE_BREAK_FRAME,
+ PATCHABLE,
+ BELOW_NON_DROPPABLE_FRAME,
+ ARCHIVED_THREAD,
};
+ StackPosition stack_position;
+ bool should_restart;
};
+class FunctionDataMap : public ThreadVisitor {
+ public:
+ void AddInterestingLiteral(int script_id, FunctionLiteral* literal,
+ bool should_restart) {
+ map_.emplace(std::make_pair(script_id, literal->function_literal_id()),
+ FunctionData{literal, should_restart});
+ }
-void LiveEdit::ReplaceFunctionCode(
- Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array) {
- Isolate* isolate = new_compile_info_array->GetIsolate();
-
- FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
-
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- Handle<SharedFunctionInfo> new_shared_info =
- compile_info_wrapper.GetSharedFunctionInfo();
-
- if (shared_info->is_compiled()) {
- // Clear old bytecode. This will trigger self-healing if we do not install
- // new bytecode.
- shared_info->FlushCompiled();
- if (new_shared_info->HasInterpreterData()) {
- shared_info->set_interpreter_data(new_shared_info->interpreter_data());
- } else {
- shared_info->set_bytecode_array(new_shared_info->GetBytecodeArray());
+ bool Lookup(Isolate* isolate, SharedFunctionInfo* sfi, FunctionData** data) {
+ int function_literal_id = sfi->FunctionLiteralId(isolate);
+ if (!sfi->script()->IsScript() || function_literal_id == -1) {
+ return false;
}
-
- if (shared_info->HasBreakInfo()) {
- // Existing break points will be re-applied. Reset the debug info here.
- isolate->debug()->RemoveBreakInfoAndMaybeFree(
- handle(shared_info->GetDebugInfo()));
+ Script* script = Script::cast(sfi->script());
+ return Lookup(script->id(), function_literal_id, data);
+ }
+
+ bool Lookup(Handle<Script> script, FunctionLiteral* literal,
+ FunctionData** data) {
+ return Lookup(script->id(), literal->function_literal_id(), data);
+ }
+
+ void Fill(Isolate* isolate, Address* restart_frame_fp) {
+ {
+ HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
+ while (HeapObject* obj = iterator.next()) {
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ FunctionData* data = nullptr;
+ if (!Lookup(isolate, sfi, &data)) continue;
+ data->shared = handle(sfi, isolate);
+ } else if (obj->IsJSFunction()) {
+ JSFunction* js_function = JSFunction::cast(obj);
+ SharedFunctionInfo* sfi = js_function->shared();
+ FunctionData* data = nullptr;
+ if (!Lookup(isolate, sfi, &data)) continue;
+ data->js_functions.emplace_back(js_function, isolate);
+ } else if (obj->IsJSGeneratorObject()) {
+ JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
+ if (gen->is_closed()) continue;
+ SharedFunctionInfo* sfi = gen->function()->shared();
+ FunctionData* data = nullptr;
+ if (!Lookup(isolate, sfi, &data)) continue;
+ data->running_generators.emplace_back(gen, isolate);
+ }
+ }
+ }
+ FunctionData::StackPosition stack_position =
+ isolate->debug()->break_frame_id() == StackFrame::NO_ID
+ ? FunctionData::PATCHABLE
+ : FunctionData::ABOVE_BREAK_FRAME;
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ StackFrame* frame = it.frame();
+ if (stack_position == FunctionData::ABOVE_BREAK_FRAME) {
+ if (frame->id() == isolate->debug()->break_frame_id()) {
+ stack_position = FunctionData::PATCHABLE;
+ }
+ }
+ if (stack_position == FunctionData::PATCHABLE &&
+ (frame->is_exit() || frame->is_builtin_exit())) {
+ stack_position = FunctionData::BELOW_NON_DROPPABLE_FRAME;
+ continue;
+ }
+ if (!frame->is_java_script()) continue;
+ std::vector<Handle<SharedFunctionInfo>> sfis;
+ JavaScriptFrame::cast(frame)->GetFunctions(&sfis);
+ for (auto& sfi : sfis) {
+ if (stack_position == FunctionData::PATCHABLE &&
+ IsResumableFunction(sfi->kind())) {
+ stack_position = FunctionData::BELOW_NON_DROPPABLE_FRAME;
+ }
+ FunctionData* data = nullptr;
+ if (!Lookup(isolate, *sfi, &data)) continue;
+ if (!data->should_restart) continue;
+ data->stack_position = stack_position;
+ *restart_frame_fp = frame->fp();
+ }
}
- shared_info->set_scope_info(new_shared_info->scope_info());
- shared_info->set_feedback_metadata(new_shared_info->feedback_metadata());
- shared_info->DisableOptimization(BailoutReason::kLiveEdit);
- } else {
- // There should not be any feedback metadata. Keep the outer scope info the
- // same.
- DCHECK(!shared_info->HasFeedbackMetadata());
- }
- int start_position = compile_info_wrapper.GetStartPosition();
- int end_position = compile_info_wrapper.GetEndPosition();
- // TODO(cbruni): only store position information on the SFI.
- shared_info->set_raw_start_position(start_position);
- shared_info->set_raw_end_position(end_position);
- if (shared_info->scope_info()->HasPositionInfo()) {
- shared_info->scope_info()->SetPositionInfo(start_position, end_position);
+ isolate->thread_manager()->IterateArchivedThreads(this);
}
- FeedbackVectorFixer::PatchFeedbackVector(&compile_info_wrapper, shared_info,
- isolate);
-
- isolate->debug()->DeoptimizeFunction(shared_info);
-}
-
-void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
- int new_function_literal_id) {
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
- shared_info->set_function_literal_id(new_function_literal_id);
- shared_info_array->GetIsolate()->debug()->DeoptimizeFunction(shared_info);
-}
-
-void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
- Isolate* isolate = script->GetIsolate();
- Handle<WeakFixedArray> old_infos(script->shared_function_infos(), isolate);
- Handle<WeakFixedArray> new_infos(
- isolate->factory()->NewWeakFixedArray(max_function_literal_id + 1));
- script->set_shared_function_infos(*new_infos);
- SharedFunctionInfo::ScriptIterator iterator(isolate, old_infos);
- while (SharedFunctionInfo* shared = iterator.Next()) {
- // We can't use SharedFunctionInfo::SetScript(info, undefined_value()) here,
- // as we severed the link from the Script to the SharedFunctionInfo above.
- Handle<SharedFunctionInfo> info(shared, isolate);
- info->set_script(isolate->heap()->undefined_value());
- Handle<Object> new_noscript_list = FixedArrayOfWeakCells::Add(
- isolate->factory()->noscript_shared_function_infos(), info);
- isolate->heap()->SetRootNoScriptSharedFunctionInfos(*new_noscript_list);
-
- // Put the SharedFunctionInfo at its new, correct location.
- SharedFunctionInfo::SetScript(info, script);
+ private:
+ bool Lookup(int script_id, int function_literal_id, FunctionData** data) {
+ auto it = map_.find(std::make_pair(script_id, function_literal_id));
+ if (it == map_.end()) return false;
+ *data = &it->second;
+ return true;
+ }
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ std::vector<Handle<SharedFunctionInfo>> sfis;
+ it.frame()->GetFunctions(&sfis);
+ for (auto& sfi : sfis) {
+ FunctionData* data = nullptr;
+ if (!Lookup(isolate, *sfi, &data)) continue;
+ data->stack_position = FunctionData::ARCHIVED_THREAD;
+ }
+ }
}
-}
-void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle) {
- Handle<SharedFunctionInfo> shared_info =
- UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
- Isolate* isolate = function_wrapper->GetIsolate();
- CHECK(script_handle->IsScript() || script_handle->IsUndefined(isolate));
- SharedFunctionInfo::SetScript(shared_info, script_handle);
- shared_info->DisableOptimization(BailoutReason::kLiveEdit);
+ using UniqueLiteralId = std::pair<int, int>; // script_id + literal_id
+ std::map<UniqueLiteralId, FunctionData> map_;
+};
- function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
+bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
+ Handle<Script> new_script,
+ FunctionDataMap& function_data_map,
+ debug::LiveEditResult* result) {
+ debug::LiveEditResult::Status status = debug::LiveEditResult::OK;
+ for (const auto& mapping : changed) {
+ FunctionData* data = nullptr;
+ function_data_map.Lookup(script, mapping.first, &data);
+ FunctionData* new_data = nullptr;
+ function_data_map.Lookup(new_script, mapping.second, &new_data);
+ Handle<SharedFunctionInfo> sfi;
+ if (!data->shared.ToHandle(&sfi)) {
+ continue;
+ } else if (!data->should_restart) {
+ UNREACHABLE();
+ } else if (data->stack_position == FunctionData::ABOVE_BREAK_FRAME) {
+ status = debug::LiveEditResult::BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME;
+ } else if (data->stack_position ==
+ FunctionData::BELOW_NON_DROPPABLE_FRAME) {
+ status =
+ debug::LiveEditResult::BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME;
+ } else if (!data->running_generators.empty()) {
+ status = debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR;
+ } else if (data->stack_position == FunctionData::ARCHIVED_THREAD) {
+ status = debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION;
+ }
+ if (status != debug::LiveEditResult::OK) {
+ result->status = status;
+ return false;
+ }
+ }
+ return true;
}
-namespace {
-// For a script text change (defined as position_change_array), translates
-// position in unchanged text to position in changed text.
-// Text change is a set of non-overlapping regions in text, that have changed
-// their contents and length. It is specified as array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-// Only position in text beyond any changes may be successfully translated.
-// If a positions is inside some region that changed, result is currently
-// undefined.
-static int TranslatePosition(int original_position,
- Handle<JSArray> position_change_array) {
- int position_diff = 0;
- int array_len = GetArrayLength(position_change_array);
- Isolate* isolate = position_change_array->GetIsolate();
- // TODO(635): binary search may be used here
- for (int i = 0; i < array_len; i += 3) {
- HandleScope scope(isolate);
- Handle<Object> element =
- JSReceiver::GetElement(isolate, position_change_array, i)
- .ToHandleChecked();
- CHECK(element->IsSmi());
- int chunk_start = Handle<Smi>::cast(element)->value();
- if (original_position < chunk_start) {
+bool CanRestartFrame(Isolate* isolate, Address fp,
+ FunctionDataMap& function_data_map,
+ const LiteralMap& changed, debug::LiveEditResult* result) {
+ DCHECK_GT(fp, 0);
+ StackFrame* restart_frame = nullptr;
+ StackFrameIterator it(isolate);
+ for (; !it.done(); it.Advance()) {
+ if (it.frame()->fp() == fp) {
+ restart_frame = it.frame();
break;
}
- element = JSReceiver::GetElement(isolate, position_change_array, i + 1)
- .ToHandleChecked();
- CHECK(element->IsSmi());
- int chunk_end = Handle<Smi>::cast(element)->value();
- // Position mustn't be inside a chunk.
- DCHECK(original_position >= chunk_end);
- element = JSReceiver::GetElement(isolate, position_change_array, i + 2)
- .ToHandleChecked();
- CHECK(element->IsSmi());
- int chunk_changed_end = Handle<Smi>::cast(element)->value();
- position_diff = chunk_changed_end - chunk_end;
- }
-
- return original_position + position_diff;
+ }
+ DCHECK(restart_frame && restart_frame->is_java_script());
+ if (!LiveEdit::kFrameDropperSupported) {
+ result->status = debug::LiveEditResult::FRAME_RESTART_IS_NOT_SUPPORTED;
+ return false;
+ }
+ std::vector<Handle<SharedFunctionInfo>> sfis;
+ JavaScriptFrame::cast(restart_frame)->GetFunctions(&sfis);
+ for (auto& sfi : sfis) {
+ FunctionData* data = nullptr;
+ if (!function_data_map.Lookup(isolate, *sfi, &data)) continue;
+ auto new_literal_it = changed.find(data->literal);
+ if (new_literal_it == changed.end()) continue;
+ if (new_literal_it->second->scope()->new_target_var()) {
+ result->status =
+ debug::LiveEditResult::BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME;
+ return false;
+ }
+ }
+ return true;
}
void TranslateSourcePositionTable(Handle<BytecodeArray> code,
- Handle<JSArray> position_change_array) {
+ const std::vector<SourceChangeRange>& diffs) {
Isolate* isolate = code->GetIsolate();
SourcePositionTableBuilder builder;
- Handle<ByteArray> source_position_table(code->SourcePositionTable());
+ Handle<ByteArray> source_position_table(code->SourcePositionTable(), isolate);
for (SourcePositionTableIterator iterator(*source_position_table);
!iterator.done(); iterator.Advance()) {
SourcePosition position = iterator.source_position();
position.SetScriptOffset(
- TranslatePosition(position.ScriptOffset(), position_change_array));
+ LiveEdit::TranslatePosition(diffs, position.ScriptOffset()));
builder.AddPosition(iterator.code_offset(), position,
iterator.is_statement());
}
@@ -968,608 +966,329 @@ void TranslateSourcePositionTable(Handle<BytecodeArray> code,
CodeLinePosInfoRecordEvent(code->GetFirstBytecodeAddress(),
*new_source_position_table));
}
-} // namespace
-
-void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
- Handle<JSArray> position_change_array) {
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
-
- int old_function_start = info->StartPosition();
- int new_function_start = TranslatePosition(old_function_start,
- position_change_array);
- int new_function_end =
- TranslatePosition(info->EndPosition(), position_change_array);
- int new_function_token_pos =
- TranslatePosition(info->function_token_position(), position_change_array);
-
- info->set_raw_start_position(new_function_start);
- info->set_raw_end_position(new_function_end);
- // TODO(cbruni): Allocate helper ScopeInfo once the position fields are gone
- // on the SFI.
- if (info->scope_info()->HasPositionInfo()) {
- info->scope_info()->SetPositionInfo(new_function_start, new_function_end);
- }
- info->set_function_token_position(new_function_token_pos);
-
- if (info->HasBytecodeArray()) {
- TranslateSourcePositionTable(handle(info->GetBytecodeArray()),
- position_change_array);
- }
- if (info->HasBreakInfo()) {
- // Existing break points will be re-applied. Reset the debug info here.
- info->GetIsolate()->debug()->RemoveBreakInfoAndMaybeFree(
- handle(info->GetDebugInfo()));
+
+void UpdatePositions(Isolate* isolate, Handle<SharedFunctionInfo> sfi,
+ const std::vector<SourceChangeRange>& diffs) {
+ int old_start_position = sfi->StartPosition();
+ int new_start_position =
+ LiveEdit::TranslatePosition(diffs, old_start_position);
+ int new_end_position = LiveEdit::TranslatePosition(diffs, sfi->EndPosition());
+ int new_function_token_position =
+ LiveEdit::TranslatePosition(diffs, sfi->function_token_position());
+ sfi->SetPosition(new_start_position, new_end_position);
+ sfi->SetFunctionTokenPosition(new_function_token_position,
+ new_start_position);
+ if (sfi->HasBytecodeArray()) {
+ TranslateSourcePositionTable(handle(sfi->GetBytecodeArray(), isolate),
+ diffs);
}
}
+} // anonymous namespace
+void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
+ Handle<String> new_source, bool preview,
+ debug::LiveEditResult* result) {
+ std::vector<SourceChangeRange> diffs;
+ LiveEdit::CompareStrings(isolate,
+ handle(String::cast(script->source()), isolate),
+ new_source, &diffs);
+ if (diffs.empty()) {
+ result->status = debug::LiveEditResult::OK;
+ return;
+ }
-static Handle<Script> CreateScriptCopy(Handle<Script> original) {
- Isolate* isolate = original->GetIsolate();
-
- Handle<String> original_source(String::cast(original->source()));
- Handle<Script> copy = isolate->factory()->NewScript(original_source);
-
- copy->set_name(original->name());
- copy->set_line_offset(original->line_offset());
- copy->set_column_offset(original->column_offset());
- copy->set_type(original->type());
- copy->set_context_data(original->context_data());
- copy->set_eval_from_shared_or_wrapped_arguments(
- original->eval_from_shared_or_wrapped_arguments());
- copy->set_eval_from_position(original->eval_from_position());
+ ParseInfo parse_info(isolate, script);
+ std::vector<FunctionLiteral*> literals;
+ if (!ParseScript(isolate, &parse_info, false, &literals, result)) return;
- Handle<WeakFixedArray> infos(isolate->factory()->NewWeakFixedArray(
- original->shared_function_infos()->length()));
- copy->set_shared_function_infos(*infos);
+ Handle<Script> new_script = isolate->factory()->CloneScript(script);
+ new_script->set_source(*new_source);
+ std::vector<FunctionLiteral*> new_literals;
+ ParseInfo new_parse_info(isolate, new_script);
+ if (!ParseScript(isolate, &new_parse_info, true, &new_literals, result)) {
+ return;
+ }
- // Copy all the flags, but clear compilation state.
- copy->set_flags(original->flags());
- copy->set_compilation_state(Script::COMPILATION_STATE_INITIAL);
+ FunctionLiteralChanges literal_changes;
+ CalculateFunctionLiteralChanges(literals, diffs, &literal_changes);
- return copy;
-}
+ LiteralMap changed;
+ LiteralMap unchanged;
+ MapLiterals(literal_changes, new_literals, &unchanged, &changed);
-Handle<Object> LiveEdit::ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name) {
- Isolate* isolate = original_script->GetIsolate();
- Handle<Object> old_script_object;
- if (old_script_name->IsString()) {
- Handle<Script> old_script = CreateScriptCopy(original_script);
- old_script->set_name(String::cast(*old_script_name));
- old_script_object = old_script;
- isolate->debug()->OnAfterCompile(old_script);
- } else {
- old_script_object = isolate->factory()->null_value();
+ FunctionDataMap function_data_map;
+ for (const auto& mapping : changed) {
+ function_data_map.AddInterestingLiteral(script->id(), mapping.first, true);
+ function_data_map.AddInterestingLiteral(new_script->id(), mapping.second,
+ false);
}
+ for (const auto& mapping : unchanged) {
+ function_data_map.AddInterestingLiteral(script->id(), mapping.first, false);
+ }
+ Address restart_frame_fp = 0;
+ function_data_map.Fill(isolate, &restart_frame_fp);
- original_script->set_source(*new_source);
-
- // Drop line ends so that they will be recalculated.
- original_script->set_line_ends(isolate->heap()->undefined_value());
-
- return old_script_object;
-}
+ if (!CanPatchScript(changed, script, new_script, function_data_map, result)) {
+ return;
+ }
+ if (restart_frame_fp &&
+ !CanRestartFrame(isolate, restart_frame_fp, function_data_map, changed,
+ result)) {
+ return;
+ }
+ if (preview) {
+ result->status = debug::LiveEditResult::OK;
+ return;
+ }
+ for (const auto& mapping : unchanged) {
+ FunctionData* data = nullptr;
+ if (!function_data_map.Lookup(script, mapping.first, &data)) continue;
+ Handle<SharedFunctionInfo> sfi;
+ if (!data->shared.ToHandle(&sfi)) continue;
+ DCHECK_EQ(sfi->script(), *script);
-void LiveEdit::ReplaceRefToNestedFunction(
- Handle<JSValue> parent_function_wrapper,
- Handle<JSValue> orig_function_wrapper,
- Handle<JSValue> subst_function_wrapper) {
+ isolate->compilation_cache()->Remove(sfi);
+ isolate->debug()->DeoptimizeFunction(sfi);
+ if (sfi->HasDebugInfo()) {
+ Handle<DebugInfo> debug_info(sfi->GetDebugInfo(), isolate);
+ isolate->debug()->RemoveBreakInfoAndMaybeFree(debug_info);
+ }
+ UpdatePositions(isolate, sfi, diffs);
- Handle<SharedFunctionInfo> parent_shared =
- UnwrapSharedFunctionInfoFromJSValue(parent_function_wrapper);
- Handle<SharedFunctionInfo> orig_shared =
- UnwrapSharedFunctionInfoFromJSValue(orig_function_wrapper);
- Handle<SharedFunctionInfo> subst_shared =
- UnwrapSharedFunctionInfoFromJSValue(subst_function_wrapper);
+ MaybeObject* weak_redundant_new_sfi =
+ new_script->shared_function_infos()->Get(
+ mapping.second->function_literal_id());
- for (RelocIterator it(parent_shared->GetCode()); !it.done(); it.next()) {
- if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- if (it.rinfo()->target_object() == *orig_shared) {
- it.rinfo()->set_target_object(*subst_shared);
+ sfi->set_script(*new_script);
+ if (sfi->HasUncompiledData()) {
+ sfi->uncompiled_data()->set_function_literal_id(
+ mapping.second->function_literal_id());
+ }
+ new_script->shared_function_infos()->Set(
+ mapping.second->function_literal_id(), HeapObjectReference::Weak(*sfi));
+ DCHECK_EQ(sfi->FunctionLiteralId(isolate),
+ mapping.second->function_literal_id());
+
+ // Swap the now-redundant, newly compiled SFI into the old script, so that
+ // we can look up the old function_literal_id using the new SFI when
+ // processing changed functions.
+ HeapObject* redundant_new_sfi_obj;
+ if (weak_redundant_new_sfi->ToStrongOrWeakHeapObject(
+ &redundant_new_sfi_obj)) {
+ SharedFunctionInfo* redundant_new_sfi =
+ SharedFunctionInfo::cast(redundant_new_sfi_obj);
+
+ redundant_new_sfi->set_script(*script);
+ if (redundant_new_sfi->HasUncompiledData()) {
+ redundant_new_sfi->uncompiled_data()->set_function_literal_id(
+ mapping.first->function_literal_id());
}
+ script->shared_function_infos()->Set(
+ mapping.first->function_literal_id(),
+ HeapObjectReference::Weak(redundant_new_sfi));
+ DCHECK_EQ(redundant_new_sfi->FunctionLiteralId(isolate),
+ mapping.first->function_literal_id());
}
- }
-}
-
-// Check an activation against list of functions. If there is a function
-// that matches, its status in result array is changed to status argument value.
-static bool CheckActivation(Handle<JSArray> shared_info_array,
- Handle<JSArray> result,
- StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- if (!frame->is_java_script()) return false;
-
- Handle<JSFunction> function(JavaScriptFrame::cast(frame)->function());
+ if (sfi->HasUncompiledDataWithPreParsedScope()) {
+ sfi->ClearPreParsedScopeData();
+ }
- Isolate* isolate = shared_info_array->GetIsolate();
- int len = GetArrayLength(shared_info_array);
- for (int i = 0; i < len; i++) {
- HandleScope scope(isolate);
- Handle<Object> element =
- JSReceiver::GetElement(isolate, shared_info_array, i).ToHandleChecked();
- Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
- Handle<SharedFunctionInfo> shared =
- UnwrapSharedFunctionInfoFromJSValue(jsvalue);
-
- if (function->shared() == *shared ||
- (function->code()->is_optimized_code() &&
- function->code()->Inlines(*shared))) {
- SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
- return true;
+ for (auto& js_function : data->js_functions) {
+ js_function->set_feedback_cell(*isolate->factory()->many_closures_cell());
+ if (!js_function->is_compiled()) continue;
+ JSFunction::EnsureFeedbackVector(js_function);
}
- }
- return false;
-}
-// Describes a set of call frames that execute any of listed functions.
-// Finding no such frames does not mean error.
-class MultipleFunctionTarget {
- public:
- MultipleFunctionTarget(Handle<JSArray> old_shared_array,
- Handle<JSArray> new_shared_array,
- Handle<JSArray> result)
- : old_shared_array_(old_shared_array),
- new_shared_array_(new_shared_array),
- result_(result) {}
- bool MatchActivation(StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- return CheckActivation(old_shared_array_, result_, frame, status);
- }
- const char* GetNotFoundMessage() const { return nullptr; }
- bool FrameUsesNewTarget(StackFrame* frame) {
- if (!frame->is_java_script()) return false;
- JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
- Handle<SharedFunctionInfo> old_shared(jsframe->function()->shared());
- Isolate* isolate = old_shared->GetIsolate();
- int len = GetArrayLength(old_shared_array_);
- // Find corresponding new shared function info and return whether it
- // references new.target.
- for (int i = 0; i < len; i++) {
- HandleScope scope(isolate);
- Handle<Object> old_element =
- JSReceiver::GetElement(isolate, old_shared_array_, i)
- .ToHandleChecked();
- if (!old_shared.is_identical_to(UnwrapSharedFunctionInfoFromJSValue(
- Handle<JSValue>::cast(old_element)))) {
+ if (!sfi->HasBytecodeArray()) continue;
+ FixedArray* constants = sfi->GetBytecodeArray()->constant_pool();
+ for (int i = 0; i < constants->length(); ++i) {
+ if (!constants->get(i)->IsSharedFunctionInfo()) continue;
+ FunctionData* data = nullptr;
+ if (!function_data_map.Lookup(
+ isolate, SharedFunctionInfo::cast(constants->get(i)), &data)) {
continue;
}
-
- Handle<Object> new_element =
- JSReceiver::GetElement(isolate, new_shared_array_, i)
- .ToHandleChecked();
- if (new_element->IsUndefined(isolate)) return false;
- Handle<SharedFunctionInfo> new_shared =
- UnwrapSharedFunctionInfoFromJSValue(
- Handle<JSValue>::cast(new_element));
- if (new_shared->scope_info()->HasNewTarget()) {
- SetElementSloppy(
- result_, i,
- Handle<Smi>(
- Smi::FromInt(
- LiveEdit::FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART),
- isolate));
- return true;
+ auto change_it = changed.find(data->literal);
+ if (change_it == changed.end()) continue;
+ if (!function_data_map.Lookup(new_script, change_it->second, &data)) {
+ continue;
}
- return false;
+ Handle<SharedFunctionInfo> new_sfi;
+ if (!data->shared.ToHandle(&new_sfi)) continue;
+ constants->set(i, *new_sfi);
}
- return false;
}
-
- void set_status(LiveEdit::FunctionPatchabilityStatus status) {
- Isolate* isolate = old_shared_array_->GetIsolate();
- int len = GetArrayLength(old_shared_array_);
- for (int i = 0; i < len; ++i) {
- Handle<Object> old_element =
- JSReceiver::GetElement(isolate, result_, i).ToHandleChecked();
- if (!old_element->IsSmi() ||
- Smi::ToInt(*old_element) == LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {
- SetElementSloppy(result_, i,
- Handle<Smi>(Smi::FromInt(status), isolate));
- }
+ for (const auto& mapping : changed) {
+ FunctionData* data = nullptr;
+ if (!function_data_map.Lookup(new_script, mapping.second, &data)) continue;
+ Handle<SharedFunctionInfo> new_sfi = data->shared.ToHandleChecked();
+ DCHECK_EQ(new_sfi->script(), *new_script);
+
+ if (!function_data_map.Lookup(script, mapping.first, &data)) continue;
+ Handle<SharedFunctionInfo> sfi;
+ if (!data->shared.ToHandle(&sfi)) continue;
+
+ isolate->debug()->DeoptimizeFunction(sfi);
+ isolate->compilation_cache()->Remove(sfi);
+ for (auto& js_function : data->js_functions) {
+ js_function->set_shared(*new_sfi);
+ js_function->set_code(js_function->shared()->GetCode());
+
+ js_function->set_feedback_cell(*isolate->factory()->many_closures_cell());
+ if (!js_function->is_compiled()) continue;
+ JSFunction::EnsureFeedbackVector(js_function);
}
- }
- private:
- Handle<JSArray> old_shared_array_;
- Handle<JSArray> new_shared_array_;
- Handle<JSArray> result_;
-};
-
-
-// Drops all call frame matched by target and all frames above them.
-template <typename TARGET>
-static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
- TARGET& target, // NOLINT
- bool do_drop) {
- Debug* debug = isolate->debug();
- Zone zone(isolate->allocator(), ZONE_NAME);
- Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
-
- int top_frame_index = -1;
- int frame_index = 0;
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->id() == debug->break_frame_id()) {
- top_frame_index = frame_index;
- break;
+ if (!new_sfi->HasBytecodeArray()) continue;
+ FixedArray* constants = new_sfi->GetBytecodeArray()->constant_pool();
+ for (int i = 0; i < constants->length(); ++i) {
+ if (!constants->get(i)->IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo* inner_sfi =
+ SharedFunctionInfo::cast(constants->get(i));
+ if (inner_sfi->script() != *script) continue;
+
+ // If the inner SFI's script is the old script, then this is actually a
+ // redundant new_script SFI where the old script SFI was unchanged, so we
+ // swapped their scripts in the unchanged iteration. This means that we
+ // have to update this changed SFI's inner SFI constant to point at the
+ // old inner SFI, which has already been patched to be on the new script.
+ //
+ // So, we look up FunctionData using the current, newly compiled
+ // inner_sfi, but the resulting FunctionData will still be referring to
+ // the old, unchanged SFI.
+ FunctionData* data = nullptr;
+ if (!function_data_map.Lookup(isolate, inner_sfi, &data)) continue;
+ Handle<SharedFunctionInfo> old_unchanged_inner_sfi =
+ data->shared.ToHandleChecked();
+ // Now some sanity checks. Make sure that this inner_sfi is not the
+ // unchanged SFI yet...
+ DCHECK_NE(*old_unchanged_inner_sfi, inner_sfi);
+ // ... that the unchanged SFI has already been processed and patched to be
+ // on the new script ...
+ DCHECK_EQ(old_unchanged_inner_sfi->script(), *new_script);
+ // ... and that the id of the unchanged SFI matches the unchanged target
+ // literal's id.
+ DCHECK_EQ(old_unchanged_inner_sfi->FunctionLiteralId(isolate),
+ unchanged[data->literal]->function_literal_id());
+ constants->set(i, *old_unchanged_inner_sfi);
}
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
- // We are still above break_frame. It is not a target frame,
- // it is a problem.
- return "Debugger mark-up on stack is not found";
- }
- }
-
- if (top_frame_index == -1) {
- // We haven't found break frame, but no function is blocking us anyway.
- return target.GetNotFoundMessage();
}
-
- bool target_frame_found = false;
- int bottom_js_frame_index = top_frame_index;
- bool non_droppable_frame_found = false;
- LiveEdit::FunctionPatchabilityStatus non_droppable_reason;
-
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->is_exit() || frame->is_builtin_exit()) {
- non_droppable_frame_found = true;
- non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE;
- break;
- }
- if (frame->is_java_script()) {
- SharedFunctionInfo* shared =
- JavaScriptFrame::cast(frame)->function()->shared();
- if (IsResumableFunction(shared->kind())) {
- non_droppable_frame_found = true;
- non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
- break;
+#ifdef DEBUG
+ {
+ // Check that all the functions in the new script are valid and that their
+ // function literals match what is expected.
+ DisallowHeapAllocation no_gc;
+
+ SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
+ while (SharedFunctionInfo* sfi = it.Next()) {
+ DCHECK_EQ(sfi->script(), *new_script);
+ DCHECK_EQ(sfi->FunctionLiteralId(isolate), it.CurrentIndex());
+
+ if (!sfi->HasBytecodeArray()) continue;
+ // Check that all the functions in this function's constant pool are also
+ // on the new script, and that their id matches their index in the new
+ // scripts function list.
+ FixedArray* constants = sfi->GetBytecodeArray()->constant_pool();
+ for (int i = 0; i < constants->length(); ++i) {
+ if (!constants->get(i)->IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo* inner_sfi =
+ SharedFunctionInfo::cast(constants->get(i));
+ DCHECK_EQ(inner_sfi->script(), *new_script);
+ DCHECK_EQ(inner_sfi, new_script->shared_function_infos()
+ ->Get(inner_sfi->FunctionLiteralId(isolate))
+ ->GetHeapObject());
}
}
- if (target.MatchActivation(
- frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- target_frame_found = true;
- bottom_js_frame_index = frame_index;
- }
}
+#endif
- if (non_droppable_frame_found) {
- // There is a C or generator frame on stack. We can't drop C frames, and we
- // can't restart generators. Check that there are no target frames below
- // them.
- for (; frame_index < frames.length(); frame_index++) {
- StackFrame* frame = frames[frame_index];
- if (frame->is_java_script()) {
- if (target.MatchActivation(frame, non_droppable_reason)) {
- // Fail.
- return nullptr;
- }
- if (non_droppable_reason ==
- LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR &&
- !target_frame_found) {
- // Fail.
- target.set_status(non_droppable_reason);
- return nullptr;
- }
+ if (restart_frame_fp) {
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (it.frame()->fp() == restart_frame_fp) {
+ isolate->debug()->ScheduleFrameRestart(it.frame());
+ result->stack_changed = true;
+ break;
}
}
}
- // We cannot restart a frame that uses new.target.
- if (target.FrameUsesNewTarget(frames[bottom_js_frame_index])) return nullptr;
-
- if (!do_drop) {
- // We are in check-only mode.
- return nullptr;
- }
-
- if (!target_frame_found) {
- // Nothing to drop.
- return target.GetNotFoundMessage();
- }
-
- if (!LiveEdit::kFrameDropperSupported) {
- return "Stack manipulations are not supported in this architecture.";
- }
-
- debug->ScheduleFrameRestart(frames[bottom_js_frame_index]);
- return nullptr;
+ int script_id = script->id();
+ script->set_id(new_script->id());
+ new_script->set_id(script_id);
+ result->status = debug::LiveEditResult::OK;
+ result->script = ToApiHandle<v8::debug::Script>(new_script);
}
-
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
- Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
- Handle<JSArray> result, bool do_drop) {
- MultipleFunctionTarget target(old_shared_array, new_shared_array, result);
- Isolate* isolate = old_shared_array->GetIsolate();
-
- const char* message =
- DropActivationsInActiveThreadImpl(isolate, target, do_drop);
- if (message) {
- return message;
- }
-
- int array_len = GetArrayLength(old_shared_array);
-
- // Replace "blocked on active" with "replaced on active" status.
- for (int i = 0; i < array_len; i++) {
- Handle<Object> obj =
- JSReceiver::GetElement(isolate, result, i).ToHandleChecked();
- if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- Handle<Object> replaced(
- Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
- SetElementSloppy(result, i, replaced);
- }
- }
- return nullptr;
+void LiveEdit::InitializeThreadLocal(Debug* debug) {
+ debug->thread_local_.restart_fp_ = 0;
}
-
-bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
- Handle<FixedArray> result,
- int len) {
- Isolate* isolate = shared_info_array->GetIsolate();
- bool found_suspended_activations = false;
-
- DCHECK_LE(len, result->length());
-
- FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR;
-
- Heap* heap = isolate->heap();
- HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
- HeapObject* obj = nullptr;
- while ((obj = iterator.next()) != nullptr) {
- if (!obj->IsJSGeneratorObject()) continue;
-
- JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
- if (gen->is_closed()) continue;
-
- HandleScope scope(isolate);
-
- for (int i = 0; i < len; i++) {
- Handle<JSValue> jsvalue = Handle<JSValue>::cast(
- FixedArray::get(*shared_info_array, i, isolate));
- Handle<SharedFunctionInfo> shared =
- UnwrapSharedFunctionInfoFromJSValue(jsvalue);
-
- if (gen->function()->shared() == *shared) {
- result->set(i, Smi::FromInt(active));
- found_suspended_activations = true;
+bool LiveEdit::RestartFrame(JavaScriptFrame* frame) {
+ if (!LiveEdit::kFrameDropperSupported) return false;
+ Isolate* isolate = frame->isolate();
+ StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
+ bool break_frame_found = break_frame_id == StackFrame::NO_ID;
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ StackFrame* current = it.frame();
+ break_frame_found = break_frame_found || break_frame_id == current->id();
+ if (current->fp() == frame->fp()) {
+ if (break_frame_found) {
+ isolate->debug()->ScheduleFrameRestart(current);
+ return true;
+ } else {
+ return false;
}
}
- }
-
- return found_suspended_activations;
-}
-
-
-class InactiveThreadActivationsChecker : public ThreadVisitor {
- public:
- InactiveThreadActivationsChecker(Handle<JSArray> old_shared_array,
- Handle<JSArray> result)
- : old_shared_array_(old_shared_array),
- result_(result),
- has_blocked_functions_(false) {}
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- has_blocked_functions_ |=
- CheckActivation(old_shared_array_, result_, it.frame(),
- LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
+ if (!break_frame_found) continue;
+ if (current->is_exit() || current->is_builtin_exit()) {
+ return false;
}
- }
- bool HasBlockedFunctions() {
- return has_blocked_functions_;
- }
-
- private:
- Handle<JSArray> old_shared_array_;
- Handle<JSArray> result_;
- bool has_blocked_functions_;
-};
-
-
-Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
- bool do_drop) {
- Isolate* isolate = old_shared_array->GetIsolate();
- int len = GetArrayLength(old_shared_array);
-
- DCHECK(old_shared_array->HasFastElements());
- Handle<FixedArray> old_shared_array_elements(
- FixedArray::cast(old_shared_array->elements()));
-
- Handle<JSArray> result = isolate->factory()->NewJSArray(len);
- result->set_length(Smi::FromInt(len));
- JSObject::EnsureWritableFastElements(result);
- Handle<FixedArray> result_elements =
- handle(FixedArray::cast(result->elements()), isolate);
-
- // Fill the default values.
- for (int i = 0; i < len; i++) {
- FunctionPatchabilityStatus status = FUNCTION_AVAILABLE_FOR_PATCH;
- result_elements->set(i, Smi::FromInt(status));
- }
-
- // Scan the heap for active generators -- those that are either currently
- // running (as we wouldn't want to restart them, because we don't know where
- // to restart them from) or suspended. Fail if any one corresponds to the set
- // of functions being edited.
- if (FindActiveGenerators(old_shared_array_elements, result_elements, len)) {
- return result;
- }
-
- // Check inactive threads. Fail if some functions are blocked there.
- InactiveThreadActivationsChecker inactive_threads_checker(old_shared_array,
- result);
- isolate->thread_manager()->IterateArchivedThreads(
- &inactive_threads_checker);
- if (inactive_threads_checker.HasBlockedFunctions()) {
- return result;
- }
-
- // Try to drop activations from the current stack.
- const char* error_message = DropActivationsInActiveThread(
- old_shared_array, new_shared_array, result, do_drop);
- if (error_message != nullptr) {
- // Add error message as an array extra element.
- Handle<String> str =
- isolate->factory()->NewStringFromAsciiChecked(error_message);
- SetElementSloppy(result, len, str);
- }
- return result;
-}
-
-
-// Describes a single callframe a target. Not finding this frame
-// means an error.
-class SingleFrameTarget {
- public:
- explicit SingleFrameTarget(JavaScriptFrame* frame)
- : m_frame(frame),
- m_saved_status(LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {}
-
- bool MatchActivation(StackFrame* frame,
- LiveEdit::FunctionPatchabilityStatus status) {
- if (frame->fp() == m_frame->fp()) {
- m_saved_status = status;
- return true;
+ if (!current->is_java_script()) continue;
+ std::vector<Handle<SharedFunctionInfo>> shareds;
+ JavaScriptFrame::cast(current)->GetFunctions(&shareds);
+ for (auto& shared : shareds) {
+ if (IsResumableFunction(shared->kind())) {
+ return false;
+ }
}
- return false;
}
- const char* GetNotFoundMessage() const {
- return "Failed to found requested frame";
- }
- LiveEdit::FunctionPatchabilityStatus saved_status() {
- return m_saved_status;
- }
- void set_status(LiveEdit::FunctionPatchabilityStatus status) {
- m_saved_status = status;
- }
-
- bool FrameUsesNewTarget(StackFrame* frame) {
- if (!frame->is_java_script()) return false;
- JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
- Handle<SharedFunctionInfo> shared(jsframe->function()->shared());
- return shared->scope_info()->HasNewTarget();
- }
-
- private:
- JavaScriptFrame* m_frame;
- LiveEdit::FunctionPatchabilityStatus m_saved_status;
-};
-
-// Finds a drops required frame and all frames above.
-// Returns error message or nullptr.
-const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
- SingleFrameTarget target(frame);
-
- const char* result =
- DropActivationsInActiveThreadImpl(frame->isolate(), target, true);
- if (result != nullptr) {
- return result;
- }
- if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
- return "Function is blocked under native code";
- }
- if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR) {
- return "Function is blocked under a generator activation";
- }
- return nullptr;
-}
-
-Handle<JSArray> LiveEditFunctionTracker::Collect(FunctionLiteral* node,
- Handle<Script> script,
- Zone* zone, Isolate* isolate) {
- LiveEditFunctionTracker visitor(script, zone, isolate);
- visitor.VisitFunctionLiteral(node);
- return visitor.result_;
+ return false;
}
-LiveEditFunctionTracker::LiveEditFunctionTracker(Handle<Script> script,
- Zone* zone, Isolate* isolate)
- : AstTraversalVisitor<LiveEditFunctionTracker>(isolate) {
- current_parent_index_ = -1;
- isolate_ = isolate;
- len_ = 0;
- result_ = isolate->factory()->NewJSArray(10);
- script_ = script;
- zone_ = zone;
-}
+void LiveEdit::CompareStrings(Isolate* isolate, Handle<String> s1,
+ Handle<String> s2,
+ std::vector<SourceChangeRange>* diffs) {
+ s1 = String::Flatten(isolate, s1);
+ s2 = String::Flatten(isolate, s2);
-void LiveEditFunctionTracker::VisitFunctionLiteral(FunctionLiteral* node) {
- // FunctionStarted is called in pre-order.
- FunctionStarted(node);
- // Recurse using the regular traversal.
- AstTraversalVisitor::VisitFunctionLiteral(node);
- // FunctionDone are called in post-order.
- Handle<SharedFunctionInfo> info =
- script_->FindSharedFunctionInfo(isolate_, node).ToHandleChecked();
- FunctionDone(info, node->scope());
-}
-
-void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
- HandleScope handle_scope(isolate_);
- FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
- info.SetInitialProperties(fun->name(isolate_), fun->start_position(),
- fun->end_position(), fun->parameter_count(),
- current_parent_index_, fun->function_literal_id());
- current_parent_index_ = len_;
- SetElementSloppy(result_, len_, info.GetJSArray());
- len_++;
-}
+ LineEndsWrapper line_ends1(isolate, s1);
+ LineEndsWrapper line_ends2(isolate, s2);
-// Saves full information about a function: its code, its scope info
-// and a SharedFunctionInfo object.
-void LiveEditFunctionTracker::FunctionDone(Handle<SharedFunctionInfo> shared,
- Scope* scope) {
- HandleScope handle_scope(isolate_);
- FunctionInfoWrapper info = FunctionInfoWrapper::cast(
- *JSReceiver::GetElement(isolate_, result_, current_parent_index_)
- .ToHandleChecked());
- info.SetSharedFunctionInfo(shared);
+ LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
+ TokenizingLineArrayCompareOutput output(isolate, line_ends1, line_ends2, s1,
+ s2, diffs);
- Handle<Object> scope_info_list = SerializeFunctionScope(scope);
- info.SetFunctionScopeInfo(scope_info_list);
+ NarrowDownInput(&input, &output);
- current_parent_index_ = info.GetParentIndex();
+ Comparator::CalculateDifference(&input, &output);
}
-Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
- Handle<JSArray> scope_info_list = isolate_->factory()->NewJSArray(10);
- int scope_info_length = 0;
-
- // Saves some description of scope. It stores name and indexes of
- // variables in the whole scope chain. Null-named slots delimit
- // scopes of this chain.
- Scope* current_scope = scope;
- while (current_scope != nullptr) {
- HandleScope handle_scope(isolate_);
- for (Variable* var : *current_scope->locals()) {
- if (!var->IsContextSlot()) continue;
- int context_index = var->index() - Context::MIN_CONTEXT_SLOTS;
- int location = scope_info_length + context_index * 2;
- SetElementSloppy(scope_info_list, location, var->name());
- SetElementSloppy(scope_info_list, location + 1,
- handle(Smi::FromInt(var->index()), isolate_));
- }
- scope_info_length += current_scope->ContextLocalCount() * 2;
- SetElementSloppy(scope_info_list, scope_info_length,
- isolate_->factory()->null_value());
- scope_info_length++;
-
- current_scope = current_scope->outer_scope();
- }
-
- return scope_info_list;
+int LiveEdit::TranslatePosition(const std::vector<SourceChangeRange>& diffs,
+ int position) {
+ auto it = std::lower_bound(diffs.begin(), diffs.end(), position,
+ [](const SourceChangeRange& change, int position) {
+ return change.end_position < position;
+ });
+ if (it != diffs.end() && position == it->end_position) {
+ return it->new_end_position;
+ }
+ if (it == diffs.begin()) return position;
+ DCHECK(it == diffs.end() || position <= it->start_position);
+ it = std::prev(it);
+ return position + (it->new_end_position - it->end_position);
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 120b9c87eb..1b053e4519 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -5,318 +5,73 @@
#ifndef V8_DEBUG_LIVEEDIT_H_
#define V8_DEBUG_LIVEEDIT_H_
+#include <vector>
-// Live Edit feature implementation.
-// User should be able to change script on already running VM. This feature
-// matches hot swap features in other frameworks.
-//
-// The basic use-case is when user spots some mistake in function body
-// from debugger and wishes to change the algorithm without restart.
-//
-// A single change always has a form of a simple replacement (in pseudo-code):
-// script.source[positions, positions+length] = new_string;
-// Implementation first determines, which function's body includes this
-// change area. Then both old and new versions of script are fully compiled
-// in order to analyze, whether the function changed its outer scope
-// expectations (or number of parameters). If it didn't, function's code is
-// patched with a newly compiled code. If it did change, enclosing function
-// gets patched. All inner functions are left untouched, whatever happened
-// to them in a new script version. However, new version of code will
-// instantiate newly compiled functions.
-
-
-#include "src/allocation.h"
-#include "src/ast/ast-traversal-visitor.h"
+#include "src/globals.h"
+#include "src/handles.h"
namespace v8 {
+namespace debug {
+struct LiveEditResult;
+}
namespace internal {
+class Script;
+class String;
+class Debug;
class JavaScriptFrame;
-// This class collects some specific information on structure of functions
-// in a particular script.
-//
-// The primary interest of the Tracker is to record function scope structures
-// in order to analyze whether function code may be safely patched (with new
-// code successfully reading existing data from function scopes). The Tracker
-// also collects compiled function codes.
-class LiveEditFunctionTracker
- : public AstTraversalVisitor<LiveEditFunctionTracker> {
- public:
- // Traverses the entire AST, and records information about all
- // FunctionLiterals for further use by LiveEdit code patching. The collected
- // information is returned as a serialized array.
- static Handle<JSArray> Collect(FunctionLiteral* node, Handle<Script> script,
- Zone* zone, Isolate* isolate);
-
- protected:
- friend AstTraversalVisitor<LiveEditFunctionTracker>;
- void VisitFunctionLiteral(FunctionLiteral* node);
-
- private:
- LiveEditFunctionTracker(Handle<Script> script, Zone* zone, Isolate* isolate);
-
- void FunctionStarted(FunctionLiteral* fun);
- void FunctionDone(Handle<SharedFunctionInfo> shared, Scope* scope);
- Handle<Object> SerializeFunctionScope(Scope* scope);
-
- Handle<Script> script_;
- Zone* zone_;
- Isolate* isolate_;
-
- Handle<JSArray> result_;
- int len_;
- int current_parent_index_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveEditFunctionTracker);
+struct SourceChangeRange {
+ int start_position;
+ int end_position;
+ int new_start_position;
+ int new_end_position;
};
+/**
+ Liveedit step-by-step:
+ 1. calculate diff between old source and new source,
+ 2. map function literals from old source to new source,
+ 3. create new script for new_source,
+ 4. mark literals with changed code as changed, all others as unchanged,
+ 5. check that for changed literals there are no:
+ - running generators in the heap,
+ - non-droppable frames (e.g. a running generator) above them on the stack.
+ 6. mark the bottommost frame with a changed function as scheduled for restart
+ if any,
+ 7. for unchanged functions:
+ - deoptimize,
+ - remove from cache,
+ - update source positions,
+ - move to new script,
+ - reset feedback information and preparsed scope information if any,
+ - replace any SFI in the constant pool with the changed one, if any.
+ 8. for changed functions:
+ - deoptimize
+ - remove from cache,
+ - reset feedback information,
+ - update all links from js functions to old shared with new one.
+ 9. swap scripts.
+ */
class LiveEdit : AllStatic {
public:
static void InitializeThreadLocal(Debug* debug);
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
- Handle<Script> script, Handle<String> source);
-
- static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array);
-
- static void FixupScript(Handle<Script> script, int max_function_literal_id);
-
- static void FunctionSourceUpdated(Handle<JSArray> shared_info_array,
- int new_function_literal_id);
-
- // Updates script field in FunctionSharedInfo.
- static void SetFunctionScript(Handle<JSValue> function_wrapper,
- Handle<Object> script_handle);
-
- static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
- Handle<JSArray> position_change_array);
-
- // For a script updates its source field. If old_script_name is provided
- // (i.e. is a String), also creates a copy of the script with its original
- // source and sends notification to debugger.
- static Handle<Object> ChangeScriptSource(Handle<Script> original_script,
- Handle<String> new_source,
- Handle<Object> old_script_name);
-
- // In a code of a parent function replaces original function as embedded
- // object with a substitution one.
- static void ReplaceRefToNestedFunction(Handle<JSValue> parent_function_shared,
- Handle<JSValue> orig_function_shared,
- Handle<JSValue> subst_function_shared);
-
- // Find open generator activations, and set corresponding "result" elements to
- // FUNCTION_BLOCKED_ACTIVE_GENERATOR.
- static bool FindActiveGenerators(Handle<FixedArray> shared_info_array,
- Handle<FixedArray> result, int len);
-
- // Checks listed functions on stack and return array with corresponding
- // FunctionPatchabilityStatus statuses; extra array element may
- // contain general error message. Modifies the current stack and
- // has restart the lowest found frames and drops all other frames above
- // if possible and if do_drop is true.
- static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
- bool do_drop);
-
// Restarts the call frame and completely drops all frames above it.
- // Return error message or nullptr.
- static const char* RestartFrame(JavaScriptFrame* frame);
-
- // A copy of this is in liveedit.js.
- enum FunctionPatchabilityStatus {
- FUNCTION_AVAILABLE_FOR_PATCH = 1,
- FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
- FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
- FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
- FUNCTION_REPLACED_ON_ACTIVE_STACK = 5,
- FUNCTION_BLOCKED_UNDER_GENERATOR = 6,
- FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7,
- FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART = 8
- };
-
- // Compares 2 strings line-by-line, then token-wise and returns diff in form
- // of array of triplets (pos1, pos1_end, pos2_end) describing list
- // of diff chunks.
- static Handle<JSArray> CompareStrings(Handle<String> s1,
- Handle<String> s2);
-
+ static bool RestartFrame(JavaScriptFrame* frame);
+
+ static void CompareStrings(Isolate* isolate, Handle<String> a,
+ Handle<String> b,
+ std::vector<SourceChangeRange>* diffs);
+ static int TranslatePosition(const std::vector<SourceChangeRange>& changed,
+ int position);
+ static void PatchScript(Isolate* isolate, Handle<Script> script,
+ Handle<String> source, bool preview,
+ debug::LiveEditResult* result);
// Architecture-specific constant.
static const bool kFrameDropperSupported;
};
-
-
-// A general-purpose comparator between 2 arrays.
-class Comparator {
- public:
- // Holds 2 arrays of some elements allowing to compare any pair of
- // element from the first array and element from the second array.
- class Input {
- public:
- virtual int GetLength1() = 0;
- virtual int GetLength2() = 0;
- virtual bool Equals(int index1, int index2) = 0;
-
- protected:
- virtual ~Input() {}
- };
-
- // Receives compare result as a series of chunks.
- class Output {
- public:
- // Puts another chunk in result list. Note that technically speaking
- // only 3 arguments actually needed with 4th being derivable.
- virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
-
- protected:
- virtual ~Output() {}
- };
-
- // Finds the difference between 2 arrays of elements.
- static void CalculateDifference(Input* input,
- Output* result_writer);
-};
-
-
-
-// Simple helper class that creates more or less typed structures over
-// JSArray object. This is an adhoc method of passing structures from C++
-// to JavaScript.
-template<typename S>
-class JSArrayBasedStruct {
- public:
- static S Create(Isolate* isolate) {
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(S::kSize_);
- return S(array);
- }
-
- static S cast(Object* object) {
- JSArray* array = JSArray::cast(object);
- Handle<JSArray> array_handle(array);
- return S(array_handle);
- }
-
- explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
- }
-
- Handle<JSArray> GetJSArray() {
- return array_;
- }
-
- Isolate* isolate() const {
- return array_->GetIsolate();
- }
-
- protected:
- void SetField(int field_position, Handle<Object> value) {
- Object::SetElement(isolate(), array_, field_position, value,
- LanguageMode::kSloppy)
- .Assert();
- }
-
- void SetSmiValueField(int field_position, int value) {
- SetField(field_position, Handle<Smi>(Smi::FromInt(value), isolate()));
- }
-
- Handle<Object> GetField(int field_position) {
- return JSReceiver::GetElement(isolate(), array_, field_position)
- .ToHandleChecked();
- }
-
- int GetSmiValueField(int field_position) {
- Handle<Object> res = GetField(field_position);
- return Handle<Smi>::cast(res)->value();
- }
-
- private:
- Handle<JSArray> array_;
-};
-
-
-// Represents some function compilation details. This structure will be used
-// from JavaScript. It contains Code object, which is kept wrapped
-// into a BlindReference for sanitizing reasons.
-class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
- public:
- explicit FunctionInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
- }
-
- void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int parent_index,
- int function_literal_id);
-
- void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
- }
-
- void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info);
-
- Handle<SharedFunctionInfo> GetSharedFunctionInfo();
-
- int GetParentIndex() {
- return this->GetSmiValueField(kParentIndexOffset_);
- }
-
- int GetStartPosition() {
- return this->GetSmiValueField(kStartPositionOffset_);
- }
-
- int GetEndPosition() { return this->GetSmiValueField(kEndPositionOffset_); }
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kParamNumOffset_ = 3;
- static const int kFunctionScopeInfoOffset_ = 4;
- static const int kParentIndexOffset_ = 5;
- static const int kSharedFunctionInfoOffset_ = 6;
- static const int kFunctionLiteralIdOffset_ = 7;
- static const int kSize_ = 8;
-
- friend class JSArrayBasedStruct<FunctionInfoWrapper>;
-};
-
-
-// Wraps SharedFunctionInfo along with some of its fields for passing it
-// back to JavaScript. SharedFunctionInfo object itself is additionally
-// wrapped into BlindReference for sanitizing reasons.
-class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
- public:
- static bool IsInstance(Handle<JSArray> array) {
- if (array->length() != Smi::FromInt(kSize_)) return false;
- Handle<Object> element(
- JSReceiver::GetElement(array->GetIsolate(), array, kSharedInfoOffset_)
- .ToHandleChecked());
- if (!element->IsJSValue()) return false;
- return Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo();
- }
-
- explicit SharedInfoWrapper(Handle<JSArray> array)
- : JSArrayBasedStruct<SharedInfoWrapper>(array) {
- }
-
- void SetProperties(Handle<String> name,
- int start_position,
- int end_position,
- Handle<SharedFunctionInfo> info);
-
- Handle<SharedFunctionInfo> GetInfo();
-
- private:
- static const int kFunctionNameOffset_ = 0;
- static const int kStartPositionOffset_ = 1;
- static const int kEndPositionOffset_ = 2;
- static const int kSharedInfoOffset_ = 3;
- static const int kSize_ = 4;
-
- friend class JSArrayBasedStruct<SharedInfoWrapper>;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
deleted file mode 100644
index db0aaf8177..0000000000
--- a/deps/v8/src/debug/liveedit.js
+++ /dev/null
@@ -1,1058 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// LiveEdit feature implementation. The script should be executed after
-// debug.js.
-
-// A LiveEdit namespace. It contains functions that modifies JavaScript code
-// according to changes of script source (if possible).
-//
-// When new script source is put in, the difference is calculated textually,
-// in form of list of delete/add/change chunks. The functions that include
-// change chunk(s) get recompiled, or their enclosing functions are
-// recompiled instead.
-// If the function may not be recompiled (e.g. it was completely erased in new
-// version of the script) it remains unchanged, but the code that could
-// create a new instance of this function goes away. An old version of script
-// is created to back up this obsolete function.
-// All unchanged functions have their positions updated accordingly.
-//
-// LiveEdit namespace is declared inside a single function constructor.
-
-(function(global, utils) {
- "use strict";
-
- // -------------------------------------------------------------------
- // Imports
-
- var FindScriptSourcePosition = global.Debug.findScriptSourcePosition;
- var GlobalArray = global.Array;
- var MathFloor = global.Math.floor;
- var MathMax = global.Math.max;
- var SyntaxError = global.SyntaxError;
-
- // -------------------------------------------------------------------
-
- // Forward declaration for minifier.
- var FunctionStatus;
-
- // Applies the change to the script.
- // The change is in form of list of chunks encoded in a single array as
- // a series of triplets (pos1_start, pos1_end, pos2_end)
- function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
- change_log) {
-
- var old_source = script.source;
-
- // Gather compile information about old version of script.
- var old_compile_info = GatherCompileInfo(old_source, script);
-
- // Build tree structures for old and new versions of the script.
- var root_old_node = BuildCodeInfoTree(old_compile_info);
-
- var pos_translator = new PosTranslator(diff_array);
-
- // Analyze changes.
- MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
-
- // Find all SharedFunctionInfo's that were compiled from this script.
- FindLiveSharedInfos(root_old_node, script);
-
- // Gather compile information about new version of script.
- var new_compile_info;
- try {
- new_compile_info = GatherCompileInfo(new_source, script);
- } catch (e) {
- var failure =
- new Failure("Failed to compile new version of script: " + e);
- if (e instanceof SyntaxError) {
- var details = {
- type: "liveedit_compile_error",
- syntaxErrorMessage: e.message
- };
- CopyErrorPositionToDetails(e, details);
- failure.details = details;
- }
- throw failure;
- }
-
- var max_function_literal_id = new_compile_info.reduce(
- (max, info) => MathMax(max, info.function_literal_id), 0);
-
- var root_new_node = BuildCodeInfoTree(new_compile_info);
-
- // Link recompiled script data with other data.
- FindCorrespondingFunctions(root_old_node, root_new_node);
-
- // Prepare to-do lists.
- var replace_code_list = new GlobalArray();
- var link_to_old_script_list = new GlobalArray();
- var link_to_original_script_list = new GlobalArray();
- var update_positions_list = new GlobalArray();
-
- function HarvestTodo(old_node) {
- function CollectDamaged(node) {
- link_to_old_script_list.push(node);
- for (var i = 0; i < node.children.length; i++) {
- CollectDamaged(node.children[i]);
- }
- }
-
- // Recursively collects all newly compiled functions that are going into
- // business and should have link to the actual script updated.
- function CollectNew(node_list) {
- for (var i = 0; i < node_list.length; i++) {
- link_to_original_script_list.push(node_list[i]);
- CollectNew(node_list[i].children);
- }
- }
-
- if (old_node.status == FunctionStatus.DAMAGED) {
- CollectDamaged(old_node);
- return;
- }
- if (old_node.status == FunctionStatus.UNCHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.SOURCE_CHANGED) {
- update_positions_list.push(old_node);
- } else if (old_node.status == FunctionStatus.CHANGED) {
- replace_code_list.push(old_node);
- CollectNew(old_node.unmatched_new_nodes);
- }
- for (var i = 0; i < old_node.children.length; i++) {
- HarvestTodo(old_node.children[i]);
- }
- }
-
- var preview_description = {
- change_tree: DescribeChangeTree(root_old_node),
- textual_diff: {
- old_len: old_source.length,
- new_len: new_source.length,
- chunks: diff_array
- },
- updated: false
- };
-
- if (preview_only) {
- return preview_description;
- }
-
- HarvestTodo(root_old_node);
-
- // Collect shared infos for functions whose code need to be patched.
- var replaced_function_old_infos = new GlobalArray();
- var replaced_function_new_infos = new GlobalArray();
- for (var i = 0; i < replace_code_list.length; i++) {
- var old_infos = replace_code_list[i].live_shared_function_infos;
- var new_info =
- replace_code_list[i].corresponding_node.info.shared_function_info;
-
- if (old_infos) {
- for (var j = 0; j < old_infos.length; j++) {
- replaced_function_old_infos.push(old_infos[j]);
- replaced_function_new_infos.push(new_info);
- }
- }
- }
-
- // We haven't changed anything before this line yet.
- // Committing all changes.
-
- // Check that function being patched is not currently on stack or drop them.
- var dropped_functions_number =
- CheckStackActivations(replaced_function_old_infos,
- replaced_function_new_infos,
- change_log);
-
- // Our current implementation requires client to manually issue "step in"
- // command for correct stack state if the stack was modified.
- preview_description.stack_modified = dropped_functions_number != 0;
-
- var old_script;
-
- // Create an old script only if there are function that should be linked
- // to old version.
- if (link_to_old_script_list.length == 0) {
- %LiveEditReplaceScript(script, new_source, null);
- old_script = UNDEFINED;
- } else {
- var old_script_name = CreateNameForOldScript(script);
-
- // Update the script text and create a new script representing an old
- // version of the script.
- old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
-
- var link_to_old_script_report = new GlobalArray();
- change_log.push( { linked_to_old_script: link_to_old_script_report } );
-
- // We need to link to old script all former nested functions.
- for (var i = 0; i < link_to_old_script_list.length; i++) {
- LinkToOldScript(link_to_old_script_list[i], old_script,
- link_to_old_script_report);
- }
-
- preview_description.created_script_name = old_script_name;
- }
-
- for (var i = 0; i < replace_code_list.length; i++) {
- PatchFunctionCode(replace_code_list[i], change_log);
- }
-
- var position_patch_report = new GlobalArray();
- change_log.push( {position_patched: position_patch_report} );
-
- for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account whether it's source_changed or
- // unchanged and whether positions changed at all.
- PatchPositions(update_positions_list[i], diff_array,
- position_patch_report);
-
- if (update_positions_list[i].live_shared_function_infos) {
- var new_function_literal_id =
- update_positions_list[i]
- .corresponding_node.info.function_literal_id;
- update_positions_list[i].live_shared_function_infos.forEach(function(
- info) {
- %LiveEditFunctionSourceUpdated(
- info.raw_array, new_function_literal_id);
- });
- }
- }
-
- %LiveEditFixupScript(script, max_function_literal_id);
-
- // Link all the functions we're going to use to an actual script.
- for (var i = 0; i < link_to_original_script_list.length; i++) {
- %LiveEditFunctionSetScript(
- link_to_original_script_list[i].info.shared_function_info, script);
- }
-
- preview_description.updated = true;
- return preview_description;
- }
-
- // Fully compiles source string as a script. Returns Array of
- // FunctionCompileInfo -- a descriptions of all functions of the script.
- // Elements of array are ordered by start positions of functions (from top
- // to bottom) in the source. Fields outer_index and next_sibling_index help
- // to navigate the nesting structure of functions.
- //
- // All functions get compiled linked to script provided as parameter script.
- // TODO(LiveEdit): consider not using actual scripts as script, because
- // we have to manually erase all links right after compile.
- function GatherCompileInfo(source, script) {
- // Get function info, elements are partially sorted (it is a tree of
- // nested functions serialized as parent followed by serialized children.
- var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
-
- // Sort function infos by start position field.
- var compile_info = new GlobalArray();
- var old_index_map = new GlobalArray();
- for (var i = 0; i < raw_compile_info.length; i++) {
- var info = new FunctionCompileInfo(raw_compile_info[i]);
- // Remove all links to the actual script. Breakpoints system and
- // LiveEdit itself believe that any function in heap that points to a
- // particular script is a regular function.
- // For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, UNDEFINED);
- compile_info.push(info);
- old_index_map.push(i);
- }
-
- for (var i = 0; i < compile_info.length; i++) {
- var k = i;
- for (var j = i + 1; j < compile_info.length; j++) {
- if (compile_info[k].start_position > compile_info[j].start_position) {
- k = j;
- }
- }
- if (k != i) {
- var temp_info = compile_info[k];
- var temp_index = old_index_map[k];
- compile_info[k] = compile_info[i];
- old_index_map[k] = old_index_map[i];
- compile_info[i] = temp_info;
- old_index_map[i] = temp_index;
- }
- }
-
- // After sorting update outer_index field using old_index_map. Also
- // set next_sibling_index field.
- var current_index = 0;
-
- // The recursive function, that goes over all children of a particular
- // node (i.e. function info).
- function ResetIndexes(new_parent_index, old_parent_index) {
- var previous_sibling = -1;
- while (current_index < compile_info.length &&
- compile_info[current_index].outer_index == old_parent_index) {
- var saved_index = current_index;
- compile_info[saved_index].outer_index = new_parent_index;
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = saved_index;
- }
- previous_sibling = saved_index;
- current_index++;
- ResetIndexes(saved_index, old_index_map[saved_index]);
- }
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = -1;
- }
- }
-
- ResetIndexes(-1, -1);
- Assert(current_index == compile_info.length);
-
- return compile_info;
- }
-
-
- // Replaces function's Code.
- function PatchFunctionCode(old_node, change_log) {
- var new_info = old_node.corresponding_node.info;
- if (old_node.live_shared_function_infos) {
- old_node.live_shared_function_infos.forEach(function (old_info) {
- %LiveEditReplaceFunctionCode(new_info.raw_array,
- old_info.raw_array);
-
- // The function got a new code. However, this new code brings all new
- // instances of SharedFunctionInfo for nested functions. However,
- // we want the original instances to be used wherever possible.
- // (This is because old instances and new instances will be both
- // linked to a script and breakpoints subsystem does not really
- // expects this; neither does LiveEdit subsystem on next call).
- for (var i = 0; i < old_node.children.length; i++) {
- if (old_node.children[i].corresponding_node) {
- var corresponding_child_info =
- old_node.children[i].corresponding_node.info.
- shared_function_info;
-
- if (old_node.children[i].live_shared_function_infos) {
- old_node.children[i].live_shared_function_infos.
- forEach(function (old_child_info) {
- %LiveEditReplaceRefToNestedFunction(
- old_info.info,
- corresponding_child_info,
- old_child_info.info);
- });
- }
- }
- }
- });
-
- change_log.push( {function_patched: new_info.function_name} );
- } else {
- change_log.push( {function_patched: new_info.function_name,
- function_info_not_found: true} );
- }
- }
-
-
- // Makes a function associated with another instance of a script (the
- // one representing its old version). This way the function still
- // may access its own text.
- function LinkToOldScript(old_info_node, old_script, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSetScript(info.info, old_script);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- report_array.push(
- { name: old_info_node.info.function_name, not_found: true } );
- }
- }
-
- function Assert(condition, message) {
- if (!condition) {
- if (message) {
- throw "Assert " + message;
- } else {
- throw "Assert";
- }
- }
- }
-
- function DiffChunk(pos1, pos2, len1, len2) {
- this.pos1 = pos1;
- this.pos2 = pos2;
- this.len1 = len1;
- this.len2 = len2;
- }
-
- function PosTranslator(diff_array) {
- var chunks = new GlobalArray();
- var current_diff = 0;
- for (var i = 0; i < diff_array.length; i += 3) {
- var pos1_begin = diff_array[i];
- var pos2_begin = pos1_begin + current_diff;
- var pos1_end = diff_array[i + 1];
- var pos2_end = diff_array[i + 2];
- chunks.push(new DiffChunk(pos1_begin, pos2_begin, pos1_end - pos1_begin,
- pos2_end - pos2_begin));
- current_diff = pos2_end - pos1_end;
- }
- this.chunks = chunks;
- }
- PosTranslator.prototype.GetChunks = function() {
- return this.chunks;
- };
-
- PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
- var array = this.chunks;
- if (array.length == 0 || pos < array[0].pos1) {
- return pos;
- }
- var chunk_index1 = 0;
- var chunk_index2 = array.length - 1;
-
- while (chunk_index1 < chunk_index2) {
- var middle_index = MathFloor((chunk_index1 + chunk_index2) / 2);
- if (pos < array[middle_index + 1].pos1) {
- chunk_index2 = middle_index;
- } else {
- chunk_index1 = middle_index + 1;
- }
- }
- var chunk = array[chunk_index1];
- if (pos >= chunk.pos1 + chunk.len1) {
- return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
- }
-
- if (!inside_chunk_handler) {
- inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
- }
- return inside_chunk_handler(pos, chunk);
- };
-
- PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
- Assert(false, "Cannot translate position in changed area");
- };
-
- PosTranslator.ShiftWithTopInsideChunkHandler =
- function(pos, diff_chunk) {
- // We carelessly do not check whether we stay inside the chunk after
- // translation.
- return pos - diff_chunk.pos1 + diff_chunk.pos2;
- };
-
- var FunctionStatus = {
- // No change to function or its inner functions; however its positions
- // in script may have been shifted.
- UNCHANGED: "unchanged",
- // The code of a function remains unchanged, but something happened inside
- // some inner functions.
- SOURCE_CHANGED: "source changed",
- // The code of a function is changed or some nested function cannot be
- // properly patched so this function must be recompiled.
- CHANGED: "changed",
- // Function is changed but cannot be patched.
- DAMAGED: "damaged"
- };
-
- function CodeInfoTreeNode(code_info, children, array_index) {
- this.info = code_info;
- this.children = children;
- // an index in array of compile_info
- this.array_index = array_index;
- this.parent = UNDEFINED;
-
- this.status = FunctionStatus.UNCHANGED;
- // Status explanation is used for debugging purposes and will be shown
- // in user UI if some explanations are needed.
- this.status_explanation = UNDEFINED;
- this.new_start_pos = UNDEFINED;
- this.new_end_pos = UNDEFINED;
- this.corresponding_node = UNDEFINED;
- this.unmatched_new_nodes = UNDEFINED;
-
- // 'Textual' correspondence/matching is weaker than 'pure'
- // correspondence/matching. We need 'textual' level for visual presentation
- // in UI, we use 'pure' level for actual code manipulation.
- // Sometimes only function body is changed (functions in old and new script
- // textually correspond), but we cannot patch the code, so we see them
- // as an old function deleted and new function created.
- this.textual_corresponding_node = UNDEFINED;
- this.textually_unmatched_new_nodes = UNDEFINED;
-
- this.live_shared_function_infos = UNDEFINED;
- }
-
- // From array of function infos that is implicitly a tree creates
- // an actual tree of functions in script.
- function BuildCodeInfoTree(code_info_array) {
- // Throughtout all function we iterate over input array.
- var index = 0;
-
- // Recursive function that builds a branch of tree.
- function BuildNode() {
- var my_index = index;
- index++;
- var child_array = new GlobalArray();
- while (index < code_info_array.length &&
- code_info_array[index].outer_index == my_index) {
- child_array.push(BuildNode());
- }
- var node = new CodeInfoTreeNode(code_info_array[my_index], child_array,
- my_index);
- for (var i = 0; i < child_array.length; i++) {
- child_array[i].parent = node;
- }
- return node;
- }
-
- var root = BuildNode();
- Assert(index == code_info_array.length);
- return root;
- }
-
- // Applies a list of the textual diff chunks onto the tree of functions.
- // Determines status of each function (from unchanged to damaged). However
- // children of unchanged functions are ignored.
- function MarkChangedFunctions(code_info_tree, chunks) {
-
- // A convenient iterator over diff chunks that also translates
- // positions from old to new in a current non-changed part of script.
- var chunk_it = new function() {
- var chunk_index = 0;
- var pos_diff = 0;
- this.current = function() { return chunks[chunk_index]; };
- this.next = function() {
- var chunk = chunks[chunk_index];
- pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
- chunk_index++;
- };
- this.done = function() { return chunk_index >= chunks.length; };
- this.TranslatePos = function(pos) { return pos + pos_diff; };
- };
-
- // A recursive function that processes internals of a function and all its
- // inner functions. Iterator chunk_it initially points to a chunk that is
- // below function start.
- function ProcessInternals(info_node) {
- info_node.new_start_pos = chunk_it.TranslatePos(
- info_node.info.start_position);
- var child_index = 0;
- var code_changed = false;
- var source_changed = false;
- // Simultaneously iterates over child functions and over chunks.
- while (!chunk_it.done() &&
- chunk_it.current().pos1 < info_node.info.end_position) {
- if (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
-
- if (child.info.end_position <= chunk_it.current().pos1) {
- ProcessUnchangedChild(child);
- child_index++;
- continue;
- } else if (child.info.start_position >=
- chunk_it.current().pos1 + chunk_it.current().len1) {
- code_changed = true;
- chunk_it.next();
- continue;
- } else if (child.info.start_position <= chunk_it.current().pos1 &&
- child.info.end_position >= chunk_it.current().pos1 +
- chunk_it.current().len1) {
- ProcessInternals(child);
- source_changed = source_changed ||
- ( child.status != FunctionStatus.UNCHANGED );
- code_changed = code_changed ||
- ( child.status == FunctionStatus.DAMAGED );
- child_index++;
- continue;
- } else {
- code_changed = true;
- child.status = FunctionStatus.DAMAGED;
- child.status_explanation =
- "Text diff overlaps with function boundary";
- child_index++;
- continue;
- }
- } else {
- if (chunk_it.current().pos1 + chunk_it.current().len1 <=
- info_node.info.end_position) {
- info_node.status = FunctionStatus.CHANGED;
- chunk_it.next();
- continue;
- } else {
- info_node.status = FunctionStatus.DAMAGED;
- info_node.status_explanation =
- "Text diff overlaps with function boundary";
- return;
- }
- }
- Assert("Unreachable", false);
- }
- while (child_index < info_node.children.length) {
- var child = info_node.children[child_index];
- ProcessUnchangedChild(child);
- child_index++;
- }
- if (code_changed) {
- info_node.status = FunctionStatus.CHANGED;
- } else if (source_changed) {
- info_node.status = FunctionStatus.SOURCE_CHANGED;
- }
- info_node.new_end_pos =
- chunk_it.TranslatePos(info_node.info.end_position);
- }
-
- function ProcessUnchangedChild(node) {
- node.new_start_pos = chunk_it.TranslatePos(node.info.start_position);
- node.new_end_pos = chunk_it.TranslatePos(node.info.end_position);
- }
-
- ProcessInternals(code_info_tree);
- }
-
- // For each old function (if it is not damaged) tries to find a corresponding
- // function in new script. Typically it should succeed (non-damaged functions
- // by definition may only have changes inside their bodies). However there are
- // reasons for correspondence not to be found; function with unmodified text
- // in new script may become enclosed into other function; the innocent change
- // inside function body may in fact be something like "} function B() {" that
- // splits a function into 2 functions.
- function FindCorrespondingFunctions(old_code_tree, new_code_tree) {
-
- // A recursive function that tries to find a correspondence for all
- // child functions and for their inner functions.
- function ProcessNode(old_node, new_node) {
- var scope_change_description =
- IsFunctionContextLocalsChanged(old_node.info, new_node.info);
- if (scope_change_description) {
- old_node.status = FunctionStatus.CHANGED;
- }
-
- var old_children = old_node.children;
- var new_children = new_node.children;
-
- var unmatched_new_nodes_list = [];
- var textually_unmatched_new_nodes_list = [];
-
- var old_index = 0;
- var new_index = 0;
- while (old_index < old_children.length) {
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- old_index++;
- } else if (new_index < new_children.length) {
- if (new_children[new_index].info.start_position <
- old_children[old_index].new_start_pos) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- } else if (new_children[new_index].info.start_position ==
- old_children[old_index].new_start_pos) {
- if (new_children[new_index].info.end_position ==
- old_children[old_index].new_end_pos) {
- old_children[old_index].corresponding_node =
- new_children[new_index];
- old_children[old_index].textual_corresponding_node =
- new_children[new_index];
- if (scope_change_description) {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "Enclosing function is now incompatible. " +
- scope_change_description;
- old_children[old_index].corresponding_node = UNDEFINED;
- } else if (old_children[old_index].status !=
- FunctionStatus.UNCHANGED) {
- ProcessNode(old_children[old_index],
- new_children[new_index]);
- if (old_children[old_index].status == FunctionStatus.DAMAGED) {
- unmatched_new_nodes_list.push(
- old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = UNDEFINED;
- old_node.status = FunctionStatus.CHANGED;
- }
- } else {
- ProcessNode(old_children[old_index], new_children[new_index]);
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- }
- new_index++;
- old_index++;
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- } else {
- old_children[old_index].status = FunctionStatus.DAMAGED;
- old_children[old_index].status_explanation =
- "No corresponding function in new script found";
- old_node.status = FunctionStatus.CHANGED;
- old_index++;
- }
- }
-
- while (new_index < new_children.length) {
- unmatched_new_nodes_list.push(new_children[new_index]);
- textually_unmatched_new_nodes_list.push(new_children[new_index]);
- new_index++;
- }
-
- if (old_node.status == FunctionStatus.CHANGED) {
- if (old_node.info.param_num != new_node.info.param_num) {
- old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = "Changed parameter number: " +
- old_node.info.param_num + " and " + new_node.info.param_num;
- }
- }
- old_node.unmatched_new_nodes = unmatched_new_nodes_list;
- old_node.textually_unmatched_new_nodes =
- textually_unmatched_new_nodes_list;
- }
-
- ProcessNode(old_code_tree, new_code_tree);
-
- old_code_tree.corresponding_node = new_code_tree;
- old_code_tree.textual_corresponding_node = new_code_tree;
-
- Assert(old_code_tree.status != FunctionStatus.DAMAGED,
- "Script became damaged");
- }
-
- function FindLiveSharedInfos(old_code_tree, script) {
- var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
-
- var shared_infos = new GlobalArray();
-
- for (var i = 0; i < shared_raw_list.length; i++) {
- shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
- }
-
- // Finds all SharedFunctionInfos that corresponds to compile info
- // in old version of the script.
- function FindFunctionInfos(compile_info) {
- var wrappers = [];
-
- for (var i = 0; i < shared_infos.length; i++) {
- var wrapper = shared_infos[i];
- if (wrapper.start_position == compile_info.start_position &&
- wrapper.end_position == compile_info.end_position) {
- wrappers.push(wrapper);
- }
- }
-
- if (wrappers.length > 0) {
- return wrappers;
- }
- }
-
- function TraverseTree(node) {
- node.live_shared_function_infos = FindFunctionInfos(node.info);
-
- for (var i = 0; i < node.children.length; i++) {
- TraverseTree(node.children[i]);
- }
- }
-
- TraverseTree(old_code_tree);
- }
-
-
- // An object describing function compilation details. Its index fields
- // apply to indexes inside array that stores these objects.
- function FunctionCompileInfo(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.param_num = raw_array[3];
- this.scope_info = raw_array[4];
- this.outer_index = raw_array[5];
- this.shared_function_info = raw_array[6];
- this.function_literal_id = raw_array[7];
- this.next_sibling_index = null;
- this.raw_array = raw_array;
- }
-
- function SharedInfoWrapper(raw_array) {
- this.function_name = raw_array[0];
- this.start_position = raw_array[1];
- this.end_position = raw_array[2];
- this.info = raw_array[3];
- this.raw_array = raw_array;
- }
-
- // Changes positions (including all statements) in function.
- function PatchPositions(old_info_node, diff_array, report_array) {
- if (old_info_node.live_shared_function_infos) {
- old_info_node.live_shared_function_infos.forEach(function (info) {
- %LiveEditPatchFunctionPositions(info.raw_array,
- diff_array);
- });
-
- report_array.push( { name: old_info_node.info.function_name } );
- } else {
- // TODO(LiveEdit): function is not compiled yet or is already collected.
- report_array.push(
- { name: old_info_node.info.function_name, info_not_found: true } );
- }
- }
-
- // Adds a suffix to script name to mark that it is old version.
- function CreateNameForOldScript(script) {
- // TODO(635): try better than this; support several changes.
- return script.name + " (old)";
- }
-
- // Compares a function scope heap structure, old and new version, whether it
- // changed or not. Returns explanation if they differ.
- function IsFunctionContextLocalsChanged(function_info1, function_info2) {
- var scope_info1 = function_info1.scope_info;
- var scope_info2 = function_info2.scope_info;
-
- var scope_info1_text;
- var scope_info2_text;
-
- if (scope_info1) {
- scope_info1_text = scope_info1.toString();
- } else {
- scope_info1_text = "";
- }
- if (scope_info2) {
- scope_info2_text = scope_info2.toString();
- } else {
- scope_info2_text = "";
- }
-
- if (scope_info1_text != scope_info2_text) {
- return "Variable map changed: [" + scope_info1_text +
- "] => [" + scope_info2_text + "]";
- }
- // No differences. Return undefined.
- return;
- }
-
- // Minifier forward declaration.
- var FunctionPatchabilityStatus;
-
- // For array of wrapped shared function infos checks that none of them
- // have activations on stack (of any thread). Throws a Failure exception
- // if this proves to be false.
- function CheckStackActivations(old_shared_wrapper_list,
- new_shared_list,
- change_log) {
- var old_shared_list = new GlobalArray();
- for (var i = 0; i < old_shared_wrapper_list.length; i++) {
- old_shared_list[i] = old_shared_wrapper_list[i].info;
- }
- var result = %LiveEditCheckAndDropActivations(
- old_shared_list, new_shared_list, true);
- if (result[old_shared_wrapper_list.length]) {
- // Extra array element may contain error message.
- throw new Failure(result[old_shared_wrapper_list.length]);
- }
-
- var problems = new GlobalArray();
- var dropped = new GlobalArray();
- for (var i = 0; i < old_shared_list.length; i++) {
- var shared = old_shared_wrapper_list[i];
- if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
- dropped.push({ name: shared.function_name } );
- } else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
- var description = {
- name: shared.function_name,
- start_pos: shared.start_position,
- end_pos: shared.end_position,
- replace_problem:
- FunctionPatchabilityStatus.SymbolName(result[i])
- };
- problems.push(description);
- }
- }
- if (dropped.length > 0) {
- change_log.push({ dropped_from_stack: dropped });
- }
- if (problems.length > 0) {
- change_log.push( { functions_on_stack: problems } );
- throw new Failure("Blocked by functions on stack");
- }
-
- return dropped.length;
- }
-
- // A copy of the FunctionPatchabilityStatus enum from liveedit.h
- var FunctionPatchabilityStatus = {
- AVAILABLE_FOR_PATCH: 1,
- BLOCKED_ON_ACTIVE_STACK: 2,
- BLOCKED_ON_OTHER_STACK: 3,
- BLOCKED_UNDER_NATIVE_CODE: 4,
- REPLACED_ON_ACTIVE_STACK: 5,
- BLOCKED_UNDER_GENERATOR: 6,
- BLOCKED_ACTIVE_GENERATOR: 7,
- BLOCKED_NO_NEW_TARGET_ON_RESTART: 8
- };
-
- FunctionPatchabilityStatus.SymbolName = function(code) {
- var enumeration = FunctionPatchabilityStatus;
- for (var name in enumeration) {
- if (enumeration[name] == code) {
- return name;
- }
- }
- };
-
-
- // A logical failure in liveedit process. This means that change_log
- // is valid and consistent description of what happened.
- function Failure(message) {
- this.message = message;
- }
-
- Failure.prototype.toString = function() {
- return "LiveEdit Failure: " + this.message;
- };
-
- function CopyErrorPositionToDetails(e, details) {
- function createPositionStruct(script, position) {
- if (position == -1) return;
- var location = %ScriptPositionInfo(script, position, true);
- if (location == null) return;
- return {
- line: location.line + 1,
- column: location.column + 1,
- position: position
- };
- }
-
- if (!("scriptObject" in e) || !("startPosition" in e)) {
- return;
- }
-
- var script = e.scriptObject;
-
- var position_struct = {
- start: createPositionStruct(script, e.startPosition),
- end: createPositionStruct(script, e.endPosition)
- };
- details.position = position_struct;
- }
-
- // LiveEdit main entry point: changes a script text to a new string.
- function SetScriptSource(script, new_source, preview_only, change_log) {
- var old_source = script.source;
- var diff = CompareStrings(old_source, new_source);
- return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
- change_log);
- }
-
- function CompareStrings(s1, s2) {
- return %LiveEditCompareStrings(s1, s2);
- }
-
- // Applies the change to the script.
- // The change is always a substring (change_pos, change_pos + change_len)
- // being replaced with a completely different string new_str.
- // This API is a legacy and is obsolete.
- //
- // @param {Script} script that is being changed
- // @param {Array} change_log a list that collects engineer-readable
- // description of what happened.
- function ApplySingleChunkPatch(script, change_pos, change_len, new_str,
- change_log) {
- var old_source = script.source;
-
- // Prepare new source string.
- var new_source = old_source.substring(0, change_pos) +
- new_str + old_source.substring(change_pos + change_len);
-
- return ApplyPatchMultiChunk(script,
- [ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, false, change_log);
- }
-
- // Creates JSON description for a change tree.
- function DescribeChangeTree(old_code_tree) {
-
- function ProcessOldNode(node) {
- var child_infos = [];
- for (var i = 0; i < node.children.length; i++) {
- var child = node.children[i];
- if (child.status != FunctionStatus.UNCHANGED) {
- child_infos.push(ProcessOldNode(child));
- }
- }
- var new_child_infos = [];
- if (node.textually_unmatched_new_nodes) {
- for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
- var child = node.textually_unmatched_new_nodes[i];
- new_child_infos.push(ProcessNewNode(child));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- status: node.status,
- children: child_infos,
- new_children: new_child_infos
- };
- if (node.status_explanation) {
- res.status_explanation = node.status_explanation;
- }
- if (node.textual_corresponding_node) {
- res.new_positions = DescribePositions(node.textual_corresponding_node);
- }
- return res;
- }
-
- function ProcessNewNode(node) {
- var child_infos = [];
- // Do not list ancestors.
- if (false) {
- for (var i = 0; i < node.children.length; i++) {
- child_infos.push(ProcessNewNode(node.children[i]));
- }
- }
- var res = {
- name: node.info.function_name,
- positions: DescribePositions(node),
- children: child_infos,
- };
- return res;
- }
-
- function DescribePositions(node) {
- return {
- start_position: node.info.start_position,
- end_position: node.info.end_position
- };
- }
-
- return ProcessOldNode(old_code_tree);
- }
-
- // -------------------------------------------------------------------
- // Exports
-
- var LiveEdit = {};
- LiveEdit.SetScriptSource = SetScriptSource;
- LiveEdit.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
- LiveEdit.Failure = Failure;
-
- LiveEdit.TestApi = {
- PosTranslator: PosTranslator,
- CompareStrings: CompareStrings,
- ApplySingleChunkPatch: ApplySingleChunkPatch
- };
-
- // Functions needed by the debugger runtime.
- utils.InstallConstants(utils, [
- "SetScriptSource", LiveEdit.SetScriptSource,
- ]);
-
- global.Debug.LiveEdit = LiveEdit;
-
-})
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 9767ab0fd8..b84779a4fb 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -39,8 +39,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lhu(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
ParameterCount dummy1(a2);
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 8bfb31e3a2..ebd8db26d7 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -39,8 +39,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lhu(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
ParameterCount dummy1(a2);
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
deleted file mode 100644
index 99b6414f3a..0000000000
--- a/deps/v8/src/debug/mirrors.js
+++ /dev/null
@@ -1,2439 +0,0 @@
-// Copyright 2006-2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-// ----------------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var IsNaN = global.isNaN;
-var JSONStringify = global.JSON.stringify;
-var MapEntries = global.Map.prototype.entries;
-var MapIteratorNext = (new global.Map).entries().next;
-var SetIteratorNext = (new global.Set).values().next;
-var SetValues = global.Set.prototype.values;
-
-// ----------------------------------------------------------------------------
-
-// Mirror hierarchy:
-// - Mirror
-// - ValueMirror
-// - UndefinedMirror
-// - NullMirror
-// - BooleanMirror
-// - NumberMirror
-// - StringMirror
-// - SymbolMirror
-// - ObjectMirror
-// - FunctionMirror
-// - UnresolvedFunctionMirror
-// - ArrayMirror
-// - DateMirror
-// - RegExpMirror
-// - ErrorMirror
-// - PromiseMirror
-// - MapMirror
-// - SetMirror
-// - IteratorMirror
-// - GeneratorMirror
-// - PropertyMirror
-// - InternalPropertyMirror
-// - FrameMirror
-// - ScriptMirror
-// - ScopeMirror
-
-macro IS_BOOLEAN(arg)
-(typeof(arg) === 'boolean')
-endmacro
-
-macro IS_DATE(arg)
-(%IsDate(arg))
-endmacro
-
-macro IS_ERROR(arg)
-(%IsJSError(arg))
-endmacro
-
-macro IS_GENERATOR(arg)
-(%IsJSGeneratorObject(arg))
-endmacro
-
-macro IS_MAP(arg)
-(%_IsJSMap(arg))
-endmacro
-
-macro IS_MAP_ITERATOR(arg)
-(%IsJSMapIterator(arg))
-endmacro
-
-macro IS_SCRIPT(arg)
-(%IsScriptWrapper(arg))
-endmacro
-
-macro IS_SET(arg)
-(%_IsJSSet(arg))
-endmacro
-
-macro IS_SET_ITERATOR(arg)
-(%IsJSSetIterator(arg))
-endmacro
-
-// Must match PropertyFilter in property-details.h
-define PROPERTY_FILTER_NONE = 0;
-
-// Type names of the different mirrors.
-var MirrorType = {
- UNDEFINED_TYPE : 'undefined',
- NULL_TYPE : 'null',
- BOOLEAN_TYPE : 'boolean',
- NUMBER_TYPE : 'number',
- STRING_TYPE : 'string',
- SYMBOL_TYPE : 'symbol',
- OBJECT_TYPE : 'object',
- FUNCTION_TYPE : 'function',
- REGEXP_TYPE : 'regexp',
- ERROR_TYPE : 'error',
- PROPERTY_TYPE : 'property',
- INTERNAL_PROPERTY_TYPE : 'internalProperty',
- FRAME_TYPE : 'frame',
- SCRIPT_TYPE : 'script',
- CONTEXT_TYPE : 'context',
- SCOPE_TYPE : 'scope',
- PROMISE_TYPE : 'promise',
- MAP_TYPE : 'map',
- SET_TYPE : 'set',
- ITERATOR_TYPE : 'iterator',
- GENERATOR_TYPE : 'generator',
-}
-
-/**
- * Returns the mirror for a specified value or object.
- *
- * @param {value or Object} value the value or object to retrieve the mirror for
- * @returns {Mirror} the mirror reflects the passed value or object
- */
-function MakeMirror(value) {
- var mirror;
-
- if (IS_UNDEFINED(value)) {
- mirror = new UndefinedMirror();
- } else if (IS_NULL(value)) {
- mirror = new NullMirror();
- } else if (IS_BOOLEAN(value)) {
- mirror = new BooleanMirror(value);
- } else if (IS_NUMBER(value)) {
- mirror = new NumberMirror(value);
- } else if (IS_STRING(value)) {
- mirror = new StringMirror(value);
- } else if (IS_SYMBOL(value)) {
- mirror = new SymbolMirror(value);
- } else if (IS_ARRAY(value)) {
- mirror = new ArrayMirror(value);
- } else if (IS_DATE(value)) {
- mirror = new DateMirror(value);
- } else if (IS_FUNCTION(value)) {
- mirror = new FunctionMirror(value);
- } else if (%IsRegExp(value)) {
- mirror = new RegExpMirror(value);
- } else if (IS_ERROR(value)) {
- mirror = new ErrorMirror(value);
- } else if (IS_SCRIPT(value)) {
- mirror = new ScriptMirror(value);
- } else if (IS_MAP(value) || IS_WEAKMAP(value)) {
- mirror = new MapMirror(value);
- } else if (IS_SET(value) || IS_WEAKSET(value)) {
- mirror = new SetMirror(value);
- } else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
- mirror = new IteratorMirror(value);
- } else if (%is_promise(value)) {
- mirror = new PromiseMirror(value);
- } else if (IS_GENERATOR(value)) {
- mirror = new GeneratorMirror(value);
- } else {
- mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE);
- }
-
- return mirror;
-}
-
-
-/**
- * Returns the mirror for the undefined value.
- *
- * @returns {Mirror} the mirror reflects the undefined value
- */
-function GetUndefinedMirror() {
- return MakeMirror(UNDEFINED);
-}
-
-
-/**
- * Inherit the prototype methods from one constructor into another.
- *
- * The Function.prototype.inherits from lang.js rewritten as a standalone
- * function (not on Function.prototype). NOTE: If this file is to be loaded
- * during bootstrapping this function needs to be revritten using some native
- * functions as prototype setup using normal JavaScript does not work as
- * expected during bootstrapping (see mirror.js in r114903).
- *
- * @param {function} ctor Constructor function which needs to inherit the
- * prototype
- * @param {function} superCtor Constructor function to inherit prototype from
- */
-function inherits(ctor, superCtor) {
- var tempCtor = function(){};
- tempCtor.prototype = superCtor.prototype;
- ctor.super_ = superCtor.prototype;
- ctor.prototype = new tempCtor();
- ctor.prototype.constructor = ctor;
-}
-
-// Maximum length when sending strings through the JSON protocol.
-var kMaxProtocolStringLength = 80;
-
-
-// A copy of the PropertyKind enum from property-details.h
-var PropertyType = {};
-PropertyType.Data = 0;
-PropertyType.Accessor = 1;
-
-
-// Different attributes for a property.
-var PropertyAttribute = {};
-PropertyAttribute.None = NONE;
-PropertyAttribute.ReadOnly = READ_ONLY;
-PropertyAttribute.DontEnum = DONT_ENUM;
-PropertyAttribute.DontDelete = DONT_DELETE;
-
-
-// A copy of the scope types from runtime-debug.cc.
-// NOTE: these constants should be backward-compatible, so
-// add new ones to the end of this list.
-var ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4,
- Block: 5,
- Script: 6,
- Eval: 7,
- Module: 8,
- };
-
-/**
- * Base class for all mirror objects.
- * @param {string} type The type of the mirror
- * @constructor
- */
-function Mirror(type) {
- this.type_ = type;
-}
-
-
-Mirror.prototype.type = function() {
- return this.type_;
-};
-
-
-/**
- * Check whether the mirror reflects a value.
- * @returns {boolean} True if the mirror reflects a value.
- */
-Mirror.prototype.isValue = function() {
- return this instanceof ValueMirror;
-};
-
-
-/**
- * Check whether the mirror reflects the undefined value.
- * @returns {boolean} True if the mirror reflects the undefined value.
- */
-Mirror.prototype.isUndefined = function() {
- return this instanceof UndefinedMirror;
-};
-
-
-/**
- * Check whether the mirror reflects the null value.
- * @returns {boolean} True if the mirror reflects the null value
- */
-Mirror.prototype.isNull = function() {
- return this instanceof NullMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a boolean value.
- * @returns {boolean} True if the mirror reflects a boolean value
- */
-Mirror.prototype.isBoolean = function() {
- return this instanceof BooleanMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a number value.
- * @returns {boolean} True if the mirror reflects a number value
- */
-Mirror.prototype.isNumber = function() {
- return this instanceof NumberMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a string value.
- * @returns {boolean} True if the mirror reflects a string value
- */
-Mirror.prototype.isString = function() {
- return this instanceof StringMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a symbol.
- * @returns {boolean} True if the mirror reflects a symbol
- */
-Mirror.prototype.isSymbol = function() {
- return this instanceof SymbolMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an object.
- * @returns {boolean} True if the mirror reflects an object
- */
-Mirror.prototype.isObject = function() {
- return this instanceof ObjectMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a function.
- * @returns {boolean} True if the mirror reflects a function
- */
-Mirror.prototype.isFunction = function() {
- return this instanceof FunctionMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an unresolved function.
- * @returns {boolean} True if the mirror reflects an unresolved function
- */
-Mirror.prototype.isUnresolvedFunction = function() {
- return this instanceof UnresolvedFunctionMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an array.
- * @returns {boolean} True if the mirror reflects an array
- */
-Mirror.prototype.isArray = function() {
- return this instanceof ArrayMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a date.
- * @returns {boolean} True if the mirror reflects a date
- */
-Mirror.prototype.isDate = function() {
- return this instanceof DateMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a regular expression.
- * @returns {boolean} True if the mirror reflects a regular expression
- */
-Mirror.prototype.isRegExp = function() {
- return this instanceof RegExpMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an error.
- * @returns {boolean} True if the mirror reflects an error
- */
-Mirror.prototype.isError = function() {
- return this instanceof ErrorMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a promise.
- * @returns {boolean} True if the mirror reflects a promise
- */
-Mirror.prototype.isPromise = function() {
- return this instanceof PromiseMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a generator object.
- * @returns {boolean} True if the mirror reflects a generator object
- */
-Mirror.prototype.isGenerator = function() {
- return this instanceof GeneratorMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a property.
- * @returns {boolean} True if the mirror reflects a property
- */
-Mirror.prototype.isProperty = function() {
- return this instanceof PropertyMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an internal property.
- * @returns {boolean} True if the mirror reflects an internal property
- */
-Mirror.prototype.isInternalProperty = function() {
- return this instanceof InternalPropertyMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a stack frame.
- * @returns {boolean} True if the mirror reflects a stack frame
- */
-Mirror.prototype.isFrame = function() {
- return this instanceof FrameMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a script.
- * @returns {boolean} True if the mirror reflects a script
- */
-Mirror.prototype.isScript = function() {
- return this instanceof ScriptMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a context.
- * @returns {boolean} True if the mirror reflects a context
- */
-Mirror.prototype.isContext = function() {
- return this instanceof ContextMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a scope.
- * @returns {boolean} True if the mirror reflects a scope
- */
-Mirror.prototype.isScope = function() {
- return this instanceof ScopeMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a map.
- * @returns {boolean} True if the mirror reflects a map
- */
-Mirror.prototype.isMap = function() {
- return this instanceof MapMirror;
-};
-
-
-/**
- * Check whether the mirror reflects a set.
- * @returns {boolean} True if the mirror reflects a set
- */
-Mirror.prototype.isSet = function() {
- return this instanceof SetMirror;
-};
-
-
-/**
- * Check whether the mirror reflects an iterator.
- * @returns {boolean} True if the mirror reflects an iterator
- */
-Mirror.prototype.isIterator = function() {
- return this instanceof IteratorMirror;
-};
-
-
-Mirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "#<" + this.constructor.name + ">";
-};
-
-
-/**
- * Base class for all value mirror objects.
- * @param {string} type The type of the mirror
- * @param {value} value The value reflected by this mirror
- * @constructor
- * @extends Mirror
- */
-function ValueMirror(type, value) {
- %_Call(Mirror, this, type);
- this.value_ = value;
-}
-inherits(ValueMirror, Mirror);
-
-
-/**
- * Check whether this is a primitive value.
- * @return {boolean} True if the mirror reflects a primitive value
- */
-ValueMirror.prototype.isPrimitive = function() {
- var type = this.type();
- return type === 'undefined' ||
- type === 'null' ||
- type === 'boolean' ||
- type === 'number' ||
- type === 'string' ||
- type === 'symbol';
-};
-
-
-/**
- * Get the actual value reflected by this mirror.
- * @return {value} The value reflected by this mirror
- */
-ValueMirror.prototype.value = function() {
- return this.value_;
-};
-
-
-/**
- * Mirror object for Undefined.
- * @constructor
- * @extends ValueMirror
- */
-function UndefinedMirror() {
- %_Call(ValueMirror, this, MirrorType.UNDEFINED_TYPE, UNDEFINED);
-}
-inherits(UndefinedMirror, ValueMirror);
-
-
-UndefinedMirror.prototype.toText = function() {
- return 'undefined';
-};
-
-
-/**
- * Mirror object for null.
- * @constructor
- * @extends ValueMirror
- */
-function NullMirror() {
- %_Call(ValueMirror, this, MirrorType.NULL_TYPE, null);
-}
-inherits(NullMirror, ValueMirror);
-
-
-NullMirror.prototype.toText = function() {
- return 'null';
-};
-
-
-/**
- * Mirror object for boolean values.
- * @param {boolean} value The boolean value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function BooleanMirror(value) {
- %_Call(ValueMirror, this, MirrorType.BOOLEAN_TYPE, value);
-}
-inherits(BooleanMirror, ValueMirror);
-
-
-BooleanMirror.prototype.toText = function() {
- return this.value_ ? 'true' : 'false';
-};
-
-
-/**
- * Mirror object for number values.
- * @param {number} value The number value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function NumberMirror(value) {
- %_Call(ValueMirror, this, MirrorType.NUMBER_TYPE, value);
-}
-inherits(NumberMirror, ValueMirror);
-
-
-NumberMirror.prototype.toText = function() {
- return '' + this.value_;
-};
-
-
-/**
- * Mirror object for string values.
- * @param {string} value The string value reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function StringMirror(value) {
- %_Call(ValueMirror, this, MirrorType.STRING_TYPE, value);
-}
-inherits(StringMirror, ValueMirror);
-
-
-StringMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-StringMirror.prototype.getTruncatedValue = function(maxLength) {
- if (maxLength != -1 && this.length() > maxLength) {
- return this.value_.substring(0, maxLength) +
- '... (length: ' + this.length() + ')';
- }
- return this.value_;
-};
-
-StringMirror.prototype.toText = function() {
- return this.getTruncatedValue(kMaxProtocolStringLength);
-};
-
-
-/**
- * Mirror object for a Symbol
- * @param {Object} value The Symbol
- * @constructor
- * @extends Mirror
- */
-function SymbolMirror(value) {
- %_Call(ValueMirror, this, MirrorType.SYMBOL_TYPE, value);
-}
-inherits(SymbolMirror, ValueMirror);
-
-
-SymbolMirror.prototype.description = function() {
- return %SymbolDescription(%ValueOf(this.value_));
-}
-
-
-SymbolMirror.prototype.toText = function() {
- return %SymbolDescriptiveString(%ValueOf(this.value_));
-}
-
-
-/**
- * Mirror object for objects.
- * @param {object} value The object reflected by this mirror
- * @constructor
- * @extends ValueMirror
- */
-function ObjectMirror(value, type) {
- type = type || MirrorType.OBJECT_TYPE;
- %_Call(ValueMirror, this, type, value);
-}
-inherits(ObjectMirror, ValueMirror);
-
-
-ObjectMirror.prototype.className = function() {
- return %ClassOf(this.value_);
-};
-
-
-ObjectMirror.prototype.constructorFunction = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
-};
-
-
-ObjectMirror.prototype.prototypeObject = function() {
- return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
-};
-
-
-ObjectMirror.prototype.protoObject = function() {
- return MakeMirror(%DebugGetPrototype(this.value_));
-};
-
-
-ObjectMirror.prototype.hasNamedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 2) != 0;
-};
-
-
-ObjectMirror.prototype.hasIndexedInterceptor = function() {
- // Get information on interceptors for this object.
- var x = %GetInterceptorInfo(this.value_);
- return (x & 1) != 0;
-};
-
-
-/**
- * Return the property names for this object.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of names returend to the specified
- value
- * @return {Array} Property names for this object
- */
-ObjectMirror.prototype.propertyNames = function() {
- return %GetOwnPropertyKeys(this.value_, PROPERTY_FILTER_NONE);
-};
-
-
-/**
- * Return the properties for this object as an array of PropertyMirror objects.
- * @param {number} kind Indicate whether named, indexed or both kinds of
- * properties are requested
- * @param {number} limit Limit the number of properties returned to the
- specified value
- * @return {Array} Property mirrors for this object
- */
-ObjectMirror.prototype.properties = function() {
- var names = this.propertyNames();
- var properties = new GlobalArray(names.length);
- for (var i = 0; i < names.length; i++) {
- properties[i] = this.property(names[i]);
- }
-
- return properties;
-};
-
-
-/**
- * Return the internal properties for this object as an array of
- * InternalPropertyMirror objects.
- * @return {Array} Property mirrors for this object
- */
-ObjectMirror.prototype.internalProperties = function() {
- return ObjectMirror.GetInternalProperties(this.value_);
-}
-
-
-ObjectMirror.prototype.property = function(name) {
- var details = %DebugGetPropertyDetails(this.value_, name);
- if (details) {
- return new PropertyMirror(this, name, details);
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-
-/**
- * Try to find a property from its value.
- * @param {Mirror} value The property value to look for
- * @return {PropertyMirror} The property with the specified value. If no
- * property was found with the specified value UndefinedMirror is returned
- */
-ObjectMirror.prototype.lookupProperty = function(value) {
- var properties = this.properties();
-
- // Look for property value in properties.
- for (var i = 0; i < properties.length; i++) {
-
- // Skip properties which are defined through accessors.
- var property = properties[i];
- if (property.propertyType() == PropertyType.Data) {
- if (property.value_ === value.value_) {
- return property;
- }
- }
- }
-
- // Nothing found.
- return GetUndefinedMirror();
-};
-
-
-/**
- * Returns objects which has direct references to this object
- * @param {number} opt_max_objects Optional parameter specifying the maximum
- * number of referencing objects to return.
- * @return {Array} The objects which has direct references to this object.
- */
-ObjectMirror.prototype.referencedBy = function(opt_max_objects) {
- // Find all objects with direct references to this object.
- var result = %DebugReferencedBy(this.value_,
- Mirror.prototype, opt_max_objects || 0);
-
- // Make mirrors for all the references found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
-};
-
-
-ObjectMirror.prototype.toText = function() {
- var name;
- var ctor = this.constructorFunction();
- if (!ctor.isFunction()) {
- name = this.className();
- } else {
- name = ctor.name();
- if (!name) {
- name = this.className();
- }
- }
- return '#<' + name + '>';
-};
-
-
-/**
- * Return the internal properties of the value, such as [[PrimitiveValue]] of
- * scalar wrapper objects, properties of the bound function and properties of
- * the promise.
- * This method is done static to be accessible from Debug API with the bare
- * values without mirrors.
- * @return {Array} array (possibly empty) of InternalProperty instances
- */
-ObjectMirror.GetInternalProperties = function(value) {
- var properties = %DebugGetInternalProperties(value);
- var result = [];
- for (var i = 0; i < properties.length; i += 2) {
- result.push(new InternalPropertyMirror(properties[i], properties[i + 1]));
- }
- return result;
-}
-
-
-/**
- * Mirror object for functions.
- * @param {function} value The function object reflected by this mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function FunctionMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.FUNCTION_TYPE);
- this.resolved_ = true;
-}
-inherits(FunctionMirror, ObjectMirror);
-
-
-/**
- * Returns whether the function is resolved.
- * @return {boolean} True if the function is resolved. Unresolved functions can
- * only originate as functions from stack frames
- */
-FunctionMirror.prototype.resolved = function() {
- return this.resolved_;
-};
-
-
-/**
- * Returns the name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.name = function() {
- return %FunctionGetName(this.value_);
-};
-
-
-/**
- * Returns the displayName if it is set, otherwise name, otherwise inferred
- * name.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.debugName = function() {
- return %FunctionGetDebugName(this.value_);
-}
-
-
-/**
- * Returns the inferred name of the function.
- * @return {string} Name of the function
- */
-FunctionMirror.prototype.inferredName = function() {
- return %FunctionGetInferredName(this.value_);
-};
-
-
-/**
- * Returns the source code for the function.
- * @return {string or undefined} The source code for the function. If the
- * function is not resolved undefined will be returned.
- */
-FunctionMirror.prototype.source = function() {
- // Return source if function is resolved. Otherwise just fall through to
- // return undefined.
- if (this.resolved()) {
- return %FunctionToString(this.value_);
- }
-};
-
-
-/**
- * Returns the script object for the function.
- * @return {ScriptMirror or undefined} Script object for the function or
- * undefined if the function has no script
- */
-FunctionMirror.prototype.script = function() {
- // Return script if function is resolved. Otherwise just fall through
- // to return undefined.
- if (this.resolved()) {
- if (this.script_) {
- return this.script_;
- }
- var script = %FunctionGetScript(this.value_);
- if (script) {
- return this.script_ = MakeMirror(script);
- }
- }
-};
-
-
-/**
- * Returns the script source position for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Number or undefined} in-script position for the function
- */
-FunctionMirror.prototype.sourcePosition_ = function() {
- // Return position if function is resolved. Otherwise just fall
- // through to return undefined.
- if (this.resolved()) {
- return %FunctionGetScriptSourcePosition(this.value_);
- }
-};
-
-
-/**
- * Returns the script source location object for the function. Only makes sense
- * for functions which has a script defined.
- * @return {Location or undefined} in-script location for the function begin
- */
-FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved()) {
- var script = this.script();
- if (script) {
- return script.locationFromPosition(this.sourcePosition_(), true);
- }
- }
-};
-
-
-/**
- * Returns objects constructed by this function.
- * @param {number} opt_max_instances Optional parameter specifying the maximum
- * number of instances to return.
- * @return {Array or undefined} The objects constructed by this function.
- */
-FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
- if (this.resolved()) {
- // Find all objects constructed from this function.
- var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
-
- // Make mirrors for all the instances found.
- for (var i = 0; i < result.length; i++) {
- result[i] = MakeMirror(result[i]);
- }
-
- return result;
- } else {
- return [];
- }
-};
-
-
-FunctionMirror.prototype.scopeCount = function() {
- if (this.resolved()) {
- if (IS_UNDEFINED(this.scopeCount_)) {
- this.scopeCount_ = %GetFunctionScopeCount(this.value());
- }
- return this.scopeCount_;
- } else {
- return 0;
- }
-};
-
-
-FunctionMirror.prototype.scope = function(index) {
- if (this.resolved()) {
- return new ScopeMirror(UNDEFINED, this, UNDEFINED, index);
- }
-};
-
-
-FunctionMirror.prototype.toText = function() {
- return this.source();
-};
-
-
-FunctionMirror.prototype.context = function() {
- if (this.resolved()) {
- if (!this._context)
- this._context = new ContextMirror(%FunctionGetContextData(this.value_));
- return this._context;
- }
-};
-
-
-/**
- * Mirror object for unresolved functions.
- * @param {string} value The name for the unresolved function reflected by this
- * mirror.
- * @constructor
- * @extends ObjectMirror
- */
-function UnresolvedFunctionMirror(value) {
- // Construct this using the ValueMirror as an unresolved function is not a
- // real object but just a string.
- %_Call(ValueMirror, this, MirrorType.FUNCTION_TYPE, value);
- this.propertyCount_ = 0;
- this.elementCount_ = 0;
- this.resolved_ = false;
-}
-inherits(UnresolvedFunctionMirror, FunctionMirror);
-
-
-UnresolvedFunctionMirror.prototype.className = function() {
- return 'Function';
-};
-
-
-UnresolvedFunctionMirror.prototype.constructorFunction = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.prototypeObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.protoObject = function() {
- return GetUndefinedMirror();
-};
-
-
-UnresolvedFunctionMirror.prototype.name = function() {
- return this.value_;
-};
-
-
-UnresolvedFunctionMirror.prototype.debugName = function() {
- return this.value_;
-};
-
-
-UnresolvedFunctionMirror.prototype.inferredName = function() {
- return UNDEFINED;
-};
-
-
-UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
- return [];
-};
-
-
-/**
- * Mirror object for arrays.
- * @param {Array} value The Array object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ArrayMirror(value) {
- %_Call(ObjectMirror, this, value);
-}
-inherits(ArrayMirror, ObjectMirror);
-
-
-ArrayMirror.prototype.length = function() {
- return this.value_.length;
-};
-
-
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
- opt_to_index) {
- var from_index = opt_from_index || 0;
- var to_index = opt_to_index || this.length() - 1;
- if (from_index > to_index) return new GlobalArray();
- var values = new GlobalArray(to_index - from_index + 1);
- for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, TO_STRING(i));
- var value;
- if (details) {
- value = new PropertyMirror(this, i, details);
- } else {
- value = GetUndefinedMirror();
- }
- values[i - from_index] = value;
- }
- return values;
-};
-
-
-/**
- * Mirror object for dates.
- * @param {Date} value The Date object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function DateMirror(value) {
- %_Call(ObjectMirror, this, value);
-}
-inherits(DateMirror, ObjectMirror);
-
-
-DateMirror.prototype.toText = function() {
- var s = JSONStringify(this.value_);
- return s.substring(1, s.length - 1); // cut quotes
-};
-
-
-/**
- * Mirror object for regular expressions.
- * @param {RegExp} value The RegExp object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function RegExpMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.REGEXP_TYPE);
-}
-inherits(RegExpMirror, ObjectMirror);
-
-
-/**
- * Returns the source to the regular expression.
- * @return {string or undefined} The source to the regular expression
- */
-RegExpMirror.prototype.source = function() {
- return this.value_.source;
-};
-
-
-/**
- * Returns whether this regular expression has the global (g) flag set.
- * @return {boolean} Value of the global flag
- */
-RegExpMirror.prototype.global = function() {
- return this.value_.global;
-};
-
-
-/**
- * Returns whether this regular expression has the ignore case (i) flag set.
- * @return {boolean} Value of the ignore case flag
- */
-RegExpMirror.prototype.ignoreCase = function() {
- return this.value_.ignoreCase;
-};
-
-
-/**
- * Returns whether this regular expression has the multiline (m) flag set.
- * @return {boolean} Value of the multiline flag
- */
-RegExpMirror.prototype.multiline = function() {
- return this.value_.multiline;
-};
-
-
-/**
- * Returns whether this regular expression has the sticky (y) flag set.
- * @return {boolean} Value of the sticky flag
- */
-RegExpMirror.prototype.sticky = function() {
- return this.value_.sticky;
-};
-
-
-/**
- * Returns whether this regular expression has the unicode (u) flag set.
- * @return {boolean} Value of the unicode flag
- */
-RegExpMirror.prototype.unicode = function() {
- return this.value_.unicode;
-};
-
-
-RegExpMirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
- return "/" + this.source() + "/";
-};
-
-
-/**
- * Mirror object for error objects.
- * @param {Error} value The error object reflected by this mirror
- * @constructor
- * @extends ObjectMirror
- */
-function ErrorMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.ERROR_TYPE);
-}
-inherits(ErrorMirror, ObjectMirror);
-
-
-/**
- * Returns the message for this eror object.
- * @return {string or undefined} The message for this eror object
- */
-ErrorMirror.prototype.message = function() {
- return this.value_.message;
-};
-
-
-ErrorMirror.prototype.toText = function() {
- // Use the same text representation as in messages.js.
- var text;
- try {
- text = %ErrorToString(this.value_);
- } catch (e) {
- text = '#<Error>';
- }
- return text;
-};
-
-
-/**
- * Mirror object for a Promise object.
- * @param {Object} value The Promise object
- * @constructor
- * @extends ObjectMirror
- */
-function PromiseMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.PROMISE_TYPE);
-}
-inherits(PromiseMirror, ObjectMirror);
-
-
-function PromiseGetStatus_(value) {
- var status = %PromiseStatus(value);
- if (status == 0) return "pending";
- if (status == 1) return "resolved";
- return "rejected";
-}
-
-
-function PromiseGetValue_(value) {
- return %PromiseResult(value);
-}
-
-
-PromiseMirror.prototype.status = function() {
- return PromiseGetStatus_(this.value_);
-};
-
-
-PromiseMirror.prototype.promiseValue = function() {
- return MakeMirror(PromiseGetValue_(this.value_));
-};
-
-
-function MapMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.MAP_TYPE);
-}
-inherits(MapMirror, ObjectMirror);
-
-
-/**
- * Returns an array of key/value pairs of a map.
- * This will keep keys alive for WeakMaps.
- *
- * @param {number=} opt_limit Max elements to return.
- * @returns {Array.<Object>} Array of key/value pairs of a map.
- */
-MapMirror.prototype.entries = function(opt_limit) {
- var result = [];
-
- if (IS_WEAKMAP(this.value_)) {
- var entries = %GetWeakMapEntries(this.value_, opt_limit || 0);
- for (var i = 0; i < entries.length; i += 2) {
- result.push({
- key: entries[i],
- value: entries[i + 1]
- });
- }
- return result;
- }
-
- var iter = %_Call(MapEntries, this.value_);
- var next;
- while ((!opt_limit || result.length < opt_limit) &&
- !(next = iter.next()).done) {
- result.push({
- key: next.value[0],
- value: next.value[1]
- });
- }
- return result;
-};
-
-
-function SetMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.SET_TYPE);
-}
-inherits(SetMirror, ObjectMirror);
-
-
-function IteratorGetValues_(iter, next_function, opt_limit) {
- var result = [];
- var next;
- while ((!opt_limit || result.length < opt_limit) &&
- !(next = %_Call(next_function, iter)).done) {
- result.push(next.value);
- }
- return result;
-}
-
-
-/**
- * Returns an array of elements of a set.
- * This will keep elements alive for WeakSets.
- *
- * @param {number=} opt_limit Max elements to return.
- * @returns {Array.<Object>} Array of elements of a set.
- */
-SetMirror.prototype.values = function(opt_limit) {
- if (IS_WEAKSET(this.value_)) {
- return %GetWeakSetValues(this.value_, opt_limit || 0);
- }
-
- var iter = %_Call(SetValues, this.value_);
- return IteratorGetValues_(iter, SetIteratorNext, opt_limit);
-};
-
-
-function IteratorMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.ITERATOR_TYPE);
-}
-inherits(IteratorMirror, ObjectMirror);
-
-
-/**
- * Returns a preview of elements of an iterator.
- * Does not change the backing iterator state.
- *
- * @param {number=} opt_limit Max elements to return.
- * @returns {Array.<Object>} Array of elements of an iterator.
- */
-IteratorMirror.prototype.preview = function(opt_limit) {
- if (IS_MAP_ITERATOR(this.value_)) {
- return IteratorGetValues_(%MapIteratorClone(this.value_),
- MapIteratorNext,
- opt_limit);
- } else if (IS_SET_ITERATOR(this.value_)) {
- return IteratorGetValues_(%SetIteratorClone(this.value_),
- SetIteratorNext,
- opt_limit);
- }
-};
-
-
-/**
- * Mirror object for a Generator object.
- * @param {Object} data The Generator object
- * @constructor
- * @extends Mirror
- */
-function GeneratorMirror(value) {
- %_Call(ObjectMirror, this, value, MirrorType.GENERATOR_TYPE);
-}
-inherits(GeneratorMirror, ObjectMirror);
-
-
-function GeneratorGetStatus_(value) {
- var continuation = %GeneratorGetContinuation(value);
- if (continuation < -1) return "running";
- if (continuation == -1) return "closed";
- return "suspended";
-}
-
-
-GeneratorMirror.prototype.status = function() {
- return GeneratorGetStatus_(this.value_);
-};
-
-
-GeneratorMirror.prototype.sourcePosition_ = function() {
- return %GeneratorGetSourcePosition(this.value_);
-};
-
-
-GeneratorMirror.prototype.sourceLocation = function() {
- var pos = this.sourcePosition_();
- if (!IS_UNDEFINED(pos)) {
- var script = this.func().script();
- if (script) {
- return script.locationFromPosition(pos, true);
- }
- }
-};
-
-
-GeneratorMirror.prototype.func = function() {
- if (!this.func_) {
- this.func_ = MakeMirror(%GeneratorGetFunction(this.value_));
- }
- return this.func_;
-};
-
-
-GeneratorMirror.prototype.receiver = function() {
- if (!this.receiver_) {
- this.receiver_ = MakeMirror(%GeneratorGetReceiver(this.value_));
- }
- return this.receiver_;
-};
-
-
-GeneratorMirror.prototype.scopeCount = function() {
- // This value can change over time as the underlying generator is suspended
- // at different locations.
- return %GetGeneratorScopeCount(this.value());
-};
-
-
-GeneratorMirror.prototype.scope = function(index) {
- return new ScopeMirror(UNDEFINED, UNDEFINED, this, index);
-};
-
-
-GeneratorMirror.prototype.allScopes = function() {
- var scopes = [];
- for (let i = 0; i < this.scopeCount(); i++) {
- scopes.push(this.scope(i));
- }
- return scopes;
-};
-
-
-/**
- * Base mirror object for properties.
- * @param {ObjectMirror} mirror The mirror object having this property
- * @param {string} name The name of the property
- * @param {Array} details Details about the property
- * @constructor
- * @extends Mirror
- */
-function PropertyMirror(mirror, name, details) {
- %_Call(Mirror, this, MirrorType.PROPERTY_TYPE);
- this.mirror_ = mirror;
- this.name_ = name;
- this.value_ = details[0];
- this.details_ = details[1];
- this.is_interceptor_ = details[2];
- if (details.length > 3) {
- this.exception_ = details[3];
- this.getter_ = details[4];
- this.setter_ = details[5];
- }
-}
-inherits(PropertyMirror, Mirror);
-
-
-PropertyMirror.prototype.isReadOnly = function() {
- return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-};
-
-
-PropertyMirror.prototype.isEnum = function() {
- return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-};
-
-
-PropertyMirror.prototype.canDelete = function() {
- return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-};
-
-
-PropertyMirror.prototype.name = function() {
- return this.name_;
-};
-
-
-PropertyMirror.prototype.toText = function() {
- if (IS_SYMBOL(this.name_)) return %SymbolDescriptiveString(this.name_);
- return this.name_;
-};
-
-
-PropertyMirror.prototype.isIndexed = function() {
- for (var i = 0; i < this.name_.length; i++) {
- if (this.name_[i] < '0' || '9' < this.name_[i]) {
- return false;
- }
- }
- return true;
-};
-
-
-PropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_, false);
-};
-
-
-/**
- * Returns whether this property value is an exception.
- * @return {boolean} True if this property value is an exception
- */
-PropertyMirror.prototype.isException = function() {
- return this.exception_ ? true : false;
-};
-
-
-PropertyMirror.prototype.attributes = function() {
- return %DebugPropertyAttributesFromDetails(this.details_);
-};
-
-
-PropertyMirror.prototype.propertyType = function() {
- return %DebugPropertyKindFromDetails(this.details_);
-};
-
-
-/**
- * Returns whether this property has a getter defined through __defineGetter__.
- * @return {boolean} True if this property has a getter
- */
-PropertyMirror.prototype.hasGetter = function() {
- return this.getter_ ? true : false;
-};
-
-
-/**
- * Returns whether this property has a setter defined through __defineSetter__.
- * @return {boolean} True if this property has a setter
- */
-PropertyMirror.prototype.hasSetter = function() {
- return this.setter_ ? true : false;
-};
-
-
-/**
- * Returns the getter for this property defined through __defineGetter__.
- * @return {Mirror} FunctionMirror reflecting the getter function or
- * UndefinedMirror if there is no getter for this property
- */
-PropertyMirror.prototype.getter = function() {
- if (this.hasGetter()) {
- return MakeMirror(this.getter_);
- } else {
- return GetUndefinedMirror();
- }
-};
-
-
-/**
- * Returns the setter for this property defined through __defineSetter__.
- * @return {Mirror} FunctionMirror reflecting the setter function or
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.setter = function() {
- if (this.hasSetter()) {
- return MakeMirror(this.setter_);
- } else {
- return GetUndefinedMirror();
- }
-};
-
-
-/**
- * Returns whether this property is natively implemented by the host or a set
- * through JavaScript code.
- * @return {boolean} True if the property is
- * UndefinedMirror if there is no setter for this property
- */
-PropertyMirror.prototype.isNative = function() {
- return this.is_interceptor_ ||
- ((this.propertyType() == PropertyType.Accessor) &&
- !this.hasGetter() && !this.hasSetter());
-};
-
-
-/**
- * Mirror object for internal properties. Internal property reflects properties
- * not accessible from user code such as [[BoundThis]] in bound function.
- * Their names are merely symbolic.
- * @param {string} name The name of the property
- * @param {value} property value
- * @constructor
- * @extends Mirror
- */
-function InternalPropertyMirror(name, value) {
- %_Call(Mirror, this, MirrorType.INTERNAL_PROPERTY_TYPE);
- this.name_ = name;
- this.value_ = value;
-}
-inherits(InternalPropertyMirror, Mirror);
-
-
-InternalPropertyMirror.prototype.name = function() {
- return this.name_;
-};
-
-
-InternalPropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_, false);
-};
-
-
-var kFrameDetailsFrameIdIndex = 0;
-var kFrameDetailsReceiverIndex = 1;
-var kFrameDetailsFunctionIndex = 2;
-var kFrameDetailsScriptIndex = 3;
-var kFrameDetailsArgumentCountIndex = 4;
-var kFrameDetailsLocalCountIndex = 5;
-var kFrameDetailsSourcePositionIndex = 6;
-var kFrameDetailsConstructCallIndex = 7;
-var kFrameDetailsAtReturnIndex = 8;
-var kFrameDetailsFlagsIndex = 9;
-var kFrameDetailsFirstDynamicIndex = 10;
-
-var kFrameDetailsNameIndex = 0;
-var kFrameDetailsValueIndex = 1;
-var kFrameDetailsNameValueSize = 2;
-
-var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
-
-/**
- * Wrapper for the frame details information retreived from the VM. The frame
- * details from the VM is an array with the following content. See runtime.cc
- * Runtime_GetFrameDetails.
- * 0: Id
- * 1: Receiver
- * 2: Function
- * 3: Script
- * 4: Argument count
- * 5: Local count
- * 6: Source position
- * 7: Construct call
- * 8: Is at return
- * 9: Flags (debugger frame, optimized frame, inlined frame index)
- * Arguments name, value
- * Locals name, value
- * Return value if any
- * @param {number} break_id Current break id
- * @param {number} index Frame number
- * @constructor
- */
-function FrameDetails(break_id, index) {
- this.break_id_ = break_id;
- this.details_ = %GetFrameDetails(break_id, index);
-}
-
-
-FrameDetails.prototype.frameId = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFrameIdIndex];
-};
-
-
-FrameDetails.prototype.receiver = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsReceiverIndex];
-};
-
-
-FrameDetails.prototype.func = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsFunctionIndex];
-};
-
-
-FrameDetails.prototype.script = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsScriptIndex];
-};
-
-
-FrameDetails.prototype.isConstructCall = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsConstructCallIndex];
-};
-
-
-FrameDetails.prototype.isAtReturn = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsAtReturnIndex];
-};
-
-
-FrameDetails.prototype.isDebuggerFrame = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagDebuggerFrameMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
-
-
-FrameDetails.prototype.isOptimizedFrame = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagOptimizedFrameMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
-
-
-FrameDetails.prototype.isInlinedFrame = function() {
- return this.inlinedFrameIndex() > 0;
-};
-
-
-FrameDetails.prototype.inlinedFrameIndex = function() {
- %CheckExecutionState(this.break_id_);
- var f = kFrameDetailsFlagInlinedFrameIndexMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
-};
-
-
-FrameDetails.prototype.argumentCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsArgumentCountIndex];
-};
-
-
-FrameDetails.prototype.argumentName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex];
- }
-};
-
-
-FrameDetails.prototype.argumentValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.argumentCount()) {
- return this.details_[kFrameDetailsFirstDynamicIndex +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex];
- }
-};
-
-
-FrameDetails.prototype.localCount = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsLocalCountIndex];
-};
-
-
-FrameDetails.prototype.sourcePosition = function() {
- %CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsSourcePositionIndex];
-};
-
-
-FrameDetails.prototype.localName = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize;
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex];
- }
-};
-
-
-FrameDetails.prototype.localValue = function(index) {
- %CheckExecutionState(this.break_id_);
- if (index >= 0 && index < this.localCount()) {
- var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize;
- return this.details_[locals_offset +
- index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex];
- }
-};
-
-
-FrameDetails.prototype.returnValue = function() {
- %CheckExecutionState(this.break_id_);
- var return_value_offset =
- kFrameDetailsFirstDynamicIndex +
- (this.argumentCount() + this.localCount()) * kFrameDetailsNameValueSize;
- if (this.details_[kFrameDetailsAtReturnIndex]) {
- return this.details_[return_value_offset];
- }
-};
-
-
-FrameDetails.prototype.scopeCount = function() {
- if (IS_UNDEFINED(this.scopeCount_)) {
- this.scopeCount_ = %GetScopeCount(this.break_id_, this.frameId());
- }
- return this.scopeCount_;
-};
-
-
-/**
- * Mirror object for stack frames.
- * @param {number} break_id The break id in the VM for which this frame is
- valid
- * @param {number} index The frame index (top frame is index 0)
- * @constructor
- * @extends Mirror
- */
-function FrameMirror(break_id, index) {
- %_Call(Mirror, this, MirrorType.FRAME_TYPE);
- this.break_id_ = break_id;
- this.index_ = index;
- this.details_ = new FrameDetails(break_id, index);
-}
-inherits(FrameMirror, Mirror);
-
-
-FrameMirror.prototype.details = function() {
- return this.details_;
-};
-
-
-FrameMirror.prototype.index = function() {
- return this.index_;
-};
-
-
-FrameMirror.prototype.func = function() {
- if (this.func_) {
- return this.func_;
- }
-
- // Get the function for this frame from the VM.
- var f = this.details_.func();
-
- // Create a function mirror. NOTE: MakeMirror cannot be used here as the
- // value returned from the VM might be a string if the function for the
- // frame is unresolved.
- if (IS_FUNCTION(f)) {
- return this.func_ = MakeMirror(f);
- } else {
- return new UnresolvedFunctionMirror(f);
- }
-};
-
-
-FrameMirror.prototype.script = function() {
- if (!this.script_) {
- this.script_ = MakeMirror(this.details_.script());
- }
-
- return this.script_;
-}
-
-
-FrameMirror.prototype.receiver = function() {
- return MakeMirror(this.details_.receiver());
-};
-
-
-FrameMirror.prototype.isConstructCall = function() {
- return this.details_.isConstructCall();
-};
-
-
-FrameMirror.prototype.isAtReturn = function() {
- return this.details_.isAtReturn();
-};
-
-
-FrameMirror.prototype.isDebuggerFrame = function() {
- return this.details_.isDebuggerFrame();
-};
-
-
-FrameMirror.prototype.isOptimizedFrame = function() {
- return this.details_.isOptimizedFrame();
-};
-
-
-FrameMirror.prototype.isInlinedFrame = function() {
- return this.details_.isInlinedFrame();
-};
-
-
-FrameMirror.prototype.inlinedFrameIndex = function() {
- return this.details_.inlinedFrameIndex();
-};
-
-
-FrameMirror.prototype.argumentCount = function() {
- return this.details_.argumentCount();
-};
-
-
-FrameMirror.prototype.argumentName = function(index) {
- return this.details_.argumentName(index);
-};
-
-
-FrameMirror.prototype.argumentValue = function(index) {
- return MakeMirror(this.details_.argumentValue(index));
-};
-
-
-FrameMirror.prototype.localCount = function() {
- return this.details_.localCount();
-};
-
-
-FrameMirror.prototype.localName = function(index) {
- return this.details_.localName(index);
-};
-
-
-FrameMirror.prototype.localValue = function(index) {
- return MakeMirror(this.details_.localValue(index));
-};
-
-
-FrameMirror.prototype.returnValue = function() {
- return MakeMirror(this.details_.returnValue());
-};
-
-
-FrameMirror.prototype.sourcePosition = function() {
- return this.details_.sourcePosition();
-};
-
-
-FrameMirror.prototype.sourceLocation = function() {
- var script = this.script();
- if (script) {
- return script.locationFromPosition(this.sourcePosition(), true);
- }
-};
-
-
-FrameMirror.prototype.sourceLine = function() {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
-};
-
-
-FrameMirror.prototype.sourceColumn = function() {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
-};
-
-
-FrameMirror.prototype.sourceLineText = function() {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText;
- }
-};
-
-
-FrameMirror.prototype.scopeCount = function() {
- return this.details_.scopeCount();
-};
-
-
-FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, UNDEFINED, UNDEFINED, index);
-};
-
-
-FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
- var scopeDetails = %GetAllScopesDetails(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- !!opt_ignore_nested_scopes);
- var result = [];
- for (var i = 0; i < scopeDetails.length; ++i) {
- result.push(new ScopeMirror(this, UNDEFINED, UNDEFINED, i,
- scopeDetails[i]));
- }
- return result;
-};
-
-
-FrameMirror.prototype.evaluate = function(source, throw_on_side_effect = false) {
- return MakeMirror(%DebugEvaluate(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- source,
- throw_on_side_effect));
-};
-
-
-FrameMirror.prototype.invocationText = function() {
- // Format frame invoaction (receiver, function and arguments).
- var result = '';
- var func = this.func();
- var receiver = this.receiver();
- if (this.isConstructCall()) {
- // For constructor frames display new followed by the function name.
- result += 'new ';
- result += func.name() ? func.name() : '[anonymous]';
- } else if (this.isDebuggerFrame()) {
- result += '[debugger]';
- } else {
- // If the receiver has a className which is 'global' don't display it.
- var display_receiver =
- !receiver.className || (receiver.className() != 'global');
- if (display_receiver) {
- result += receiver.toText();
- }
- // Try to find the function as a property in the receiver. Include the
- // prototype chain in the lookup.
- var property = GetUndefinedMirror();
- if (receiver.isObject()) {
- for (var r = receiver;
- !r.isNull() && property.isUndefined();
- r = r.protoObject()) {
- property = r.lookupProperty(func);
- }
- }
- if (!property.isUndefined()) {
- // The function invoked was found on the receiver. Use the property name
- // for the backtrace.
- if (!property.isIndexed()) {
- if (display_receiver) {
- result += '.';
- }
- result += property.toText();
- } else {
- result += '[';
- result += property.toText();
- result += ']';
- }
- // Also known as - if the name in the function doesn't match the name
- // under which it was looked up.
- if (func.name() && func.name() != property.name()) {
- result += '(aka ' + func.name() + ')';
- }
- } else {
- // The function invoked was not found on the receiver. Use the function
- // name if available for the backtrace.
- if (display_receiver) {
- result += '.';
- }
- result += func.name() ? func.name() : '[anonymous]';
- }
- }
-
- // Render arguments for normal frames.
- if (!this.isDebuggerFrame()) {
- result += '(';
- for (var i = 0; i < this.argumentCount(); i++) {
- if (i != 0) result += ', ';
- if (this.argumentName(i)) {
- result += this.argumentName(i);
- result += '=';
- }
- result += this.argumentValue(i).toText();
- }
- result += ')';
- }
-
- if (this.isAtReturn()) {
- result += ' returning ';
- result += this.returnValue().toText();
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.sourceAndPositionText = function() {
- // Format source and position.
- var result = '';
- var func = this.func();
- if (func.resolved()) {
- var script = func.script();
- if (script) {
- if (script.name()) {
- result += script.name();
- } else {
- result += '[unnamed]';
- }
- if (!this.isDebuggerFrame()) {
- var location = this.sourceLocation();
- result += ' line ';
- result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
- result += ' column ';
- result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
- if (!IS_UNDEFINED(this.sourcePosition())) {
- result += ' (position ' + (this.sourcePosition() + 1) + ')';
- }
- }
- } else {
- result += '[no source]';
- }
- } else {
- result += '[unresolved]';
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.localsText = function() {
- // Format local variables.
- var result = '';
- var locals_count = this.localCount();
- if (locals_count > 0) {
- for (var i = 0; i < locals_count; ++i) {
- result += ' var ';
- result += this.localName(i);
- result += ' = ';
- result += this.localValue(i).toText();
- if (i < locals_count - 1) result += '\n';
- }
- }
-
- return result;
-};
-
-
-FrameMirror.prototype.restart = function() {
- var result = %LiveEditRestartFrame(this.break_id_, this.index_);
- if (IS_UNDEFINED(result)) {
- result = "Failed to find requested frame";
- }
- return result;
-};
-
-
-FrameMirror.prototype.toText = function(opt_locals) {
- var result = '';
- result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
- result += ' ';
- result += this.invocationText();
- result += ' ';
- result += this.sourceAndPositionText();
- if (opt_locals) {
- result += '\n';
- result += this.localsText();
- }
- return result;
-};
-
-
-// This indexes correspond definitions in debug-scopes.h.
-var kScopeDetailsTypeIndex = 0;
-var kScopeDetailsObjectIndex = 1;
-var kScopeDetailsNameIndex = 2;
-var kScopeDetailsStartPositionIndex = 3;
-var kScopeDetailsEndPositionIndex = 4;
-var kScopeDetailsFunctionIndex = 5;
-
-function ScopeDetails(frame, fun, gen, index, opt_details) {
- if (frame) {
- this.break_id_ = frame.break_id_;
- this.details_ = opt_details ||
- %GetScopeDetails(frame.break_id_,
- frame.details_.frameId(),
- frame.details_.inlinedFrameIndex(),
- index);
- this.frame_id_ = frame.details_.frameId();
- this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
- } else if (fun) {
- this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
- this.fun_value_ = fun.value();
- this.break_id_ = UNDEFINED;
- } else {
- this.details_ =
- opt_details || %GetGeneratorScopeDetails(gen.value(), index);
- this.gen_value_ = gen.value();
- this.break_id_ = UNDEFINED;
- }
- this.index_ = index;
-}
-
-
-ScopeDetails.prototype.type = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsTypeIndex];
-};
-
-
-ScopeDetails.prototype.object = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsObjectIndex];
-};
-
-
-ScopeDetails.prototype.name = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsNameIndex];
-};
-
-
-ScopeDetails.prototype.startPosition = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsStartPositionIndex];
-}
-
-
-ScopeDetails.prototype.endPosition = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsEndPositionIndex];
-}
-
-ScopeDetails.prototype.func = function() {
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- }
- return this.details_[kScopeDetailsFunctionIndex];
-}
-
-
-ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
- var raw_res;
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
- this.inlined_frame_id_, this.index_, name, new_value);
- } else if (!IS_UNDEFINED(this.fun_value_)) {
- raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
- name, new_value);
- } else {
- raw_res = %SetScopeVariableValue(this.gen_value_, null, null, this.index_,
- name, new_value);
- }
- if (!raw_res) throw %make_error(kDebugger, "Failed to set variable value");
-};
-
-
-/**
- * Mirror object for scope of frame or function. Either frame or function must
- * be specified.
- * @param {FrameMirror} frame The frame this scope is a part of
- * @param {FunctionMirror} function The function this scope is a part of
- * @param {GeneratorMirror} gen The generator this scope is a part of
- * @param {number} index The scope index in the frame
- * @param {Array=} opt_details Raw scope details data
- * @constructor
- * @extends Mirror
- */
-function ScopeMirror(frame, fun, gen, index, opt_details) {
- %_Call(Mirror, this, MirrorType.SCOPE_TYPE);
- if (frame) {
- this.frame_index_ = frame.index_;
- } else {
- this.frame_index_ = UNDEFINED;
- }
- this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, fun, gen, index, opt_details);
-}
-inherits(ScopeMirror, Mirror);
-
-
-ScopeMirror.prototype.details = function() {
- return this.details_;
-};
-
-
-ScopeMirror.prototype.frameIndex = function() {
- return this.frame_index_;
-};
-
-
-ScopeMirror.prototype.scopeIndex = function() {
- return this.scope_index_;
-};
-
-
-ScopeMirror.prototype.scopeType = function() {
- return this.details_.type();
-};
-
-
-ScopeMirror.prototype.scopeObject = function() {
- // For local, closure and script scopes create a mirror
- // as these objects are created on the fly materializing the local
- // or closure scopes and therefore will not preserve identity.
- return MakeMirror(this.details_.object());
-};
-
-
-ScopeMirror.prototype.setVariableValue = function(name, new_value) {
- this.details_.setVariableValueImpl(name, new_value);
-};
-
-
-/**
- * Mirror object for script source.
- * @param {Script} script The script object
- * @constructor
- * @extends Mirror
- */
-function ScriptMirror(script) {
- %_Call(Mirror, this, MirrorType.SCRIPT_TYPE);
- this.script_ = script;
- this.context_ = new ContextMirror(script.context_data);
-}
-inherits(ScriptMirror, Mirror);
-
-
-ScriptMirror.prototype.value = function() {
- return this.script_;
-};
-
-
-ScriptMirror.prototype.name = function() {
- return this.script_.name || this.script_.source_url;
-};
-
-
-ScriptMirror.prototype.id = function() {
- return this.script_.id;
-};
-
-
-ScriptMirror.prototype.source = function() {
- return this.script_.source;
-};
-
-
-ScriptMirror.prototype.setSource = function(source) {
- if (!IS_STRING(source)) throw %make_error(kDebugger, "Source is not a string");
- %DebugSetScriptSource(this.script_, source);
-};
-
-
-ScriptMirror.prototype.lineOffset = function() {
- return this.script_.line_offset;
-};
-
-
-ScriptMirror.prototype.columnOffset = function() {
- return this.script_.column_offset;
-};
-
-
-ScriptMirror.prototype.data = function() {
- return this.script_.data;
-};
-
-
-ScriptMirror.prototype.scriptType = function() {
- return this.script_.type;
-};
-
-
-ScriptMirror.prototype.compilationType = function() {
- return this.script_.compilation_type;
-};
-
-
-ScriptMirror.prototype.lineCount = function() {
- return %ScriptLineCount(this.script_);
-};
-
-
-ScriptMirror.prototype.locationFromPosition = function(
- position, include_resource_offset) {
- return %ScriptPositionInfo(this.script_, position, !!include_resource_offset);
-};
-
-
-ScriptMirror.prototype.context = function() {
- return this.context_;
-};
-
-
-ScriptMirror.prototype.evalFromScript = function() {
- return MakeMirror(this.script_.eval_from_script);
-};
-
-
-ScriptMirror.prototype.evalFromFunctionName = function() {
- return MakeMirror(this.script_.eval_from_function_name);
-};
-
-
-ScriptMirror.prototype.evalFromLocation = function() {
- var eval_from_script = this.evalFromScript();
- if (!eval_from_script.isUndefined()) {
- var position = this.script_.eval_from_script_position;
- return eval_from_script.locationFromPosition(position, true);
- }
-};
-
-
-ScriptMirror.prototype.toText = function() {
- var result = '';
- result += this.name();
- result += ' (lines: ';
- if (this.lineOffset() > 0) {
- result += this.lineOffset();
- result += '-';
- result += this.lineOffset() + this.lineCount() - 1;
- } else {
- result += this.lineCount();
- }
- result += ')';
- return result;
-};
-
-
-/**
- * Mirror object for context.
- * @param {Object} data The context data
- * @constructor
- * @extends Mirror
- */
-function ContextMirror(data) {
- %_Call(Mirror, this, MirrorType.CONTEXT_TYPE);
- this.data_ = data;
-}
-inherits(ContextMirror, Mirror);
-
-
-ContextMirror.prototype.data = function() {
- return this.data_;
-};
-
-// ----------------------------------------------------------------------------
-// Exports
-
-utils.InstallConstants(global, [
- "MakeMirror", MakeMirror,
- "ScopeType", ScopeType,
- "PropertyType", PropertyType,
- "PropertyAttribute", PropertyAttribute,
- "Mirror", Mirror,
- "ValueMirror", ValueMirror,
- "UndefinedMirror", UndefinedMirror,
- "NullMirror", NullMirror,
- "BooleanMirror", BooleanMirror,
- "NumberMirror", NumberMirror,
- "StringMirror", StringMirror,
- "SymbolMirror", SymbolMirror,
- "ObjectMirror", ObjectMirror,
- "FunctionMirror", FunctionMirror,
- "UnresolvedFunctionMirror", UnresolvedFunctionMirror,
- "ArrayMirror", ArrayMirror,
- "DateMirror", DateMirror,
- "RegExpMirror", RegExpMirror,
- "ErrorMirror", ErrorMirror,
- "PromiseMirror", PromiseMirror,
- "MapMirror", MapMirror,
- "SetMirror", SetMirror,
- "IteratorMirror", IteratorMirror,
- "GeneratorMirror", GeneratorMirror,
- "PropertyMirror", PropertyMirror,
- "InternalPropertyMirror", InternalPropertyMirror,
- "FrameMirror", FrameMirror,
- "ScriptMirror", ScriptMirror,
- "ScopeMirror", ScopeMirror,
- "FrameDetails", FrameDetails,
-]);
-
-})
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index f3bd7c86ec..4a6d0a67d5 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -37,13 +37,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-#if V8_TARGET_ARCH_PPC64
- __ lwz(r3, FieldMemOperand(
- r3, SharedFunctionInfo::kFormalParameterCountOffset + 4));
-#else
- __ lwz(r3,
+ __ lhz(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
-#endif
__ mr(r5, r3);
ParameterCount dummy1(r5);
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index e7bc6f56c0..f7aabe39b6 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -39,13 +39,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
__ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
-#if V8_TARGET_ARCH_S390X
- __ LoadW(r2, FieldMemOperand(
- r2, SharedFunctionInfo::kFormalParameterCountOffset + 4));
-#else
- __ LoadP(
+ __ LoadLogicalHalfWordP(
r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
-#endif
__ LoadRR(r4, r2);
ParameterCount dummy1(r4);
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 337fec3515..6667a5f3a7 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -39,7 +39,7 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ leave();
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(
+ __ movzxwq(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount dummy(rbx);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 5fa74d571a..8106abea60 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -131,29 +131,36 @@ class FrameWriter {
};
DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
- for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
+ for (int i = 0; i <= DeoptimizerData::kLastDeoptimizeKind; ++i) {
deopt_entry_code_[i] = nullptr;
}
Code** start = &deopt_entry_code_[0];
- Code** end = &deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+ Code** end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
heap_->RegisterStrongRoots(reinterpret_cast<Object**>(start),
reinterpret_cast<Object**>(end));
}
DeoptimizerData::~DeoptimizerData() {
- for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
+ for (int i = 0; i <= DeoptimizerData::kLastDeoptimizeKind; ++i) {
deopt_entry_code_[i] = nullptr;
}
Code** start = &deopt_entry_code_[0];
heap_->UnregisterStrongRoots(reinterpret_cast<Object**>(start));
}
+Code* DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
+ return deopt_entry_code_[static_cast<int>(kind)];
+}
+
+void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code* code) {
+ deopt_entry_code_[static_cast<int>(kind)] = code;
+}
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
- Isolate* isolate = function_->GetIsolate();
+ Isolate* isolate = isolate_;
Context* native_context = function_->context()->native_context();
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
@@ -169,13 +176,10 @@ Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
-Deoptimizer* Deoptimizer::New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Isolate* isolate) {
- Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
+Deoptimizer* Deoptimizer::New(JSFunction* function, DeoptimizeKind kind,
+ unsigned bailout_id, Address from,
+ int fp_to_sp_delta, Isolate* isolate) {
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
bailout_id, from, fp_to_sp_delta);
CHECK_NULL(isolate->deoptimizer_data()->current_);
isolate->deoptimizer_data()->current_ = deoptimizer;
@@ -226,11 +230,10 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
return info;
}
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- int count,
- BailoutType type) {
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, int count,
+ DeoptimizeKind kind) {
NoRootArrayScope no_root_array(masm);
- TableEntryGenerator generator(masm, type, count);
+ TableEntryGenerator generator(masm, kind, count);
generator.Generate();
}
@@ -457,41 +460,26 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
deoptimizer->DoComputeOutputFrames();
}
-
-const char* Deoptimizer::MessageFor(BailoutType type) {
- switch (type) {
- case EAGER: return "eager";
- case SOFT: return "soft";
- case LAZY: return "lazy";
+const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return "eager";
+ case DeoptimizeKind::kSoft:
+ return "soft";
+ case DeoptimizeKind::kLazy:
+ return "lazy";
}
- FATAL("Unsupported deopt type");
+ FATAL("Unsupported deopt kind");
return nullptr;
}
-namespace {
-
-CodeEventListener::DeoptKind DeoptKindOfBailoutType(
- Deoptimizer::BailoutType bailout_type) {
- switch (bailout_type) {
- case Deoptimizer::EAGER:
- return CodeEventListener::kEager;
- case Deoptimizer::SOFT:
- return CodeEventListener::kSoft;
- case Deoptimizer::LAZY:
- return CodeEventListener::kLazy;
- }
- UNREACHABLE();
-}
-
-} // namespace
-
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
- BailoutType type, unsigned bailout_id, Address from,
+ DeoptimizeKind kind, unsigned bailout_id, Address from,
int fp_to_sp_delta)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
- bailout_type_(type),
+ deopt_kind_(kind),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
deoptimizing_throw_(false),
@@ -531,7 +519,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
// increment the function's deopt count so that we can avoid optimising
// functions that deopt too often.
- if (bailout_type_ == Deoptimizer::SOFT) {
+ if (deopt_kind_ == DeoptimizeKind::kSoft) {
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
@@ -542,8 +530,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
compiled_code_->set_deopt_already_counted(true);
PROFILE(isolate_,
- CodeDeoptEvent(compiled_code_, DeoptKindOfBailoutType(type), from_,
- fp_to_sp_delta_));
+ CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
int parameter_count =
@@ -569,10 +556,10 @@ void Deoptimizer::PrintFunctionName() {
}
Handle<JSFunction> Deoptimizer::function() const {
- return Handle<JSFunction>(function_);
+ return Handle<JSFunction>(function_, isolate());
}
Handle<Code> Deoptimizer::compiled_code() const {
- return Handle<Code>(compiled_code_);
+ return Handle<Code>(compiled_code_, isolate());
}
Deoptimizer::~Deoptimizer() {
@@ -599,34 +586,56 @@ void Deoptimizer::DeleteFrameDescriptions() {
}
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
- BailoutType type) {
+ DeoptimizeKind kind) {
CHECK_GE(id, 0);
if (id >= kMaxNumberOfEntries) return kNullAddress;
DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(type, kLastBailoutType);
- CHECK_NOT_NULL(data->deopt_entry_code_[type]);
- Code* code = data->deopt_entry_code_[type];
+ CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
+ CHECK_NOT_NULL(data->deopt_entry_code(kind));
+ Code* code = data->deopt_entry_code(kind);
return code->raw_instruction_start() + (id * table_entry_size_);
}
-
-int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
- Address addr,
- BailoutType type) {
+int Deoptimizer::GetDeoptimizationId(Isolate* isolate, Address addr,
+ DeoptimizeKind kind) {
DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(type, kLastBailoutType);
- Code* code = data->deopt_entry_code_[type];
- if (code == nullptr) return kNotDeoptimizationEntry;
+ CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
+ DCHECK(IsInDeoptimizationTable(isolate, addr, kind));
+ Code* code = data->deopt_entry_code(kind);
Address start = code->raw_instruction_start();
- if (addr < start ||
- addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
- return kNotDeoptimizationEntry;
- }
DCHECK_EQ(0,
static_cast<int>(addr - start) % table_entry_size_);
return static_cast<int>(addr - start) / table_entry_size_;
}
+bool Deoptimizer::IsInDeoptimizationTable(Isolate* isolate, Address addr,
+ DeoptimizeKind type) {
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
+ Code* code = data->deopt_entry_code(type);
+ if (code == nullptr) return false;
+ Address start = code->raw_instruction_start();
+ return ((table_entry_size_ == 0 && addr == start) ||
+ (addr >= start &&
+ addr < start + (kMaxNumberOfEntries * table_entry_size_)));
+}
+
+bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
+ DeoptimizeKind* type) {
+ if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kEager)) {
+ *type = DeoptimizeKind::kEager;
+ return true;
+ }
+ if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kSoft)) {
+ *type = DeoptimizeKind::kSoft;
+ return true;
+ }
+ if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kLazy)) {
+ *type = DeoptimizeKind::kLazy;
+ return true;
+ }
+ return false;
+}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
@@ -708,14 +717,15 @@ void Deoptimizer::DoComputeOutputFrames() {
if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
- MessageFor(bailout_type_));
+ MessageFor(deopt_kind_));
PrintFunctionName();
PrintF(trace_scope_->file(),
" (opt #%d) @%d, FP to SP delta: %d, caller sp: " V8PRIxPTR_FMT
"]\n",
input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
- if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+ if (deopt_kind_ == DeoptimizeKind::kEager ||
+ deopt_kind_ == DeoptimizeKind::kSoft) {
compiled_code_->PrintDeoptLocation(
trace_scope_->file(), " ;;; deoptimize at ", from_);
}
@@ -728,7 +738,7 @@ void Deoptimizer::DoComputeOutputFrames() {
TranslationIterator state_iterator(translations, translation_index);
translated_state_.Init(
- input_->GetFramePointerAddress(), &state_iterator,
+ isolate_, input_->GetFramePointerAddress(), &state_iterator,
input_data->LiteralArray(), input_->GetRegisterValues(),
trace_scope_ == nullptr ? nullptr : trace_scope_->file(),
function_->IsHeapObject()
@@ -804,7 +814,7 @@ void Deoptimizer::DoComputeOutputFrames() {
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
- MessageFor(bailout_type_));
+ MessageFor(deopt_kind_));
PrintFunctionName();
PrintF(trace_scope_->file(),
" @%d => node=%d, pc=" V8PRIxPTR_FMT ", caller sp=" V8PRIxPTR_FMT
@@ -877,9 +887,9 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
+ ReadOnlyRoots roots(isolate());
if (ShouldPadArguments(parameter_count)) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
@@ -979,15 +989,13 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// to ensure the stack frame is aligned. Do this now.
while (register_slots_written < register_stack_slot_count) {
register_slots_written++;
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// Translate the accumulator register (depending on frame position).
if (is_topmost) {
if (PadTopOfStackRegister()) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// For topmost frame, put the accumulator on the stack. The
// {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
@@ -1017,7 +1025,8 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// simulating what normal handlers do upon completion of the operation.
Builtins* builtins = isolate_->builtins();
Code* dispatch_builtin =
- (!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
+ (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
+ !goto_catch_handler
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(
@@ -1088,9 +1097,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
output_frame->SetTop(top_address);
+ ReadOnlyRoots roots(isolate());
if (ShouldPadArguments(parameter_count)) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// Compute the incoming parameter translation.
@@ -1132,7 +1141,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
// Number of incoming arguments.
frame_writer.PushRawObject(Smi::FromInt(height - 1), "argc\n");
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(), "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
CHECK_EQ(translated_frame->end(), value_iterator);
DCHECK_EQ(0, frame_writer.top_offset());
@@ -1157,8 +1166,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
bool is_topmost = (output_count_ - 1 == frame_index);
// The construct frame could become topmost only if we inlined a constructor
// call which does a tail call (otherwise the tail callee's frame would be
- // the topmost one). So it could only be the LAZY case.
- CHECK(!is_topmost || bailout_type_ == LAZY);
+ // the topmost one). So it could only be the DeoptimizeKind::kLazy case.
+ CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
@@ -1207,9 +1216,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
+ ReadOnlyRoots roots(isolate());
if (ShouldPadArguments(parameter_count)) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// The allocated receiver of a construct stub frame is passed as the
@@ -1266,7 +1275,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// position of the receiver. Copy it to the top of stack, with the hole value
// as padding to maintain alignment.
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(), "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
CHECK(bailout_id == BailoutId::ConstructStubCreate() ||
bailout_id == BailoutId::ConstructStubInvoke());
@@ -1277,8 +1286,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (is_topmost) {
if (PadTopOfStackRegister()) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// Ensure the result is restored back when we return to the stub.
Register result_reg = kReturnRegister0;
@@ -1307,7 +1315,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (is_topmost) {
Register constant_pool_reg =
JavaScriptFrame::constant_pool_pointer_register();
- output_frame->SetRegister(constant_pool_reg.code(), fp_value);
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
@@ -1323,7 +1331,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
- DCHECK_EQ(LAZY, bailout_type_);
+ DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation->InstructionStart()));
@@ -1465,7 +1473,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
const bool is_bottommost = (0 == frame_index);
const bool is_topmost = (output_count_ - 1 == frame_index);
- const bool must_handle_result = !is_topmost || bailout_type_ == LAZY;
+ const bool must_handle_result =
+ !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;
const RegisterConfiguration* config(RegisterConfiguration::Default());
const int allocatable_register_count =
@@ -1555,9 +1564,9 @@ void Deoptimizer::DoComputeBuiltinContinuation(
reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
++value_iterator;
+ ReadOnlyRoots roots(isolate());
if (ShouldPadArguments(stack_param_count)) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
for (int i = 0; i < translated_stack_parameters; ++i, ++value_iterator) {
@@ -1570,7 +1579,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
case BuiltinContinuationMode::JAVASCRIPT:
break;
case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
+ frame_writer.PushRawObject(roots.the_hole_value(),
"placeholder for exception on lazy deopt\n");
} break;
case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
@@ -1582,7 +1591,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
if (must_handle_result) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
+ frame_writer.PushRawObject(roots.the_hole_value(),
"placeholder for return result on lazy deopt\n");
}
@@ -1680,14 +1689,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// Some architectures must pad the stack frame with extra stack slots
// to ensure the stack frame is aligned.
for (int i = 0; i < padding_slot_count; ++i) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
if (is_topmost) {
if (PadTopOfStackRegister()) {
- frame_writer.PushRawObject(isolate()->heap()->the_hole_value(),
- "padding\n");
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
// Ensure the result is restored back when we return to the stub.
@@ -1696,8 +1703,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushRawValue(input_->GetRegister(result_reg.code()),
"callback result\n");
} else {
- frame_writer.PushRawObject(isolate()->heap()->undefined_value(),
- "callback result\n");
+ frame_writer.PushRawObject(roots.undefined_value(), "callback result\n");
}
}
@@ -1768,7 +1774,7 @@ void Deoptimizer::MaterializeHeapObjects() {
void Deoptimizer::QueueValueForMaterialization(
Address output_address, Object* obj,
const TranslatedFrame::iterator& iterator) {
- if (obj == isolate_->heap()->arguments_marker()) {
+ if (obj == ReadOnlyRoots(isolate_).arguments_marker()) {
values_to_materialize_.push_back({output_address, iterator});
}
}
@@ -1813,14 +1819,15 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type) {
- CHECK(type == EAGER || type == SOFT || type == LAZY);
+ DeoptimizeKind kind) {
+ CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft ||
+ kind == DeoptimizeKind::kLazy);
DeoptimizerData* data = isolate->deoptimizer_data();
- if (data->deopt_entry_code_[type] != nullptr) return;
+ if (data->deopt_entry_code(kind) != nullptr) return;
MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, type);
+ GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, kind);
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
@@ -1832,14 +1839,14 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable);
CHECK(Heap::IsImmovable(*code));
- CHECK_NULL(data->deopt_entry_code_[type]);
- data->deopt_entry_code_[type] = *code;
+ CHECK_NULL(data->deopt_entry_code(kind));
+ data->set_deopt_entry_code(kind, *code);
}
void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
- EnsureCodeForDeoptimizationEntry(isolate, EAGER);
- EnsureCodeForDeoptimizationEntry(isolate, LAZY);
- EnsureCodeForDeoptimizationEntry(isolate, SOFT);
+ EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
+ EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
+ EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);
}
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
@@ -2152,7 +2159,7 @@ bool MaterializedObjectStore::Remove(Address fp) {
for (int i = index; i < fps_size; i++) {
array->set(i, array->get(i + 1));
}
- array->set(fps_size, isolate()->heap()->undefined_value());
+ array->set(fps_size, ReadOnlyRoots(isolate()).undefined_value());
return true;
}
@@ -2166,7 +2173,8 @@ int MaterializedObjectStore::StackIdToIndex(Address fp) {
Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
- return Handle<FixedArray>(isolate()->heap()->materialized_objects());
+ return Handle<FixedArray>(isolate()->heap()->materialized_objects(),
+ isolate());
}
@@ -2186,8 +2194,9 @@ Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
for (int i = 0; i < array->length(); i++) {
new_array->set(i, array->get(i));
}
+ HeapObject* undefined_value = ReadOnlyRoots(isolate()).undefined_value();
for (int i = array->length(); i < length; i++) {
- new_array->set(i, isolate()->heap()->undefined_value());
+ new_array->set(i, undefined_value);
}
isolate()->heap()->SetRootMaterializedObjects(*new_array);
return new_array;
@@ -2197,7 +2206,7 @@ namespace {
Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
Isolate* isolate) {
- if (it->GetRawValue() == isolate->heap()->arguments_marker()) {
+ if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker()) {
if (!it->IsMaterializableByDebugger()) {
return isolate->factory()->optimized_out();
}
@@ -2210,52 +2219,28 @@ Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
TranslatedState::iterator frame_it,
Isolate* isolate) {
- // If the previous frame is an adaptor frame, we will take the parameters
- // from there.
- TranslatedState::iterator parameter_frame = frame_it;
- if (parameter_frame != state->begin()) {
- parameter_frame--;
- }
- int parameter_count;
- if (parameter_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
- parameter_count = parameter_frame->height() - 1; // Ignore the receiver.
- } else {
- parameter_frame = frame_it;
- parameter_count =
- frame_it->shared_info()->internal_formal_parameter_count();
- }
- TranslatedFrame::iterator parameter_it = parameter_frame->begin();
- parameter_it++; // Skip the function.
- parameter_it++; // Skip the receiver.
+ int parameter_count =
+ frame_it->shared_info()->internal_formal_parameter_count();
+ TranslatedFrame::iterator stack_it = frame_it->begin();
- // Figure out whether there is a construct stub frame on top of
- // the parameter frame.
- has_construct_stub_ =
- parameter_frame != state->begin() &&
- (parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
+ // Get the function. Note that this might materialize the function.
+ // In case the debugger mutates this value, we should deoptimize
+ // the function and remember the value in the materialized value store.
+ function_ = Handle<JSFunction>::cast(stack_it->GetValue());
+ stack_it++; // Skip the function.
+ stack_it++; // Skip the receiver.
DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
*frame_it->shared_info(), frame_it->node_id());
- TranslatedFrame::iterator value_it = frame_it->begin();
- // Get the function. Note that this might materialize the function.
- // In case the debugger mutates this value, we should deoptimize
- // the function and remember the value in the materialized value store.
- function_ = Handle<JSFunction>::cast(value_it->GetValue());
+ DCHECK_EQ(parameter_count,
+ function_->shared()->internal_formal_parameter_count());
parameters_.resize(static_cast<size_t>(parameter_count));
for (int i = 0; i < parameter_count; i++) {
- Handle<Object> parameter = GetValueForDebugger(parameter_it, isolate);
+ Handle<Object> parameter = GetValueForDebugger(stack_it, isolate);
SetParameter(i, parameter);
- parameter_it++;
- }
-
- // Skip the function, the receiver and the arguments.
- int skip_count =
- frame_it->shared_info()->internal_formal_parameter_count() + 2;
- TranslatedFrame::iterator stack_it = frame_it->begin();
- for (int i = 0; i < skip_count; i++) {
stack_it++;
}
@@ -2471,10 +2456,10 @@ Object* TranslatedValue::GetRawValue() const {
case kBoolBit: {
if (uint32_value() == 0) {
- return isolate()->heap()->false_value();
+ return ReadOnlyRoots(isolate()).false_value();
} else {
CHECK_EQ(1U, uint32_value());
- return isolate()->heap()->true_value();
+ return ReadOnlyRoots(isolate()).true_value();
}
}
@@ -2484,7 +2469,7 @@ Object* TranslatedValue::GetRawValue() const {
// If we could not get the value without allocation, return the arguments
// marker.
- return isolate()->heap()->arguments_marker();
+ return ReadOnlyRoots(isolate()).arguments_marker();
}
void TranslatedValue::set_initialized_storage(Handle<Object> storage) {
@@ -2542,7 +2527,7 @@ void TranslatedValue::MaterializeSimple() {
if (materialization_state() == kFinished) return;
Object* raw_value = GetRawValue();
- if (raw_value != isolate()->heap()->arguments_marker()) {
+ if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) {
// We can get the value without allocation, just return it here.
set_initialized_storage(Handle<Object>(raw_value, isolate()));
return;
@@ -2705,7 +2690,8 @@ int TranslatedFrame::GetValueCount() {
void TranslatedFrame::Handlify() {
if (raw_shared_info_ != nullptr) {
- shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_);
+ shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
+ raw_shared_info_->GetIsolate());
raw_shared_info_ = nullptr;
}
for (auto& value : values_) {
@@ -2915,8 +2901,8 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
frame.Add(TranslatedValue::NewDeferredObject(
this, length + FixedArray::kHeaderSize / kPointerSize, object_index));
- frame.Add(
- TranslatedValue::NewTagged(this, isolate_->heap()->fixed_array_map()));
+ ReadOnlyRoots roots(isolate_);
+ frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
frame.Add(TranslatedValue::NewInt32(this, length));
int number_of_holes = 0;
@@ -2926,8 +2912,7 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
number_of_holes = Min(formal_parameter_count_, length);
}
for (int i = 0; i < number_of_holes; ++i) {
- frame.Add(
- TranslatedValue::NewTagged(this, isolate_->heap()->the_hole_value()));
+ frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
}
for (int i = length - number_of_holes - 1; i >= 0; --i) {
Address argument_slot = arguments_frame +
@@ -3236,20 +3221,20 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
- nullptr /* trace file */,
+ Init(frame->isolate(), frame->fp(), &it, data->LiteralArray(),
+ nullptr /* registers */, nullptr /* trace file */,
frame->function()->shared()->internal_formal_parameter_count());
}
-void TranslatedState::Init(Address input_frame_pointer,
+void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
TranslationIterator* iterator,
FixedArray* literal_array, RegisterValues* registers,
FILE* trace_file, int formal_parameter_count) {
DCHECK(frames_.empty());
formal_parameter_count_ = formal_parameter_count;
+ isolate_ = isolate;
- isolate_ = literal_array->GetIsolate();
// Read out the 'header' translation.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
@@ -3413,9 +3398,17 @@ void TranslatedState::InitializeCapturedObjectAt(
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
case PROPERTY_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map,
no_allocation);
break;
@@ -3474,9 +3467,9 @@ void TranslatedState::MaterializeMutableHeapNumber(TranslatedFrame* frame,
CHECK_NE(TranslatedValue::kCapturedObject,
frame->values_[*value_index].kind());
Handle<Object> value = frame->values_[*value_index].GetValue();
- Handle<HeapNumber> box;
CHECK(value->IsNumber());
- box = isolate()->factory()->NewHeapNumber(value->Number(), MUTABLE);
+ Handle<MutableHeapNumber> box =
+ isolate()->factory()->NewMutableHeapNumber(value->Number());
(*value_index)++;
slot->set_storage(box);
}
@@ -3538,6 +3531,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
return MaterializeMutableHeapNumber(frame, &value_index, slot);
case FIXED_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -3547,7 +3541,14 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- case HASH_TABLE_TYPE: {
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE: {
// Check we have the right size.
int array_length =
Smi::cast(frame->values_[value_index].GetRawValue())->value();
@@ -3556,7 +3557,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
// Canonicalize empty fixed array.
- if (*map == isolate()->heap()->empty_fixed_array()->map() &&
+ if (*map == ReadOnlyRoots(isolate()).empty_fixed_array()->map() &&
array_length == 0) {
slot->set_storage(isolate()->factory()->empty_fixed_array());
} else {
@@ -3637,7 +3638,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->set_storage(object_storage);
// Set markers for the double properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
int field_count = map->NumberOfOwnDescriptors();
for (int i = 0; i < field_count; i++) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
@@ -3670,7 +3671,7 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
int field_count = map->NumberOfOwnDescriptors();
// Set markers for the double properties.
@@ -3762,7 +3763,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
// Skip the writes if we already have the canonical empty fixed array.
- if (*object_storage == isolate()->heap()->empty_fixed_array()) {
+ if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) {
CHECK_EQ(2, slot->GetChildrenCount());
Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
CHECK_EQ(*length_value, Smi::FromInt(0));
@@ -3934,7 +3935,7 @@ void TranslatedState::VerifyMaterializedObjects() {
if (slot->kind() == TranslatedValue::kCapturedObject) {
CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
if (slot->materialization_state() == TranslatedValue::kFinished) {
- slot->GetStorage()->ObjectVerify();
+ slot->GetStorage()->ObjectVerify(isolate());
} else {
CHECK_EQ(slot->materialization_state(),
TranslatedValue::kUninitialized);
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index eae442dec3..1f20dbdac1 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -11,9 +11,11 @@
#include "src/allocation.h"
#include "src/base/macros.h"
#include "src/boxed-float.h"
+#include "src/code-tracer.h"
#include "src/deoptimize-reason.h"
#include "src/feedback-vector.h"
#include "src/frame-constants.h"
+#include "src/globals.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/source-position.h"
@@ -31,8 +33,8 @@ class RegisterValues;
class TranslatedValue {
public:
// Allocation-less getter of the value.
- // Returns heap()->arguments_marker() if allocation would be
- // necessary to get the value.
+ // Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
+ // to get the value.
Object* GetRawValue() const;
// Getter for the value, takes care of materializing the subgraph
@@ -305,9 +307,9 @@ class TranslatedState {
Isolate* isolate() { return isolate_; }
- void Init(Address input_frame_pointer, TranslationIterator* iterator,
- FixedArray* literal_array, RegisterValues* registers,
- FILE* trace_file, int parameter_count);
+ void Init(Isolate* isolate, Address input_frame_pointer,
+ TranslationIterator* iterator, FixedArray* literal_array,
+ RegisterValues* registers, FILE* trace_file, int parameter_count);
void VerifyMaterializedObjects();
bool DoUpdateFeedback();
@@ -392,8 +394,6 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
class Deoptimizer : public Malloced {
public:
- enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
-
struct DeoptInfo {
DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
int deopt_id)
@@ -413,41 +413,38 @@ class Deoptimizer : public Malloced {
struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
- Deoptimizer::BailoutType type, bool frame)
+ DeoptimizeKind kind, bool frame)
: label(),
address(entry),
deopt_info(deopt_info),
- bailout_type(type),
+ deopt_kind(kind),
needs_frame(frame) {}
bool IsEquivalentTo(const JumpTableEntry& other) const {
- return address == other.address && bailout_type == other.bailout_type &&
+ return address == other.address && deopt_kind == other.deopt_kind &&
needs_frame == other.needs_frame;
}
Label label;
Address address;
DeoptInfo deopt_info;
- Deoptimizer::BailoutType bailout_type;
+ DeoptimizeKind deopt_kind;
bool needs_frame;
};
- static const char* MessageFor(BailoutType type);
+ static const char* MessageFor(DeoptimizeKind kind);
int output_count() const { return output_count_; }
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
- BailoutType bailout_type() const { return bailout_type_; }
+ DeoptimizeKind deopt_kind() const { return deopt_kind_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
- static Deoptimizer* New(JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
+ static Deoptimizer* New(JSFunction* function, DeoptimizeKind kind,
+ unsigned bailout_id, Address from, int fp_to_sp_delta,
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
@@ -478,10 +475,14 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Address GetDeoptimizationEntry(Isolate* isolate, int id,
- BailoutType type);
- static int GetDeoptimizationId(Isolate* isolate,
- Address addr,
- BailoutType type);
+ DeoptimizeKind kind);
+ static int GetDeoptimizationId(Isolate* isolate, Address addr,
+ DeoptimizeKind kind);
+
+ // Returns true if {addr} is a deoptimization entry and stores its type in
+ // {type}. Returns false if {addr} is not a deoptimization entry.
+ static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
+ DeoptimizeKind* type);
// Code generation support.
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
@@ -501,14 +502,14 @@ class Deoptimizer : public Malloced {
// Generators for the deoptimization entry code.
class TableEntryGenerator BASE_EMBEDDED {
public:
- TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
- : masm_(masm), type_(type), count_(count) {}
+ TableEntryGenerator(MacroAssembler* masm, DeoptimizeKind kind, int count)
+ : masm_(masm), deopt_kind_(kind), count_(count) {}
void Generate();
protected:
MacroAssembler* masm() const { return masm_; }
- BailoutType type() const { return type_; }
+ DeoptimizeKind deopt_kind() const { return deopt_kind_; }
Isolate* isolate() const { return masm_->isolate(); }
void GeneratePrologue();
@@ -517,12 +518,12 @@ class Deoptimizer : public Malloced {
int count() const { return count_; }
MacroAssembler* masm_;
- Deoptimizer::BailoutType type_;
+ DeoptimizeKind deopt_kind_;
int count_;
};
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- BailoutType type);
+ DeoptimizeKind kind);
static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
@@ -535,12 +536,15 @@ class Deoptimizer : public Malloced {
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
- Deoptimizer(Isolate* isolate, JSFunction* function, BailoutType type,
+ Deoptimizer(Isolate* isolate, JSFunction* function, DeoptimizeKind kind,
unsigned bailout_id, Address from, int fp_to_sp_delta);
Code* FindOptimizedCode();
void PrintFunctionName();
void DeleteFrameDescriptions();
+ static bool IsInDeoptimizationTable(Isolate* isolate, Address addr,
+ DeoptimizeKind type);
+
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
@@ -573,8 +577,8 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
- static void GenerateDeoptimizationEntries(
- MacroAssembler* masm, int count, BailoutType type);
+ static void GenerateDeoptimizationEntries(MacroAssembler* masm, int count,
+ DeoptimizeKind kind);
// Marks all the code in the given context for deoptimization.
static void MarkAllCodeForContext(Context* native_context);
@@ -595,7 +599,7 @@ class Deoptimizer : public Malloced {
JSFunction* function_;
Code* compiled_code_;
unsigned bailout_id_;
- BailoutType bailout_type_;
+ DeoptimizeKind deopt_kind_;
Address from_;
int fp_to_sp_delta_;
bool deoptimizing_throw_;
@@ -846,7 +850,11 @@ class DeoptimizerData {
private:
Heap* heap_;
- Code* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+ static const int kLastDeoptimizeKind =
+ static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
+ Code* deopt_entry_code_[kLastDeoptimizeKind + 1];
+ Code* deopt_entry_code(DeoptimizeKind kind);
+ void set_deopt_entry_code(DeoptimizeKind kind, Code* code);
Deoptimizer* current_;
@@ -1026,12 +1034,6 @@ class DeoptimizedFrameInfo : public Malloced {
// Get the frame context.
Handle<Object> GetContext() { return context_; }
- // Check if this frame is preceded by construct stub frame. The bottom-most
- // inlined frame might still be called by an uninlined construct stub.
- bool HasConstructStub() {
- return has_construct_stub_;
- }
-
// Get an incoming argument.
Handle<Object> GetParameter(int index) {
DCHECK(0 <= index && index < parameters_count());
@@ -1063,7 +1065,6 @@ class DeoptimizedFrameInfo : public Malloced {
Handle<JSFunction> function_;
Handle<Object> context_;
- bool has_construct_stub_;
std::vector<Handle<Object> > parameters_;
std::vector<Handle<Object> > expression_stack_;
int source_position_;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 6fc4bf16a0..6a9d8deee0 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -45,7 +45,8 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
if (!code_.is_null()) {
const char* name =
- isolate_->builtins()->Lookup(reinterpret_cast<Address>(pc));
+ isolate_ ? isolate_->builtins()->Lookup(reinterpret_cast<Address>(pc))
+ : nullptr;
if (name != nullptr) {
SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
@@ -61,8 +62,9 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
}
wasm::WasmCode* wasm_code =
- isolate_->wasm_engine()->code_manager()->LookupCode(
- reinterpret_cast<Address>(pc));
+ isolate_ ? isolate_->wasm_engine()->code_manager()->LookupCode(
+ reinterpret_cast<Address>(pc))
+ : nullptr;
if (wasm_code != nullptr) {
SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc),
wasm::GetWasmCodeKindAsString(wasm_code->kind()));
@@ -91,7 +93,7 @@ static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
- const ExternalReferenceEncoder& ref_encoder,
+ const ExternalReferenceEncoder* ref_encoder,
std::ostream* os, RelocInfo* relocinfo,
bool first_reloc_info = true) {
// Indent the printing of the reloc info.
@@ -125,69 +127,50 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
std::unique_ptr<char[]> obj_name = accumulator.ToCString();
out->AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- const char* reference_name = ref_encoder.NameOfAddress(
- isolate, relocinfo->target_external_reference());
+ const char* reference_name =
+ ref_encoder ? ref_encoder->NameOfAddress(
+ isolate, relocinfo->target_external_reference())
+ : "unknown";
out->AddFormatted(" ;; external reference (%s)", reference_name);
- } else if (RelocInfo::IsCodeTarget(rmode)) {
+ } else if (RelocInfo::IsCodeTargetMode(rmode)) {
out->AddFormatted(" ;; code:");
- wasm::WasmCode* wasmCode =
- isolate->wasm_engine()->code_manager()->LookupCode(
- relocinfo->target_address());
- if (wasmCode) {
- out->AddFormatted(" wasm(%s)",
- wasm::GetWasmCodeKindAsString(wasmCode->kind()));
+ Code* code = isolate->heap()->GcSafeFindCodeForInnerPointer(
+ relocinfo->target_address());
+ Code::Kind kind = code->kind();
+ if (kind == Code::STUB) {
+ // Get the STUB key and extract major and minor key.
+ uint32_t key = code->stub_key();
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
+ out->AddFormatted(" %s, %s, ", Code::Kind2String(kind),
+ CodeStub::MajorName(major_key));
+ out->AddFormatted("minor: %d", minor_key);
+ } else if (code->is_builtin()) {
+ out->AddFormatted(" Builtin::%s", Builtins::name(code->builtin_index()));
} else {
- Code* code = Code::GetCodeFromTargetAddress(relocinfo->target_address());
- Code::Kind kind = code->kind();
- if (kind == Code::STUB) {
- // Get the STUB key and extract major and minor key.
- uint32_t key = code->stub_key();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
- out->AddFormatted(" %s, %s, ", Code::Kind2String(kind),
- CodeStub::MajorName(major_key));
- out->AddFormatted("minor: %d", minor_key);
- } else if (code->is_builtin()) {
- out->AddFormatted(" Builtin::%s",
- Builtins::name(code->builtin_index()));
- } else {
- out->AddFormatted(" %s", Code::Kind2String(kind));
- }
+ out->AddFormatted(" %s", Code::Kind2String(kind));
}
- } else if (RelocInfo::IsRuntimeEntry(rmode) &&
+ } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
isolate->deoptimizer_data() != nullptr) {
- // A runtime entry reloinfo might be a deoptimization bailout->
+ // A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo->target_address();
- int id =
- Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::EAGER);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::LAZY);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::SOFT);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- } else {
- out->AddFormatted(" ;; soft deoptimization bailout %d", id);
- }
- } else {
- out->AddFormatted(" ;; lazy deoptimization bailout %d", id);
- }
+ DeoptimizeKind type;
+ if (Deoptimizer::IsDeoptimizationEntry(isolate, addr, &type)) {
+ int id = relocinfo->GetDeoptimizationId(isolate, type);
+ out->AddFormatted(" ;; %s deoptimization bailout %d",
+ Deoptimizer::MessageFor(type), id);
} else {
- out->AddFormatted(" ;; deoptimization bailout %d", id);
+ out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
} else {
out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
}
-static int DecodeIt(Isolate* isolate, std::ostream* os,
- const V8NameConverter& converter, byte* begin, byte* end,
- Address current_pc) {
- SealHandleScope shs(isolate);
- DisallowHeapAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder(isolate);
-
+static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
+ std::ostream* os, const V8NameConverter& converter,
+ byte* begin, byte* end, Address current_pc) {
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
StringBuilder out(out_buffer.start(), out_buffer.length());
@@ -275,9 +258,9 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
for (size_t i = 0; i < pcs.size(); i++) {
// Put together the reloc info
const CodeReference& host = converter.code();
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], nullptr);
- relocinfo.set_constant_pool(host.is_null() ? kNullAddress
- : host.constant_pool());
+ Address constant_pool =
+ host.is_null() ? kNullAddress : host.constant_pool();
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], nullptr, constant_pool);
bool first_reloc_info = (i == 0);
PrintRelocInfo(&out, isolate, ref_encoder, os, &relocinfo,
@@ -331,7 +314,18 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
byte* end, CodeReference code, Address current_pc) {
V8NameConverter v8NameConverter(isolate, code);
- return DecodeIt(isolate, os, v8NameConverter, begin, end, current_pc);
+ if (isolate) {
+ // We have an isolate, so support external reference names.
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_alloc;
+ ExternalReferenceEncoder ref_encoder(isolate);
+ return DecodeIt(isolate, &ref_encoder, os, v8NameConverter, begin, end,
+ current_pc);
+ } else {
+ // No isolate => isolate-independent code. No external reference names.
+ return DecodeIt(nullptr, nullptr, os, v8NameConverter, begin, end,
+ current_pc);
+ }
}
#else // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index 116098c736..deb98ca140 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -17,6 +17,7 @@ class Disassembler : public AllStatic {
// code into os. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
// the code object is used for name resolution and may be null.
+ // TODO(titzer): accept a {WasmCodeManager*} if {isolate} is null
static int Decode(Isolate* isolate, std::ostream* os, byte* begin, byte* end,
CodeReference code = {}, Address current_pc = kNullAddress);
};
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 0905677c3c..95bfc2e93d 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -49,6 +49,9 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
UNREACHABLE();
}
+int ElementsKindToByteSize(ElementsKind elements_kind) {
+ return 1 << ElementsKindToShiftSize(elements_kind);
+}
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 13a824940f..88c5350b4d 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -74,6 +74,7 @@ const int kFastElementsKindPackedToHoley =
HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS;
int ElementsKindToShiftSize(ElementsKind elements_kind);
+int ElementsKindToByteSize(ElementsKind elements_kind);
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index e4491f0eeb..575ccde2a7 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -11,6 +11,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/utils.h"
@@ -124,12 +125,12 @@ WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
return UPDATE_WRITE_BARRIER;
}
-void CopyObjectToObjectElements(FixedArrayBase* from_base,
+void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase* from_base,
ElementsKind from_kind, uint32_t from_start,
FixedArrayBase* to_base, ElementsKind to_kind,
uint32_t to_start, int raw_copy_size) {
- DCHECK(to_base->map() !=
- from_base->GetIsolate()->heap()->fixed_cow_array_map());
+ ReadOnlyRoots roots(isolate);
+ DCHECK(to_base->map() != roots.fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -141,9 +142,8 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
- Heap* heap = from_base->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ roots.the_hole_value(), length);
}
}
}
@@ -165,10 +165,10 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
}
}
-
static void CopyDictionaryToObjectElements(
- FixedArrayBase* from_base, uint32_t from_start, FixedArrayBase* to_base,
- ElementsKind to_kind, uint32_t to_start, int raw_copy_size) {
+ Isolate* isolate, FixedArrayBase* from_base, uint32_t from_start,
+ FixedArrayBase* to_base, ElementsKind to_kind, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary* from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
@@ -180,9 +180,8 @@ static void CopyDictionaryToObjectElements(
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
- Heap* heap = from->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
}
@@ -195,7 +194,6 @@ static void CopyDictionaryToObjectElements(
copy_size = to_length - to_start;
}
WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to_kind);
- Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
if (entry != NumberDictionary::kNotFound) {
@@ -212,7 +210,8 @@ static void CopyDictionaryToObjectElements(
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessorBase::CopyElements() for details.
-static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
+static void CopyDoubleToObjectElements(Isolate* isolate,
+ FixedArrayBase* from_base,
uint32_t from_start,
FixedArrayBase* to_base,
uint32_t to_start, int raw_copy_size) {
@@ -230,9 +229,8 @@ static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
int start = to_start;
int length = to_base->length() - start;
if (length > 0) {
- Heap* heap = from_base->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
}
@@ -243,7 +241,6 @@ static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
// From here on, the code below could actually allocate. Therefore the raw
// values are wrapped into handles.
- Isolate* isolate = from_base->GetIsolate();
Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
@@ -317,7 +314,7 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Object* the_hole = from->GetReadOnlyRoots().the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
Object* hole_or_smi = from->get(from_start);
@@ -363,7 +360,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
- DCHECK(!smi->IsTheHole(from->GetIsolate()));
+ DCHECK(!smi->IsTheHole());
to->set(to_start, Smi::ToInt(smi));
}
}
@@ -390,7 +387,7 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Object* the_hole = from->GetReadOnlyRoots().the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
Object* hole_or_object = from->get(from_start);
@@ -402,12 +399,9 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
}
-
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- uint32_t to_start,
- int raw_copy_size) {
+static void CopyDictionaryToDoubleElements(
+ Isolate* isolate, FixedArrayBase* from_base, uint32_t from_start,
+ FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary* from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
@@ -427,7 +421,6 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
- Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
if (entry != NumberDictionary::kNotFound) {
@@ -446,9 +439,10 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* apply_builtin =
- isolate->builtins()->builtin(Builtins::kFunctionPrototypeApply);
- if (raw_frame->unchecked_code() == apply_builtin) {
+ Code* current_code_object =
+ isolate->heap()->GcSafeFindCodeForInnerPointer(raw_frame->pc());
+ if (current_code_object->builtin_index() ==
+ Builtins::kFunctionPrototypeApply) {
PrintF("apply from ");
it.Advance();
raw_frame = it.frame();
@@ -458,31 +452,28 @@ static void TraceTopFrame(Isolate* isolate) {
}
static void SortIndices(
- Handle<FixedArray> indices, uint32_t sort_size,
+ Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
- struct {
- bool operator()(const base::AtomicElement<Object*>& elementA,
- const base::AtomicElement<Object*>& elementB) {
- const Object* a = elementA.value();
- const Object* b = elementB.value();
- if (a->IsSmi() || !a->IsUndefined(HeapObject::cast(a)->GetIsolate())) {
- if (!b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate())) {
- return true;
- }
- return a->Number() < b->Number();
- }
- return !b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate());
- }
- } cmp;
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
base::AtomicElement<Object*>* start =
reinterpret_cast<base::AtomicElement<Object*>*>(
indices->GetFirstElementAddress());
- std::sort(start, start + sort_size, cmp);
+ std::sort(start, start + sort_size,
+ [isolate](const base::AtomicElement<Object*>& elementA,
+ const base::AtomicElement<Object*>& elementB) {
+ const Object* a = elementA.value();
+ const Object* b = elementB.value();
+ if (a->IsSmi() || !a->IsUndefined(isolate)) {
+ if (!b->IsSmi() && b->IsUndefined(isolate)) {
+ return true;
+ }
+ return a->Number() < b->Number();
+ }
+ return !b->IsSmi() && b->IsUndefined(isolate);
+ });
if (write_barrier_mode != SKIP_WRITE_BARRIER) {
- FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(indices->GetIsolate()->heap(), *indices,
- 0, sort_size);
+ FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *indices, 0, sort_size);
}
}
@@ -598,7 +589,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
uint32_t start, uint32_t end) {
DisallowHeapAllocation no_gc;
if (IsFastPackedElementsKind(kind())) return true;
- Isolate* isolate = backing_store->GetIsolate();
+ Isolate* isolate = holder->GetIsolate();
for (uint32_t i = start; i < end; i++) {
if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
ALL_PROPERTIES)) {
@@ -610,7 +601,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
if (!IsHoleyOrDictionaryElementsKind(kind())) return;
- Handle<FixedArrayBase> backing_store(array->elements());
+ Handle<FixedArrayBase> backing_store(array->elements(),
+ array->GetIsolate());
int length = Smi::ToInt(array->length());
if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) {
return;
@@ -760,7 +752,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
void SetLength(Handle<JSArray> array, uint32_t length) final {
Subclass::SetLengthImpl(array->GetIsolate(), array, length,
- handle(array->elements()));
+ handle(array->elements(), array->GetIsolate()));
}
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -873,7 +865,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
packed_size = Smi::ToInt(JSArray::cast(*object)->length());
}
- Subclass::CopyElementsImpl(*old_elements, src_index, *new_elements,
+ Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements,
from_kind, dst_index, packed_size, copy_size);
return new_elements;
@@ -881,7 +873,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static void TransitionElementsKindImpl(Handle<JSObject> object,
Handle<Map> to_map) {
- Handle<Map> from_map = handle(object->map());
+ Handle<Map> from_map = handle(object->map(), object->GetIsolate());
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
if (IsHoleyElementsKind(from_kind)) {
@@ -893,8 +885,10 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(IsFastElementsKind(to_kind));
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
- Handle<FixedArrayBase> from_elements(object->elements());
- if (object->elements() == object->GetHeap()->empty_fixed_array() ||
+ Handle<FixedArrayBase> from_elements(object->elements(),
+ object->GetIsolate());
+ if (object->elements() ==
+ object->GetReadOnlyRoots().empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
@@ -909,9 +903,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::SetMapAndElements(object, to_map, elements);
}
if (FLAG_trace_elements_transitions) {
- JSObject::PrintElementsTransition(stdout, object, from_kind,
- from_elements, to_kind,
- handle(object->elements()));
+ JSObject::PrintElementsTransition(
+ stdout, object, from_kind, from_elements, to_kind,
+ handle(object->elements(), object->GetIsolate()));
}
}
}
@@ -925,7 +919,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// prototype object, make sure all of these optimizations are invalidated.
object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
}
- Handle<FixedArrayBase> old_elements(object->elements());
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
// This method should only be called if there's a reason to update the
// elements.
DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
@@ -971,7 +966,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
object->WouldConvertToSlowElements(index)) {
return false;
}
- Handle<FixedArrayBase> old_elements(object->elements());
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
Handle<FixedArrayBase> elements =
@@ -992,10 +988,10 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Subclass::DeleteImpl(obj, entry);
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
UNREACHABLE();
}
@@ -1021,14 +1017,15 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// copying from object with fast double elements to object with object
// elements. In all the other cases there are no allocations performed and
// handle creation causes noticeable performance degradation of the builtin.
- Subclass::CopyElementsImpl(from, from_start, *to, from_kind, to_start,
- packed_size, copy_size);
+ Subclass::CopyElementsImpl(from_holder->GetIsolate(), from, from_start, *to,
+ from_kind, to_start, packed_size, copy_size);
}
- void CopyElements(Handle<FixedArrayBase> source, ElementsKind source_kind,
+ void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
+ ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) {
- Subclass::CopyElementsImpl(*source, 0, *destination, source_kind, 0,
- kPackedSizeNotKnown, size);
+ Subclass::CopyElementsImpl(isolate, *source, 0, *destination, source_kind,
+ 0, kPackedSizeNotKnown, size);
}
void CopyTypedArrayElementsSlice(JSTypedArray* source,
@@ -1056,7 +1053,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
Handle<NumberDictionary> Normalize(Handle<JSObject> object) final {
- return Subclass::NormalizeImpl(object, handle(object->elements()));
+ return Subclass::NormalizeImpl(
+ object, handle(object->elements(), object->GetIsolate()));
}
static Handle<NumberDictionary> NormalizeImpl(
@@ -1242,7 +1240,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
combined_keys, &nof_indices);
if (needs_sorting) {
- SortIndices(combined_keys, nof_indices);
+ SortIndices(isolate, combined_keys, nof_indices);
// Indices from dictionary elements should only be converted after
// sorting.
if (convert == GetKeysConversion::kConvertToString) {
@@ -1255,8 +1253,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
// Copy over the passed-in property keys.
- CopyObjectToObjectElements(*keys, PACKED_ELEMENTS, 0, *combined_keys,
- PACKED_ELEMENTS, nof_indices, nof_property_keys);
+ CopyObjectToObjectElements(isolate, *keys, PACKED_ELEMENTS, 0,
+ *combined_keys, PACKED_ELEMENTS, nof_indices,
+ nof_property_keys);
// For holey elements and arguments we might have to shrink the collected
// keys since the estimates might be off.
@@ -1265,7 +1264,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// Shrink combined_keys to the final size.
int final_size = nof_indices + nof_property_keys;
DCHECK_LE(final_size, combined_keys->length());
- combined_keys->Shrink(final_size);
+ return FixedArray::ShrinkOrEmpty(isolate, combined_keys, final_size);
}
return combined_keys;
@@ -1433,13 +1432,14 @@ class DictionaryElementsAccessor
CHECK(array->length()->ToArrayLength(&old_length));
{
DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots(isolate);
if (length < old_length) {
if (dict->requires_slow_elements()) {
// Find last non-deletable element in range of elements to be
// deleted and adjust range accordingly.
for (int entry = 0; entry < capacity; entry++) {
Object* index = dict->KeyAt(entry);
- if (dict->IsKey(isolate, index)) {
+ if (dict->IsKey(roots, index)) {
uint32_t number = static_cast<uint32_t>(index->Number());
if (length <= number && number < old_length) {
PropertyDetails details = dict->DetailsAt(entry);
@@ -1457,10 +1457,10 @@ class DictionaryElementsAccessor
int removed_entries = 0;
for (int entry = 0; entry < capacity; entry++) {
Object* index = dict->KeyAt(entry);
- if (dict->IsKey(isolate, index)) {
+ if (dict->IsKey(roots, index)) {
uint32_t number = static_cast<uint32_t>(index->Number());
if (length <= number && number < old_length) {
- dict->ClearEntry(entry);
+ dict->ClearEntry(isolate, entry);
removed_entries++;
}
}
@@ -1478,10 +1478,10 @@ class DictionaryElementsAccessor
array->set_length(*length_obj);
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
UNREACHABLE();
}
@@ -1496,15 +1496,16 @@ class DictionaryElementsAccessor
JSObject::NormalizeElements(result_array);
result_array->set_length(Smi::FromInt(result_length));
Handle<NumberDictionary> source_dict(
- NumberDictionary::cast(receiver->elements()));
+ NumberDictionary::cast(receiver->elements()), isolate);
int entry_count = source_dict->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < entry_count; i++) {
Object* key = source_dict->KeyAt(i);
- if (!source_dict->ToKey(isolate, i, &key)) continue;
+ if (!source_dict->ToKey(roots, i, &key)) continue;
uint64_t key_value = NumberToInt64(key);
if (key_value >= start && key_value < end) {
Handle<NumberDictionary> dest_dict(
- NumberDictionary::cast(result_array->elements()));
+ NumberDictionary::cast(result_array->elements()), isolate);
Handle<Object> value(source_dict->ValueAt(i), isolate);
PropertyDetails details = source_dict->DetailsAt(i);
PropertyAttributes attr = details.attributes();
@@ -1517,8 +1518,9 @@ class DictionaryElementsAccessor
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
- Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()));
- dict = NumberDictionary::DeleteEntry(dict, entry);
+ Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()),
+ obj->GetIsolate());
+ dict = NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry);
obj->set_elements(*dict);
}
@@ -1528,10 +1530,10 @@ class DictionaryElementsAccessor
NumberDictionary* dict = NumberDictionary::cast(backing_store);
if (!dict->requires_slow_elements()) return false;
int capacity = dict->Capacity();
- Isolate* isolate = dict->GetIsolate();
+ ReadOnlyRoots roots = holder->GetReadOnlyRoots();
for (int i = 0; i < capacity; i++) {
Object* key = dict->KeyAt(i);
- if (!dict->IsKey(isolate, key)) continue;
+ if (!dict->IsKey(roots, key)) continue;
PropertyDetails details = dict->DetailsAt(i);
if (details.kind() == kAccessor) return true;
}
@@ -1569,7 +1571,7 @@ class DictionaryElementsAccessor
details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
details.dictionary_index());
- dictionary->DetailsAtPut(entry, details);
+ dictionary->DetailsAtPut(object->GetIsolate(), entry, details);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -1579,9 +1581,10 @@ class DictionaryElementsAccessor
Handle<NumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
- : handle(NumberDictionary::cast(object->elements()));
- Handle<NumberDictionary> new_dictionary =
- NumberDictionary::Add(dictionary, index, value, details);
+ : handle(NumberDictionary::cast(object->elements()),
+ object->GetIsolate());
+ Handle<NumberDictionary> new_dictionary = NumberDictionary::Add(
+ object->GetIsolate(), dictionary, index, value, details);
new_dictionary->UpdateMaxNumberKey(index, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (dictionary.is_identical_to(new_dictionary)) return;
@@ -1643,7 +1646,7 @@ class DictionaryElementsAccessor
int entry, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
Object* raw_key = dictionary->KeyAt(entry);
- if (!dictionary->IsKey(isolate, raw_key)) return kMaxUInt32;
+ if (!dictionary->IsKey(ReadOnlyRoots(isolate), raw_key)) return kMaxUInt32;
return FilterKey(dictionary, entry, raw_key, filter);
}
@@ -1659,9 +1662,10 @@ class DictionaryElementsAccessor
GetMaxNumberOfEntries(*object, *backing_store));
int insertion_index = 0;
PropertyFilter filter = keys->filter();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
Object* raw_key = dictionary->KeyAt(i);
- if (!dictionary->IsKey(isolate, raw_key)) continue;
+ if (!dictionary->IsKey(roots, raw_key)) continue;
uint32_t key = FilterKey(dictionary, i, raw_key, filter);
if (key == kMaxUInt32) {
keys->AddShadowingKey(raw_key);
@@ -1670,7 +1674,7 @@ class DictionaryElementsAccessor
elements->set(insertion_index, raw_key);
insertion_index++;
}
- SortIndices(elements, insertion_index);
+ SortIndices(isolate, elements, insertion_index);
for (int i = 0; i < insertion_index; i++) {
keys->AddKey(elements->get(i));
}
@@ -1705,9 +1709,10 @@ class DictionaryElementsAccessor
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(receiver->elements()), isolate);
int capacity = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (!dictionary->IsKey(isolate, k)) continue;
+ if (!dictionary->IsKey(roots, k)) continue;
Object* value = dictionary->ValueAt(i);
DCHECK(!value->IsTheHole(isolate));
DCHECK(!value->IsAccessorPair());
@@ -1722,8 +1727,8 @@ class DictionaryElementsAccessor
DisallowHeapAllocation no_gc;
NumberDictionary* dictionary = NumberDictionary::cast(receiver->elements());
int capacity = dictionary->Capacity();
- Object* the_hole = isolate->heap()->the_hole_value();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object* undefined = ReadOnlyRoots(isolate).undefined_value();
// Scan for accessor properties. If accessors are present, then elements
// must be accessed in order via the slow path.
@@ -1900,7 +1905,7 @@ class DictionaryElementsAccessor
#if DEBUG
DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
if (!FLAG_enable_slow_asserts) return;
- Isolate* isolate = holder->GetIsolate();
+ ReadOnlyRoots roots = holder->GetReadOnlyRoots();
NumberDictionary* dictionary = NumberDictionary::cast(holder->elements());
// Validate the requires_slow_elements and max_number_key values.
int capacity = dictionary->Capacity();
@@ -1908,7 +1913,7 @@ class DictionaryElementsAccessor
int max_key = 0;
for (int i = 0; i < capacity; ++i) {
Object* k;
- if (!dictionary->ToKey(isolate, i, &k)) continue;
+ if (!dictionary->ToKey(roots, i, &k)) continue;
DCHECK_LE(0.0, k->Number());
if (k->Number() > NumberDictionary::kRequiresSlowElementsLimit) {
requires_slow_elements = true;
@@ -1937,7 +1942,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static Handle<NumberDictionary> NormalizeImpl(Handle<JSObject> object,
Handle<FixedArrayBase> store) {
- Isolate* isolate = store->GetIsolate();
+ Isolate* isolate = object->GetIsolate();
ElementsKind kind = Subclass::kind();
// Ensure that notifications fire if the array or object prototypes are
@@ -1960,7 +1965,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
max_number_key = i;
Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
- dictionary = NumberDictionary::Add(dictionary, i, value, details);
+ dictionary =
+ NumberDictionary::Add(isolate, dictionary, i, value, details);
j++;
}
@@ -1979,7 +1985,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (!backing_store->is_the_hole(isolate, entry - 1)) break;
}
if (entry == 0) {
- FixedArray* empty = isolate->heap()->empty_fixed_array();
+ FixedArray* empty = ReadOnlyRoots(isolate).empty_fixed_array();
// Dynamically ask for the elements kind here since we manually redirect
// the operations for argument backing stores.
if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
@@ -2013,7 +2019,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// has too few used values, normalize it.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() < kMinLengthForSparsenessCheck) return;
- if (backing_store->GetHeap()->InNewSpace(*backing_store)) return;
+ if (Heap::InNewSpace(*backing_store)) return;
uint32_t length = 0;
if (obj->IsJSArray()) {
JSArray::cast(*obj)->length()->ToArrayLength(&length);
@@ -2069,7 +2075,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<Object> value,
PropertyAttributes attributes) {
Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object);
- entry = dictionary->FindEntry(entry);
+ entry = dictionary->FindEntry(object->GetIsolate(), entry);
DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry,
value, attributes);
}
@@ -2105,7 +2111,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
JSObject::EnsureWritableFastElements(obj);
}
- DeleteCommon(obj, entry, handle(obj->elements()));
+ DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate()));
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
@@ -2146,10 +2152,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
FixedArrayBase* elements = holder->elements();
Map* map = elements->map();
if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
- DCHECK_NE(map, heap->fixed_double_array_map());
+ DCHECK_NE(map, ReadOnlyRoots(heap).fixed_double_array_map());
} else if (IsDoubleElementsKind(KindTraits::Kind)) {
- DCHECK_NE(map, heap->fixed_cow_array_map());
- if (map == heap->fixed_array_map()) DCHECK_EQ(0, length);
+ DCHECK_NE(map, ReadOnlyRoots(heap).fixed_cow_array_map());
+ if (map == ReadOnlyRoots(heap).fixed_array_map()) DCHECK_EQ(0, length);
} else {
UNREACHABLE();
}
@@ -2186,14 +2192,16 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static uint32_t PushImpl(Handle<JSArray> receiver,
Arguments* args, uint32_t push_size) {
- Handle<FixedArrayBase> backing_store(receiver->elements());
+ Handle<FixedArrayBase> backing_store(receiver->elements(),
+ receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, push_size,
AT_END);
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver,
Arguments* args, uint32_t unshift_size) {
- Handle<FixedArrayBase> backing_store(receiver->elements());
+ Handle<FixedArrayBase> backing_store(receiver->elements(),
+ receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
AT_START);
}
@@ -2206,9 +2214,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<JSArray> result_array = isolate->factory()->NewJSArray(
KindTraits::Kind, result_len, result_len);
DisallowHeapAllocation no_gc;
- Subclass::CopyElementsImpl(*backing_store, start, result_array->elements(),
- KindTraits::Kind, 0, kPackedSizeNotKnown,
- result_len);
+ Subclass::CopyElementsImpl(isolate, *backing_store, start,
+ result_array->elements(), KindTraits::Kind, 0,
+ kPackedSizeNotKnown, result_len);
Subclass::TryTransitionResultArrayToPacked(result_array);
return result_array;
}
@@ -2231,7 +2239,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
if (new_length == 0) {
- receiver->set_elements(heap->empty_fixed_array());
+ receiver->set_elements(ReadOnlyRoots(heap).empty_fixed_array());
receiver->set_length(Smi::kZero);
return isolate->factory()->NewJSArrayWithElements(
backing_store, KindTraits::Kind, delete_count);
@@ -2242,7 +2250,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
KindTraits::Kind, delete_count, delete_count);
if (delete_count > 0) {
DisallowHeapAllocation no_gc;
- Subclass::CopyElementsImpl(*backing_store, start,
+ Subclass::CopyElementsImpl(isolate, *backing_store, start,
deleted_elements->elements(), KindTraits::Kind,
0, kPackedSizeNotKnown, delete_count);
}
@@ -2304,8 +2312,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
FixedArrayBase* elements_base = receiver->elements();
- Object* the_hole = isolate->heap()->the_hole_value();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object* undefined = ReadOnlyRoots(isolate).undefined_value();
Object* value = *search_value;
// Elements beyond the capacity of the backing store treated as undefined.
@@ -2499,8 +2507,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> new_elms = Subclass::ConvertElementsWithCapacity(
receiver, backing_store, KindTraits::Kind, capacity, start);
// Copy the trailing elements after start + delete_count
- Subclass::CopyElementsImpl(*backing_store, start + delete_count, *new_elms,
- KindTraits::Kind, start + add_count,
+ Subclass::CopyElementsImpl(isolate, *backing_store, start + delete_count,
+ *new_elms, KindTraits::Kind, start + add_count,
kPackedSizeNotKnown,
ElementsAccessor::kCopyToEndAndInitializeToHole);
receiver->set_elements(*new_elms);
@@ -2580,7 +2588,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
for (uint32_t i = 0; i < copy_size; i++) {
Object* argument = (*args)[src_index + i];
- DCHECK(!argument->IsTheHole(raw_backing_store->GetIsolate()));
+ DCHECK(!argument->IsTheHole());
Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
}
}
@@ -2618,10 +2626,10 @@ class FastSmiOrObjectElementsAccessor
// See ElementsAccessor::CopyElements() for details.
// This method could actually allocate if copying from double elements to
// object elements.
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
DisallowHeapAllocation no_gc;
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
@@ -2629,19 +2637,20 @@ class FastSmiOrObjectElementsAccessor
case HOLEY_SMI_ELEMENTS:
case PACKED_ELEMENTS:
case HOLEY_ELEMENTS:
- CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
- to_start, copy_size);
+ CopyObjectToObjectElements(isolate, from, from_kind, from_start, to,
+ to_kind, to_start, copy_size);
break;
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS: {
AllowHeapAllocation allow_allocation;
DCHECK(IsObjectElementsKind(to_kind));
- CopyDoubleToObjectElements(from, from_start, to, to_start, copy_size);
+ CopyDoubleToObjectElements(isolate, from, from_start, to, to_start,
+ copy_size);
break;
}
case DICTIONARY_ELEMENTS:
- CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
- copy_size);
+ CopyDictionaryToObjectElements(isolate, from, from_start, to, to_kind,
+ to_start, copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -2789,10 +2798,10 @@ class FastDoubleElementsAccessor
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
DisallowHeapAllocation no_allocation;
switch (from_kind) {
case PACKED_SMI_ELEMENTS:
@@ -2811,7 +2820,7 @@ class FastDoubleElementsAccessor
CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case DICTIONARY_ELEMENTS:
- CopyDictionaryToDoubleElements(from, from_start, to, to_start,
+ CopyDictionaryToDoubleElements(isolate, from, from_start, to, to_start,
copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -3003,7 +3012,7 @@ class TypedElementsAccessor
KeyAccumulator* accumulator,
AddKeyConversion convert) {
Isolate* isolate = receiver->GetIsolate();
- Handle<FixedArrayBase> elements(receiver->elements());
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
@@ -3017,7 +3026,7 @@ class TypedElementsAccessor
PropertyFilter filter) {
int count = 0;
if ((filter & ONLY_CONFIGURABLE) == 0) {
- Handle<FixedArrayBase> elements(object->elements());
+ Handle<FixedArrayBase> elements(object->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
for (uint32_t index = 0; index < length; ++index) {
Handle<Object> value =
@@ -3232,7 +3241,8 @@ class TypedElementsAccessor
DCHECK(!WasNeutered(*object));
DCHECK(object->IsJSTypedArray());
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
- Handle<BackingStore> elements(BackingStore::cast(object->elements()));
+ Handle<BackingStore> elements(BackingStore::cast(object->elements()),
+ isolate);
for (uint32_t i = 0; i < length; i++) {
Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
result->set(i, *value);
@@ -3426,7 +3436,7 @@ class TypedElementsAccessor
// the hole into undefined.
if (HoleyPrototypeLookupRequired(isolate, context, source)) return false;
- Object* undefined = isolate->heap()->undefined_value();
+ Object* undefined = ReadOnlyRoots(isolate).undefined_value();
// Fastpath for packed Smi kind.
if (kind == PACKED_SMI_ELEMENTS) {
@@ -3486,7 +3496,7 @@ class TypedElementsAccessor
size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
Handle<BackingStore> destination_elements(
- BackingStore::cast(destination->elements()));
+ BackingStore::cast(destination->elements()), isolate);
for (uint32_t i = 0; i < length; i++) {
LookupIterator it(isolate, source, i);
Handle<Object> elem;
@@ -3497,7 +3507,7 @@ class TypedElementsAccessor
BigInt::FromObject(isolate, elem));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
- Object::ToNumber(elem));
+ Object::ToNumber(isolate, elem));
}
if (V8_UNLIKELY(destination->WasNeutered())) {
@@ -3647,10 +3657,10 @@ class SloppyArgumentsElementsAccessor
// Store context mapped entry.
DisallowHeapAllocation no_gc;
Object* probe = elements->get_mapped_entry(entry);
- DCHECK(!probe->IsTheHole(store->GetIsolate()));
+ DCHECK(!probe->IsTheHole());
Context* context = elements->context();
int context_entry = Smi::ToInt(probe);
- DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
+ DCHECK(!context->get(context_entry)->IsTheHole());
context->set(context_entry, value);
} else {
// Entry is not context mapped defer to arguments.
@@ -3660,7 +3670,7 @@ class SloppyArgumentsElementsAccessor
AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(current);
Context* context = elements->context();
int context_entry = alias->aliased_context_slot();
- DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
+ DCHECK(!context->get(context_entry)->IsTheHole());
context->set(context_entry, value);
} else {
ArgumentsAccessor::SetImpl(arguments, entry - length, value);
@@ -3785,7 +3795,7 @@ class SloppyArgumentsElementsAccessor
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
Handle<SloppyArgumentsElements> elements(
- SloppyArgumentsElements::cast(obj->elements()));
+ SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate());
uint32_t length = elements->parameter_map_length();
uint32_t delete_or_entry = entry;
if (entry < length) {
@@ -3795,7 +3805,8 @@ class SloppyArgumentsElementsAccessor
// SloppyDeleteImpl allocates a new dictionary elements store. For making
// heap verification happy we postpone clearing out the mapped entry.
if (entry < length) {
- elements->set_mapped_entry(entry, obj->GetHeap()->the_hole_value());
+ elements->set_mapped_entry(entry,
+ obj->GetReadOnlyRoots().the_hole_value());
}
}
@@ -3816,7 +3827,7 @@ class SloppyArgumentsElementsAccessor
DirectCollectElementIndicesImpl(isolate, object, backing_store,
GetKeysConversion::kKeepNumbers,
ENUMERABLE_STRINGS, indices, &nof_indices);
- SortIndices(indices, nof_indices);
+ SortIndices(isolate, indices, nof_indices);
for (uint32_t i = 0; i < nof_indices; i++) {
keys->AddKey(indices->get(i));
}
@@ -3990,7 +4001,7 @@ class SlowSloppyArgumentsElementsAccessor
Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()),
isolate);
int length = elements->parameter_map_length();
- dict = NumberDictionary::DeleteEntry(dict, entry - length);
+ dict = NumberDictionary::DeleteEntry(isolate, dict, entry - length);
elements->set_arguments(*dict);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -4007,7 +4018,7 @@ class SlowSloppyArgumentsElementsAccessor
: JSObject::NormalizeElements(object);
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<NumberDictionary> new_dictionary =
- NumberDictionary::Add(dictionary, index, value, details);
+ NumberDictionary::Add(isolate, dictionary, index, value, details);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (*dictionary != *new_dictionary) {
elements->set_arguments(*new_dictionary);
@@ -4018,7 +4029,7 @@ class SlowSloppyArgumentsElementsAccessor
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- Isolate* isolate = store->GetIsolate();
+ Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements =
Handle<SloppyArgumentsElements>::cast(store);
uint32_t length = elements->parameter_map_length();
@@ -4031,7 +4042,8 @@ class SlowSloppyArgumentsElementsAccessor
context->set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- elements->set_mapped_entry(entry, isolate->heap()->the_hole_value());
+ elements->set_mapped_entry(entry,
+ ReadOnlyRoots(isolate).the_hole_value());
// For elements that are still writable we re-establish slow aliasing.
if ((attributes & READ_ONLY) == 0) {
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
@@ -4040,7 +4052,8 @@ class SlowSloppyArgumentsElementsAccessor
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<NumberDictionary> arguments(
NumberDictionary::cast(elements->arguments()), isolate);
- arguments = NumberDictionary::Add(arguments, entry, value, details);
+ arguments =
+ NumberDictionary::Add(isolate, arguments, entry, value, details);
// If the attributes were NONE, we would have called set rather than
// reconfigure.
DCHECK_NE(NONE, attributes);
@@ -4082,7 +4095,7 @@ class FastSloppyArgumentsElementsAccessor
static Handle<NumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
Handle<FixedArray> arguments =
- GetArguments(elements->GetIsolate(), *elements);
+ GetArguments(object->GetIsolate(), *elements);
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
}
@@ -4096,7 +4109,8 @@ class FastSloppyArgumentsElementsAccessor
if (*entry == kMaxUInt32) return dictionary;
uint32_t length = elements->parameter_map_length();
if (*entry >= length) {
- *entry = dictionary->FindEntry(*entry - length) + length;
+ *entry =
+ dictionary->FindEntry(object->GetIsolate(), *entry - length) + length;
}
return dictionary;
}
@@ -4136,23 +4150,23 @@ class FastSloppyArgumentsElementsAccessor
PropertyAttributes attributes) {
DCHECK_EQ(object->elements(), *store);
Handle<SloppyArgumentsElements> elements(
- SloppyArgumentsElements::cast(*store));
+ SloppyArgumentsElements::cast(*store), object->GetIsolate());
NormalizeArgumentsElements(object, elements, &entry);
SlowSloppyArgumentsElementsAccessor::ReconfigureImpl(object, store, entry,
value, attributes);
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
DCHECK(!to->IsDictionary());
if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
- CopyDictionaryToObjectElements(from, from_start, to, HOLEY_ELEMENTS,
- to_start, copy_size);
+ CopyDictionaryToObjectElements(isolate, from, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
} else {
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, from_kind);
- CopyObjectToObjectElements(from, HOLEY_ELEMENTS, from_start, to,
+ CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to,
HOLEY_ELEMENTS, to_start, copy_size);
}
}
@@ -4199,7 +4213,7 @@ class StringWrapperElementsAccessor
uint32_t length = static_cast<uint32_t>(string->length());
if (entry < length) {
return isolate->factory()->LookupSingleCharacterStringFromCode(
- String::Flatten(string)->Get(entry));
+ String::Flatten(isolate, string)->Get(entry));
}
return BackingStoreAccessor::GetImpl(isolate, holder->elements(),
entry - length);
@@ -4281,7 +4295,7 @@ class StringWrapperElementsAccessor
AddKeyConversion convert) {
Isolate* isolate = receiver->GetIsolate();
Handle<String> string(GetString(*receiver), isolate);
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
uint32_t length = static_cast<uint32_t>(string->length());
for (uint32_t i = 0; i < length; i++) {
accumulator->AddKey(
@@ -4307,7 +4321,8 @@ class StringWrapperElementsAccessor
static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
uint32_t capacity) {
- Handle<FixedArrayBase> old_elements(object->elements());
+ Handle<FixedArrayBase> old_elements(object->elements(),
+ object->GetIsolate());
ElementsKind from_kind = object->GetElementsKind();
if (from_kind == FAST_STRING_WRAPPER_ELEMENTS) {
// The optimizing compiler relies on the prototype lookups of String
@@ -4325,17 +4340,17 @@ class StringWrapperElementsAccessor
capacity);
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
+ uint32_t from_start, FixedArrayBase* to,
+ ElementsKind from_kind, uint32_t to_start,
+ int packed_size, int copy_size) {
DCHECK(!to->IsDictionary());
if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
- CopyDictionaryToObjectElements(from, from_start, to, HOLEY_ELEMENTS,
- to_start, copy_size);
+ CopyDictionaryToObjectElements(isolate, from, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
} else {
DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind);
- CopyObjectToObjectElements(from, HOLEY_ELEMENTS, from_start, to,
+ CopyObjectToObjectElements(isolate, from, HOLEY_ELEMENTS, from_start, to,
HOLEY_ELEMENTS, to_start, copy_size);
}
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 385ae85ad1..a7dab7daa5 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -106,8 +106,9 @@ class ElementsAccessor {
inline MaybeHandle<FixedArray> PrependElementIndices(
Handle<JSObject> object, Handle<FixedArray> keys,
GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES) {
- return PrependElementIndices(object, handle(object->elements()), keys,
- convert, filter);
+ return PrependElementIndices(
+ object, handle(object->elements(), object->GetIsolate()), keys, convert,
+ filter);
}
virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
@@ -180,7 +181,7 @@ class ElementsAccessor {
virtual void Reverse(JSObject* receiver) = 0;
- virtual void CopyElements(Handle<FixedArrayBase> source,
+ virtual void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index e375067418..dc8f4fbb2c 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -157,7 +157,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- value->ObjectVerify();
+ value->ObjectVerify(isolate);
}
#endif
@@ -244,7 +244,7 @@ MaybeHandle<Object> Execution::TryCall(
if (maybe_result.is_null()) {
DCHECK(isolate->has_pending_exception());
if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
+ ReadOnlyRoots(isolate).termination_exception()) {
is_termination = true;
} else {
if (exception_out != nullptr) {
@@ -514,15 +514,6 @@ Object* StackGuard::HandleInterrupts() {
isolate_->heap()->HandleGCRequest();
}
- if (CheckDebugBreak()) {
- if (FLAG_trace_interrupts) {
- if (any_interrupt_handled) PrintF(", ");
- PrintF("DEBUG_BREAK");
- any_interrupt_handled = true;
- }
- isolate_->debug()->HandleDebugBreak(kIgnoreIfTopFrameBlackboxed);
- }
-
if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
if (FLAG_trace_interrupts) {
if (any_interrupt_handled) PrintF(", ");
@@ -572,7 +563,7 @@ Object* StackGuard::HandleInterrupts() {
isolate_->counters()->runtime_profiler_ticks()->Increment();
isolate_->runtime_profiler()->MarkCandidatesForOptimization();
- return isolate_->heap()->undefined_value();
+ return ReadOnlyRoots(isolate_).undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index e0606295ed..44491c6e67 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -89,12 +89,11 @@ class V8_EXPORT_PRIVATE StackGuard final {
void ClearThread(const ExecutionAccess& lock);
#define INTERRUPT_LIST(V) \
- V(DEBUGBREAK, DebugBreak, 0) \
- V(TERMINATE_EXECUTION, TerminateExecution, 1) \
- V(GC_REQUEST, GC, 2) \
- V(INSTALL_CODE, InstallCode, 3) \
- V(API_INTERRUPT, ApiInterrupt, 4) \
- V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
+ V(TERMINATE_EXECUTION, TerminateExecution, 0) \
+ V(GC_REQUEST, GC, 1) \
+ V(INSTALL_CODE, InstallCode, 2) \
+ V(API_INTERRUPT, ApiInterrupt, 3) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 54c6259b8b..363c0c593d 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -98,10 +98,6 @@ void ExternalizeStringExtension::Externalize(
SimpleOneByteStringResource* resource = new SimpleOneByteStringResource(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- isolate->heap()->RegisterExternalString(*string);
- }
if (!result) delete resource;
} else {
uc16* data = new uc16[string->length()];
@@ -109,10 +105,6 @@ void ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- isolate->heap()->RegisterExternalString(*string);
- }
if (!result) delete resource;
}
if (!result) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 47d3cd2d53..e9c4221f9a 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -35,7 +35,8 @@ void ExternalReferenceTable::Init(Isolate* isolate) {
AddIsolateAddresses(isolate, &index);
AddAccessors(isolate, &index);
AddStubCache(isolate, &index);
- is_initialized_ = true;
+ is_initialized_ = static_cast<uint32_t>(true);
+ USE(unused_padding_);
CHECK_EQ(kSize, index);
}
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 8cb7d95eed..c880aa8c40 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -8,7 +8,6 @@
#include <vector>
#include "src/accessors.h"
-#include "src/address-map.h"
#include "src/builtins/builtins.h"
#include "src/external-reference.h"
@@ -43,20 +42,26 @@ class ExternalReferenceTable {
kIsolateAddressReferenceCount + kAccessorReferenceCount +
kStubCacheReferenceCount;
- uint32_t size() const { return static_cast<uint32_t>(kSize); }
+ static constexpr uint32_t size() { return static_cast<uint32_t>(kSize); }
Address address(uint32_t i) { return refs_[i].address; }
const char* name(uint32_t i) { return refs_[i].name; }
- bool is_initialized() const { return is_initialized_; }
+ bool is_initialized() const { return is_initialized_ != 0; }
static const char* ResolveSymbol(void* address);
- static uint32_t OffsetOfEntry(uint32_t i) {
+ static constexpr uint32_t OffsetOfEntry(uint32_t i) {
// Used in CodeAssembler::LookupExternalReference.
STATIC_ASSERT(offsetof(ExternalReferenceEntry, address) == 0);
return i * sizeof(ExternalReferenceEntry);
}
+ static constexpr uint32_t SizeInBytes() {
+ STATIC_ASSERT(OffsetOfEntry(size()) + 2 * kUInt32Size ==
+ sizeof(ExternalReferenceTable));
+ return OffsetOfEntry(size()) + 2 * kUInt32Size;
+ }
+
ExternalReferenceTable() {}
void Init(Isolate* isolate);
@@ -80,7 +85,8 @@ class ExternalReferenceTable {
void AddStubCache(Isolate* isolate, int* index);
ExternalReferenceEntry refs_[kSize];
- bool is_initialized_ = false;
+ uint32_t is_initialized_ = 0; // Not bool to guarantee deterministic size.
+ uint32_t unused_padding_ = 0; // For alignment.
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
index 8b09c67ccc..dedf76b6d2 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/external-reference.cc
@@ -57,8 +57,6 @@ namespace internal {
constexpr double double_min_int_constant = kMinInt;
constexpr double double_one_half_constant = 0.5;
-constexpr double double_minus_one_half_constant = -0.5;
-constexpr double double_negative_infinity_constant = -V8_INFINITY;
constexpr uint64_t double_the_hole_nan_constant = kHoleNanInt64;
constexpr double double_uint32_bias_constant =
static_cast<double>(kMaxUInt32) + 1;
@@ -128,7 +126,7 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
}
ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
- return ExternalReference(isolate->builtins()->builtins_table_address());
+ return ExternalReference(isolate->heap()->builtin_address(0));
}
ExternalReference ExternalReference::handle_scope_implementer_address(
@@ -480,16 +478,6 @@ ExternalReference ExternalReference::address_of_one_half() {
reinterpret_cast<Address>(&double_one_half_constant));
}
-ExternalReference ExternalReference::address_of_minus_one_half() {
- return ExternalReference(
- reinterpret_cast<Address>(&double_minus_one_half_constant));
-}
-
-ExternalReference ExternalReference::address_of_negative_infinity() {
- return ExternalReference(
- reinterpret_cast<Address>(&double_negative_infinity_constant));
-}
-
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(
reinterpret_cast<Address>(&double_the_hole_nan_constant));
@@ -725,9 +713,9 @@ ExternalReference ExternalReference::libc_memmove_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(libc_memmove)));
}
-void* libc_memset(void* dest, int byte, size_t n) {
- DCHECK_EQ(static_cast<char>(byte), byte);
- return memset(dest, byte, n);
+void* libc_memset(void* dest, int value, size_t n) {
+ DCHECK_EQ(static_cast<byte>(value), value);
+ return memset(dest, value, n);
}
ExternalReference ExternalReference::libc_memset_function() {
@@ -839,9 +827,20 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
-ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
+ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
+ return ExternalReference(isolate->promise_hook_address());
+}
+
+ExternalReference ExternalReference::async_event_delegate_address(
Isolate* isolate) {
- return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
+ return ExternalReference(isolate->async_event_delegate_address());
+}
+
+ExternalReference
+ExternalReference::promise_hook_or_async_event_delegate_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->promise_hook_or_async_event_delegate_address());
}
ExternalReference ExternalReference::debug_is_active_address(Isolate* isolate) {
@@ -853,11 +852,6 @@ ExternalReference ExternalReference::debug_hook_on_function_call_address(
return ExternalReference(isolate->debug()->hook_on_function_call_address());
}
-ExternalReference ExternalReference::debug_execution_mode_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug_execution_mode_address());
-}
-
ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
return ExternalReference(
@@ -927,11 +921,6 @@ ExternalReference ExternalReference::mod_two_doubles_operation() {
Redirect(FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
}
-ExternalReference ExternalReference::debug_last_step_action_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->last_step_action_address());
-}
-
ExternalReference ExternalReference::debug_suspended_generator_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->suspended_generator_address());
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
index 0332c51974..a22ca0157e 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/external-reference.h
@@ -53,15 +53,15 @@ class StatsCounter;
V(address_of_pending_message_obj, "address_of_pending_message_obj") \
V(get_or_create_hash_raw, "get_or_create_hash_raw") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
- V(promise_hook_or_debug_is_active_address, \
- "Isolate::promise_hook_or_debug_is_active_address()") \
+ V(promise_hook_address, "Isolate::promise_hook_address()") \
+ V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
+ V(promise_hook_or_async_event_delegate_address, \
+ "Isolate::promise_hook_or_async_event_delegate_address()") \
V(debug_is_active_address, "Debug::is_active_address()") \
V(debug_hook_on_function_call_address, \
"Debug::hook_on_function_call_address()") \
- V(debug_execution_mode_address, "Isolate::debug_execution_mode()") \
V(runtime_function_table_address, \
"Runtime::runtime_function_table_address()") \
- V(debug_last_step_action_address, "Debug::step_in_enabled_address()") \
V(is_profiling_address, "Isolate::is_profiling") \
V(debug_suspended_generator_address, \
"Debug::step_suspended_generator_address()") \
@@ -74,8 +74,6 @@ class StatsCounter;
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
V(address_of_min_int, "LDoubleConstant::min_int") \
- V(address_of_minus_one_half, "double_constants.minus_one_half") \
- V(address_of_negative_infinity, "LDoubleConstant::negative_infinity") \
V(address_of_one_half, "LDoubleConstant::one_half") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
@@ -260,7 +258,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference Create(const Runtime::Function* f);
static ExternalReference Create(IsolateAddressId id, Isolate* isolate);
static ExternalReference Create(Runtime::FunctionId id);
- static ExternalReference Create(Address address);
+ static V8_EXPORT_PRIVATE ExternalReference Create(Address address);
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw();
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index f55b8d4ac9..c3c0c8c63d 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -249,8 +249,8 @@ ForInHint ForInHintFromFeedback(int type_feedback) {
void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count) {
- Object* megamorphic_sentinel =
- *FeedbackVector::MegamorphicSentinel(GetIsolate());
+ MaybeObject* megamorphic_sentinel = MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(GetIsolate()));
int with = 0;
int gen = 0;
int total = 0;
@@ -259,7 +259,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
- Object* const obj = Get(slot)->ToStrongHeapObject();
+ MaybeObject* const obj = Get(slot);
AssertNoLegacyTypes(obj);
switch (kind) {
case FeedbackSlotKind::kCall:
@@ -277,7 +277,10 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile: {
- if (obj->IsWeakCell() || obj->IsWeakFixedArray() || obj->IsString()) {
+ HeapObject* heap_object;
+ if (obj->IsWeakOrClearedHeapObject() ||
+ (obj->ToStrongHeapObject(&heap_object) &&
+ (heap_object->IsWeakFixedArray() || heap_object->IsString()))) {
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
@@ -287,7 +290,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kBinaryOp: {
- int const feedback = Smi::ToInt(obj);
+ int const feedback = Smi::ToInt(obj->ToSmi());
BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
if (hint == BinaryOperationHint::kAny) {
gen++;
@@ -299,20 +302,19 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kCompareOp: {
- int const feedback = Smi::ToInt(obj);
- CompareOperationHint hint =
- CompareOperationHintFromFeedback(feedback);
- if (hint == CompareOperationHint::kAny) {
- gen++;
- }
- if (hint != CompareOperationHint::kNone) {
- with++;
- }
- total++;
+ int const feedback = Smi::ToInt(obj->ToSmi());
+ CompareOperationHint hint = CompareOperationHintFromFeedback(feedback);
+ if (hint == CompareOperationHint::kAny) {
+ gen++;
+ }
+ if (hint != CompareOperationHint::kNone) {
+ with++;
+ }
+ total++;
break;
}
case FeedbackSlotKind::kForIn: {
- int const feedback = Smi::ToInt(obj);
+ int const feedback = Smi::ToInt(obj->ToSmi());
ForInHint hint = ForInHintFromFeedback(feedback);
if (hint == ForInHint::kAny) {
gen++;
@@ -324,7 +326,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kInstanceOf: {
- if (obj->IsWeakCell()) {
+ if (obj->IsWeakOrClearedHeapObject()) {
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
@@ -365,7 +367,7 @@ Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
}
Symbol* FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
- return isolate->heap()->uninitialized_symbol();
+ return ReadOnlyRoots(isolate).uninitialized_symbol();
}
bool FeedbackMetadataIterator::HasNext() const {
@@ -384,8 +386,8 @@ int FeedbackMetadataIterator::entry_size() const {
return FeedbackMetadata::GetSlotSize(kind());
}
-Object* FeedbackNexus::GetFeedback() const {
- Object* feedback = vector()->Get(slot())->ToObject();
+MaybeObject* FeedbackNexus::GetFeedback() const {
+ MaybeObject* feedback = vector()->Get(slot());
FeedbackVector::AssertNoLegacyTypes(feedback);
return feedback;
}
@@ -400,8 +402,12 @@ MaybeObject* FeedbackNexus::GetFeedbackExtra() const {
}
void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
+ SetFeedback(MaybeObject::FromObject(feedback));
+}
+
+void FeedbackNexus::SetFeedback(MaybeObject* feedback, WriteBarrierMode mode) {
FeedbackVector::AssertNoLegacyTypes(feedback);
- vector()->Set(slot(), MaybeObject::FromObject(feedback), mode);
+ vector()->Set(slot(), feedback, mode);
}
void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
@@ -409,7 +415,7 @@ void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
#ifdef DEBUG
FeedbackSlotKind kind = vector()->GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
- FeedbackVector::AssertNoLegacyTypes(feedback_extra);
+ FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra));
#endif
int index = vector()->GetIndex(slot()) + 1;
vector()->set(index, MaybeObject::FromObject(feedback_extra), mode);
@@ -418,7 +424,7 @@ void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
void FeedbackNexus::SetFeedbackExtra(MaybeObject* feedback_extra,
WriteBarrierMode mode) {
#ifdef DEBUG
- FeedbackVector::AssertNoLegacyTypes(feedback_extra->GetHeapObjectOrSmi());
+ FeedbackVector::AssertNoLegacyTypes(feedback_extra);
#endif
int index = vector()->GetIndex(slot()) + 1;
vector()->set(index, feedback_extra, mode);
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 23bfe77e58..ba3b711b1e 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -39,14 +39,16 @@ bool FeedbackVectorSpec::HasTypeProfileSlot() const {
return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
-static bool IsPropertyNameFeedback(Object* feedback) {
- if (feedback->IsString()) return true;
- if (!feedback->IsSymbol()) return false;
- Symbol* symbol = Symbol::cast(feedback);
- Heap* heap = symbol->GetHeap();
- return symbol != heap->uninitialized_symbol() &&
- symbol != heap->premonomorphic_symbol() &&
- symbol != heap->megamorphic_symbol();
+static bool IsPropertyNameFeedback(MaybeObject* feedback) {
+ HeapObject* heap_object;
+ if (!feedback->ToStrongHeapObject(&heap_object)) return false;
+ if (heap_object->IsString()) return true;
+ if (!heap_object->IsSymbol()) return false;
+ Symbol* symbol = Symbol::cast(heap_object);
+ ReadOnlyRoots roots = symbol->GetReadOnlyRoots();
+ return symbol != roots.uninitialized_symbol() &&
+ symbol != roots.premonomorphic_symbol() &&
+ symbol != roots.megamorphic_symbol();
}
std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
@@ -217,7 +219,8 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
- DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
+ DCHECK_EQ(ReadOnlyRoots(isolate).uninitialized_symbol(),
+ *uninitialized_sentinel);
Handle<Oddball> undefined_value = factory->undefined_value();
for (int i = 0; i < slot_count;) {
FeedbackSlot slot(i);
@@ -231,7 +234,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
- vector->set(index, isolate->heap()->empty_weak_cell(),
+ vector->set(index, HeapObjectReference::ClearedValue(),
SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kForIn:
@@ -292,7 +295,7 @@ void FeedbackVector::AddToVectorsForProfilingTools(
if (!vector->shared_function_info()->IsSubjectToDebugging()) return;
Handle<ArrayList> list = Handle<ArrayList>::cast(
isolate->factory()->feedback_vectors_for_profiling_tools());
- list = ArrayList::Add(list, vector);
+ list = ArrayList::Add(isolate, list, vector);
isolate->SetFeedbackVectorsForProfilingTools(*list);
}
@@ -346,15 +349,15 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
bool FeedbackVector::ClearSlots(Isolate* isolate) {
- Object* uninitialized_sentinel =
- FeedbackVector::RawUninitializedSentinel(isolate);
+ MaybeObject* uninitialized_sentinel = MaybeObject::FromObject(
+ FeedbackVector::RawUninitializedSentinel(isolate));
bool feedback_updated = false;
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
- Object* obj = Get(slot)->ToObject();
+ MaybeObject* obj = Get(slot);
if (obj != uninitialized_sentinel) {
FeedbackNexus nexus(this, slot);
feedback_updated |= nexus.Clear();
@@ -363,23 +366,29 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
return feedback_updated;
}
-void FeedbackVector::AssertNoLegacyTypes(Object* object) {
- // Instead of FixedArray, the Feedback and the Extra should contain
- // WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
- DCHECK_IMPLIES(object->IsFixedArray(), object->IsHashTable());
+void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
+#ifdef DEBUG
+ HeapObject* heap_object;
+ if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ // Instead of FixedArray, the Feedback and the Extra should contain
+ // WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
+ DCHECK_IMPLIES(heap_object->IsFixedArray(), heap_object->IsHashTable());
+ DCHECK(!heap_object->IsWeakCell());
+ }
+#endif
}
Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
- Handle<Object> feedback = handle(GetFeedback(), isolate);
- if (!feedback->IsWeakFixedArray() ||
- WeakFixedArray::cast(*feedback)->length() != length) {
- Handle<WeakFixedArray> array =
- isolate->factory()->NewWeakFixedArray(length);
- SetFeedback(*array);
- return array;
+ HeapObject* heap_object;
+ if (GetFeedback()->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsWeakFixedArray() &&
+ WeakFixedArray::cast(heap_object)->length() == length) {
+ return handle(WeakFixedArray::cast(heap_object), isolate);
}
- return Handle<WeakFixedArray>::cast(feedback);
+ Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
+ SetFeedback(*array);
+ return array;
}
Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
@@ -388,7 +397,7 @@ Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
if (GetFeedbackExtra()->ToStrongHeapObject(&heap_object) &&
heap_object->IsWeakFixedArray() &&
WeakFixedArray::cast(heap_object)->length() == length) {
- return handle(WeakFixedArray::cast(heap_object));
+ return handle(WeakFixedArray::cast(heap_object), isolate);
}
Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
SetFeedbackExtra(*array);
@@ -402,7 +411,7 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
- SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedback(HeapObjectReference::ClearedValue(), SKIP_WRITE_BARRIER);
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
break;
@@ -496,7 +505,8 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
bool changed = false;
- Symbol* sentinel = *FeedbackVector::MegamorphicSentinel(isolate);
+ MaybeObject* sentinel =
+ MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
if (GetFeedback() != sentinel) {
SetFeedback(sentinel, SKIP_WRITE_BARRIER);
changed = true;
@@ -512,7 +522,7 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
InlineCacheState FeedbackNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
+ MaybeObject* feedback = GetFeedback();
switch (kind()) {
case FeedbackSlotKind::kCreateClosure:
@@ -527,8 +537,9 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
if (feedback->IsSmi()) return MONOMORPHIC;
+ DCHECK(feedback->IsWeakOrClearedHeapObject());
MaybeObject* extra = GetFeedbackExtra();
- if (!WeakCell::cast(feedback)->cleared() ||
+ if (!feedback->IsClearedWeakHeapObject() ||
extra != MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
return MONOMORPHIC;
@@ -544,40 +555,51 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed: {
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
}
- if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
return MEGAMORPHIC;
}
- if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::PremonomorphicSentinel(isolate))) {
return PREMONOMORPHIC;
}
- if (feedback->IsWeakFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- }
- if (feedback->IsWeakCell()) {
+ if (feedback->IsWeakOrClearedHeapObject()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
}
- if (feedback->IsName()) {
- DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
- Object* extra = GetFeedbackExtra()->ToStrongHeapObject();
- WeakFixedArray* extra_array = WeakFixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ HeapObject* heap_object;
+ if (feedback->ToStrongHeapObject(&heap_object)) {
+ if (heap_object->IsWeakFixedArray()) {
+ // Determine state purely by our structure, don't check if the maps
+ // are cleared.
+ return POLYMORPHIC;
+ }
+ if (heap_object->IsName()) {
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
+ Object* extra = GetFeedbackExtra()->ToStrongHeapObject();
+ WeakFixedArray* extra_array = WeakFixedArray::cast(extra);
+ return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ }
}
UNREACHABLE();
}
case FeedbackSlotKind::kCall: {
- if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ HeapObject* heap_object;
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
return GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
+ } else if (feedback->IsWeakOrClearedHeapObject() ||
+ (feedback->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsAllocationSite())) {
return MONOMORPHIC;
}
- CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
+ CHECK_EQ(feedback, MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate)));
return UNINITIALIZED;
}
case FeedbackSlotKind::kBinaryOp: {
@@ -610,17 +632,21 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
return MONOMORPHIC;
}
case FeedbackSlotKind::kInstanceOf: {
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ } else if (feedback ==
+ MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
return MEGAMORPHIC;
}
return MONOMORPHIC;
}
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
- } else if (feedback->IsWeakCell()) {
+ } else if (feedback->IsWeakOrClearedHeapObject()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
}
@@ -628,7 +654,8 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
return MEGAMORPHIC;
}
case FeedbackSlotKind::kTypeProfile: {
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
}
return MONOMORPHIC;
@@ -645,7 +672,7 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
DCHECK(IsGlobalICKind(kind()));
Isolate* isolate = GetIsolate();
- SetFeedback(*isolate->factory()->NewWeakCell(cell));
+ SetFeedback(HeapObjectReference::Weak(*cell));
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
}
@@ -672,7 +699,7 @@ bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
DCHECK(IsGlobalICKind(kind()));
DCHECK(IC::IsHandler(*handler));
- SetFeedback(GetIsolate()->heap()->empty_weak_cell());
+ SetFeedback(HeapObjectReference::ClearedValue());
SetFeedbackExtra(*handler);
}
@@ -721,18 +748,17 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
const MaybeObjectHandle& handler) {
DCHECK(handler.is_null() || IC::IsHandler(*handler));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
- SetFeedback(*cell);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map));
SetFeedbackExtra(*name);
} else {
if (name.is_null()) {
- SetFeedback(*cell);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map));
SetFeedbackExtra(*handler);
} else {
Handle<WeakFixedArray> array = EnsureExtraArrayOfSize(2);
SetFeedback(*name);
- array->Set(0, HeapObjectReference::Strong(*cell));
+ array->Set(0, HeapObjectReference::Weak(*receiver_map));
array->Set(1, *handler);
}
}
@@ -756,8 +782,7 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map = maps[current];
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- array->Set(current * 2, HeapObjectReference::Strong(*cell));
+ array->Set(current * 2, HeapObjectReference::Weak(*map));
DCHECK(IC::IsHandler(*handlers->at(current)));
array->Set(current * 2 + 1, *handlers->at(current));
}
@@ -770,33 +795,34 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
IsStoreInArrayLiteralICKind(kind()));
Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
+ MaybeObject* feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- if (feedback->IsWeakFixedArray() || is_named_feedback) {
+ HeapObject* heap_object;
+ if ((feedback->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsWeakFixedArray()) ||
+ is_named_feedback) {
int found = 0;
WeakFixedArray* array;
if (is_named_feedback) {
array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
} else {
- array = WeakFixedArray::cast(feedback);
+ array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
+ HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
- WeakCell* cell = WeakCell::cast(array->Get(i)->ToStrongHeapObject());
- if (!cell->cleared()) {
- Map* map = Map::cast(cell->value());
+ DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
+ if (array->Get(i)->ToWeakHeapObject(&heap_object)) {
+ Map* map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
found++;
}
}
return found;
- } else if (feedback->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(feedback);
- if (!cell->cleared()) {
- Map* map = Map::cast(cell->value());
- maps->push_back(handle(map, isolate));
- return 1;
- }
+ } else if (feedback->ToWeakHeapObject(&heap_object)) {
+ Map* map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ return 1;
}
return 0;
@@ -807,21 +833,25 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
- Object* feedback = GetFeedback();
+ MaybeObject* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- if (feedback->IsWeakFixedArray() || is_named_feedback) {
+ HeapObject* heap_object;
+ if ((feedback->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsWeakFixedArray()) ||
+ is_named_feedback) {
WeakFixedArray* array;
if (is_named_feedback) {
array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
} else {
- array = WeakFixedArray::cast(feedback);
+ array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
+ HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
- WeakCell* cell = WeakCell::cast(array->Get(i)->ToStrongHeapObject());
- if (!cell->cleared()) {
- Map* array_map = Map::cast(cell->value());
+ DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
+ if (array->Get(i)->ToWeakHeapObject(&heap_object)) {
+ Map* array_map = Map::cast(heap_object);
if (array_map == *map &&
!array->Get(i + increment - 1)->IsClearedWeakHeapObject()) {
MaybeObject* handler = array->Get(i + increment - 1);
@@ -830,15 +860,12 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
}
}
}
- } else if (feedback->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(feedback);
- if (!cell->cleared()) {
- Map* cell_map = Map::cast(cell->value());
- if (cell_map == *map && !GetFeedbackExtra()->IsClearedWeakHeapObject()) {
- MaybeObject* handler = GetFeedbackExtra();
- DCHECK(IC::IsHandler(handler));
- return handle(handler, isolate);
- }
+ } else if (feedback->ToWeakHeapObject(&heap_object)) {
+ Map* cell_map = Map::cast(heap_object);
+ if (cell_map == *map && !GetFeedbackExtra()->IsClearedWeakHeapObject()) {
+ MaybeObject* handler = GetFeedbackExtra();
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
}
}
@@ -852,22 +879,26 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()));
- Object* feedback = GetFeedback();
+ MaybeObject* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- if (feedback->IsWeakFixedArray() || is_named_feedback) {
+ HeapObject* heap_object;
+ if ((feedback->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsWeakFixedArray()) ||
+ is_named_feedback) {
WeakFixedArray* array;
if (is_named_feedback) {
array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
} else {
- array = WeakFixedArray::cast(feedback);
+ array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
+ HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
- WeakCell* cell = WeakCell::cast(array->Get(i)->ToStrongHeapObject());
// Be sure to skip handlers whose maps have been cleared.
- if (!cell->cleared() &&
+ DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
+ if (array->Get(i)->ToWeakHeapObject(&heap_object) &&
!array->Get(i + increment - 1)->IsClearedWeakHeapObject()) {
MaybeObject* handler = array->Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
@@ -875,10 +906,9 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
count++;
}
}
- } else if (feedback->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(feedback);
+ } else if (feedback->ToWeakHeapObject(&heap_object)) {
MaybeObject* extra = GetFeedbackExtra();
- if (!cell->cleared() && !extra->IsClearedWeakHeapObject()) {
+ if (!extra->IsClearedWeakHeapObject()) {
DCHECK(IC::IsHandler(extra));
code_list->push_back(handle(extra, isolate));
count++;
@@ -889,9 +919,9 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
Name* FeedbackNexus::FindFirstName() const {
if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
- Object* feedback = GetFeedback();
+ MaybeObject* feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback);
+ return Name::cast(feedback->ToStrongHeapObject());
}
}
return nullptr;
@@ -930,7 +960,8 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
if (maybe_code_handler.object()->IsStoreHandler()) {
Handle<StoreHandler> data_handler =
Handle<StoreHandler>::cast(maybe_code_handler.object());
- handler = handle(Code::cast(data_handler->smi_handler()));
+ handler = handle(Code::cast(data_handler->smi_handler()),
+ vector()->GetIsolate());
} else if (maybe_code_handler.object()->IsSmi()) {
// Skip proxy handlers.
DCHECK_EQ(*(maybe_code_handler.object()),
@@ -961,8 +992,9 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()));
- Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
+ MaybeObject* feedback = GetFeedback();
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()->ToObject()));
}
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
@@ -970,33 +1002,35 @@ IcCheckType FeedbackNexus::GetKeyType() const {
BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
- int feedback = Smi::ToInt(GetFeedback());
+ int feedback = Smi::ToInt(GetFeedback()->ToSmi());
return BinaryOperationHintFromFeedback(feedback);
}
CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
- int feedback = Smi::ToInt(GetFeedback());
+ int feedback = Smi::ToInt(GetFeedback()->ToSmi());
return CompareOperationHintFromFeedback(feedback);
}
ForInHint FeedbackNexus::GetForInFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
- int feedback = Smi::ToInt(GetFeedback());
+ int feedback = Smi::ToInt(GetFeedback()->ToSmi());
return ForInHintFromFeedback(feedback);
}
Handle<FeedbackCell> FeedbackNexus::GetFeedbackCell() const {
DCHECK_EQ(FeedbackSlotKind::kCreateClosure, kind());
- return handle(FeedbackCell::cast(GetFeedback()));
+ return handle(FeedbackCell::cast(GetFeedback()->ToObject()),
+ vector()->GetIsolate());
}
MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
- if (feedback->IsWeakCell() && !WeakCell::cast(feedback)->cleared()) {
- return handle(JSObject::cast(WeakCell::cast(feedback)->value()), isolate);
+ MaybeObject* feedback = GetFeedback();
+ HeapObject* heap_object;
+ if (feedback->ToWeakHeapObject(&heap_object)) {
+ return handle(JSObject::cast(heap_object), isolate);
}
return MaybeHandle<JSObject>();
}
@@ -1019,30 +1053,35 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
+ MaybeObject* const feedback = GetFeedback();
// Map source position to collection of types
Handle<SimpleNumberDictionary> types;
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
types = SimpleNumberDictionary::New(isolate, 1);
} else {
- types = handle(SimpleNumberDictionary::cast(feedback));
+ types = handle(SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()),
+ isolate);
}
Handle<ArrayList> position_specific_types;
- int entry = types->FindEntry(position);
+ int entry = types->FindEntry(isolate, position);
if (entry == SimpleNumberDictionary::kNotFound) {
position_specific_types = ArrayList::New(isolate, 1);
types = SimpleNumberDictionary::Set(
- types, position, ArrayList::Add(position_specific_types, type));
+ isolate, types, position,
+ ArrayList::Add(isolate, position_specific_types, type));
} else {
DCHECK(types->ValueAt(entry)->IsArrayList());
- position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
+ position_specific_types =
+ handle(ArrayList::cast(types->ValueAt(entry)), isolate);
if (!InList(position_specific_types, type)) { // Add type
types = SimpleNumberDictionary::Set(
- types, position, ArrayList::Add(position_specific_types, type));
+ isolate, types, position,
+ ArrayList::Add(isolate, position_specific_types, type));
}
}
SetFeedback(*types);
@@ -1053,14 +1092,15 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
std::vector<int> source_positions;
Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
+ MaybeObject* const feedback = GetFeedback();
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return source_positions;
}
- Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
- SimpleNumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types(
+ SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()), isolate);
for (int index = SimpleNumberDictionary::kElementsStartIndex;
index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
@@ -1079,22 +1119,23 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
+ MaybeObject* const feedback = GetFeedback();
std::vector<Handle<String>> types_for_position;
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return types_for_position;
}
- Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
- SimpleNumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types(
+ SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()), isolate);
- int entry = types->FindEntry(position);
+ int entry = types->FindEntry(isolate, position);
if (entry == SimpleNumberDictionary::kNotFound) {
return types_for_position;
}
DCHECK(types->ValueAt(entry)->IsArrayList());
Handle<ArrayList> position_specific_types =
- Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)));
+ Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)), isolate);
for (int i = 0; i < position_specific_types->Length(); i++) {
Object* t = position_specific_types->Get(i);
types_for_position.push_back(Handle<String>(String::cast(t), isolate));
@@ -1119,15 +1160,14 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
Handle<ArrayList> position_specific_types(
- ArrayList::cast(feedback->get(value_index)));
+ ArrayList::cast(feedback->get(value_index)), isolate);
int position = Smi::ToInt(key);
JSObject::AddDataElement(
type_profile, position,
isolate->factory()->NewJSArrayWithElements(
- ArrayList::Elements(position_specific_types)),
- PropertyAttributes::NONE)
- .ToHandleChecked();
+ ArrayList::Elements(isolate, position_specific_types)),
+ PropertyAttributes::NONE);
}
}
return type_profile;
@@ -1138,14 +1178,17 @@ JSObject* FeedbackNexus::GetTypeProfile() const {
DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
+ MaybeObject* const feedback = GetFeedback();
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(isolate,
- handle(SimpleNumberDictionary::cast(feedback)));
+ return *ConvertToJSObject(
+ isolate,
+ handle(SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()),
+ isolate));
}
void FeedbackNexus::ResetTypeProfile() {
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index f40ffcd742..880e4713d4 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -140,8 +140,13 @@ class FeedbackMetadata;
// - optimized code cell (weak cell or Smi marker)
// followed by an array of feedback slots, of length determined by the feedback
// metadata.
-class FeedbackVector : public HeapObject {
+class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ // Use the mixin methods over the HeapObject methods.
+ // TODO(v8:7786) Remove once the HeapObject methods are gone.
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Casting.
static inline FeedbackVector* cast(Object* obj);
@@ -243,12 +248,7 @@ class FeedbackVector : public HeapObject {
return GetLanguageModeFromSlotKind(GetKind(slot));
}
-#ifdef OBJECT_PRINT
- // For gdb debugging.
- void Print();
-#endif // OBJECT_PRINT
-
- static void AssertNoLegacyTypes(Object* object);
+ static void AssertNoLegacyTypes(MaybeObject* object);
DECL_PRINTER(FeedbackVector)
DECL_VERIFIER(FeedbackVector)
@@ -445,11 +445,6 @@ class FeedbackMetadata : public HeapObject {
V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
Isolate* isolate, const FeedbackVectorSpec* spec = nullptr);
-#ifdef OBJECT_PRINT
- // For gdb debugging.
- void Print();
-#endif // OBJECT_PRINT
-
DECL_PRINTER(FeedbackMetadata)
DECL_VERIFIER(FeedbackMetadata)
@@ -609,7 +604,7 @@ class FeedbackNexus final {
void ConfigurePremonomorphic();
bool ConfigureMegamorphic(IcCheckType property_type);
- inline Object* GetFeedback() const;
+ inline MaybeObject* GetFeedback() const;
inline MaybeObject* GetFeedbackExtra() const;
inline Isolate* GetIsolate() const;
@@ -684,6 +679,8 @@ class FeedbackNexus final {
protected:
inline void SetFeedback(Object* feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedback(MaybeObject* feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetFeedbackExtra(Object* feedback_extra,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetFeedbackExtra(MaybeObject* feedback_extra,
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index e8226006df..052869f308 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -162,6 +162,7 @@ struct MaybeBoolFlag {
FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt)
#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
+#define DEFINE_UINT64(nam, def, cmt) FLAG(UINT64, uint64_t, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_SIZE_T(nam, def, cmt) FLAG(SIZE_T, size_t, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
@@ -212,12 +213,13 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_array_flatten, "harmony Array.prototype.flat{ten,Map}")
+ V(harmony_await_optimization, "harmony await taking 1 tick")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_locale, "Intl.Locale")
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_locale, "Intl.Locale") \
+ V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
@@ -227,21 +229,19 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_public_fields, "harmony public fields in class literals") \
V(harmony_private_fields, "harmony private fields in class literals") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
- V(harmony_string_matchall, "harmony String.prototype.matchAll")
+ V(harmony_string_matchall, "harmony String.prototype.matchAll") \
+ V(harmony_symbol_description, "harmony Symbol.prototype.description")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING(V) \
V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_regexp_named_captures, "harmony regexp named captures") \
- V(harmony_regexp_property, "harmony Unicode regexp property classes") \
V(harmony_function_tostring, "harmony Function.prototype.toString") \
- V(harmony_promise_finally, "harmony Promise.prototype.finally") \
- V(harmony_optional_catch_binding, "allow omitting binding in catch blocks") \
V(harmony_import_meta, "harmony import.meta property") \
V(harmony_bigint, "harmony arbitrary precision integers") \
V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_array_prototype_values, "harmony Array.prototype.values")
+ V(harmony_array_prototype_values, "harmony Array.prototype.values") \
+ V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -338,7 +338,7 @@ DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
DEFINE_BOOL(ignition_filter_expression_positions, true,
"filter expression positions before the bytecode pipeline")
-DEFINE_BOOL(ignition_share_named_property_feedback, false,
+DEFINE_BOOL(ignition_share_named_property_feedback, true,
"share feedback slots when loading the same named property from "
"the same object")
DEFINE_BOOL(print_bytecode, false,
@@ -432,6 +432,8 @@ DEFINE_STRING(csa_trap_on_node, nullptr,
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
+DEFINE_BOOL(turbo_stats_wasm, false,
+ "print TurboFan statistics of wasm compilations")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
@@ -530,15 +532,22 @@ DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
DEFINE_BOOL(wasm_test_streaming, false,
"use streaming compilation instead of async compilation for tests")
-// Parallel compilation confuses turbo_stats, force single threaded.
-DEFINE_VALUE_IMPLICATION(turbo_stats, wasm_num_compilation_tasks, 0)
DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum memory size of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
-DEFINE_BOOL(wasm_tier_up, false,
- "enable basic tiering up to the optimizing compiler")
+// Enable Liftoff by default on ia32 and x64. More architectures will follow
+// once they are implemented and sufficiently tested.
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+DEFINE_BOOL(
+ wasm_tier_up, true,
+ "enable wasm baseline compilation and tier up to the optimizing compiler")
+#else
+DEFINE_BOOL(
+ wasm_tier_up, false,
+ "enable wasm baseline compilation and tier up to the optimizing compiler")
DEFINE_IMPLICATION(future, wasm_tier_up)
+#endif
DEFINE_IMPLICATION(wasm_tier_up, liftoff)
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_decode_time, false,
@@ -552,14 +561,17 @@ DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, false,
- "enable liftoff, the experimental wasm baseline compiler")
+ "enable Liftoff, the baseline compiler for WebAssembly")
DEFINE_DEBUG_BOOL(trace_liftoff, false,
- "trace liftoff, the wasm baseline compiler")
-DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
+ "trace Liftoff, the baseline compiler for WebAssembly")
DEFINE_DEBUG_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
DEFINE_BOOL(wasm_trace_memory, false,
"print all memory updates performed in wasm code")
+// Fuzzers use {wasm_tier_mask_for_testing} together with {liftoff} and
+// {no_wasm_tier_up} to force some functions to be compiled with Turbofan.
+DEFINE_INT(wasm_tier_mask_for_testing, 0,
+ "bitmask of functions to compile with TurboFan instead of Liftoff")
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
@@ -584,11 +596,11 @@ DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
"enable non-trapping float-to-int conversions for wasm")
-DEFINE_BOOL(experimental_wasm_se, false,
+DEFINE_BOOL(experimental_wasm_se, true,
"enable prototype sign extension opcodes for wasm")
DEFINE_BOOL(experimental_wasm_anyref, false,
"enable prototype anyref support for wasm")
-DEFINE_BOOL(experimental_wasm_mut_global, false,
+DEFINE_BOOL(experimental_wasm_mut_global, true,
"enable prototype import/export mutable global support for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
@@ -600,6 +612,8 @@ DEFINE_BOOL(wasm_no_stack_checks, false,
DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
+DEFINE_BOOL(wasm_trap_handler_fallback, false,
+ "Use bounds checks if guarded memory is not available")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"Generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
@@ -691,6 +705,9 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking")
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
+DEFINE_INT(ephemeron_fixpoint_iterations, 10,
+ "number of fixpoint iterations it takes to switch to linear "
+ "ephemeron algorithm")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
@@ -776,6 +793,12 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
DEFINE_BOOL(fast_promotion_new_space, false,
"fast promote new space on high survival rates")
+DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
+
+DEFINE_BOOL(young_generation_large_objects, false,
+ "allocates large objects by default in the young generation large "
+ "object space")
+
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
@@ -839,6 +862,7 @@ DEFINE_BOOL(enable_experimental_builtins, false,
"enable new csa-based experimental builtins")
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
+DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
// builtins.cc
DEFINE_BOOL(allow_unsafe_function_constructor, false,
@@ -902,7 +926,6 @@ DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
// debugger
-DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_BOOL(
trace_side_effect_free_debug_evaluate, false,
"print debug messages for side-effect-free debug-evaluate for testing")
@@ -1019,9 +1042,9 @@ DEFINE_BOOL(randomize_hashes, true,
"(with snapshots this option cannot override the baked-in seed)")
DEFINE_BOOL(rehash_snapshot, true,
"rehash strings from the snapshot to override the baked-in seed")
-DEFINE_INT(hash_seed, 0,
- "Fixed seed to use to hash property keys (0 means random)"
- "(with snapshots this option cannot override the baked-in seed)")
+DEFINE_UINT64(hash_seed, 0,
+ "Fixed seed to use to hash property keys (0 means random)"
+ "(with snapshots this option cannot override the baked-in seed)")
DEFINE_INT(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -1039,6 +1062,13 @@ DEFINE_INT(runtime_stats, 0,
DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
+#ifdef V8_EMBEDDED_BUILTINS
+#define V8_EMBEDDED_BUILTINS_BOOL true
+#else
+#define V8_EMBEDDED_BUILTINS_BOOL false
+#endif
+DEFINE_BOOL_READONLY(embedded_builtins, V8_EMBEDDED_BUILTINS_BOOL,
+ "Embed builtin code into the binary.")
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
DEFINE_BOOL(lazy_handler_deserialization, true,
@@ -1050,6 +1080,8 @@ DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
"Collect statistics on serialized objects.")
+DEFINE_UINT(serialization_chunk_size, 4096,
+ "Custom size for serialization chunks")
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
@@ -1133,7 +1165,7 @@ DEFINE_NEG_IMPLICATION(gdbjit, compact_code_space)
// checks.cc
#ifdef ENABLE_SLOW_DCHECKS
-DEFINE_BOOL(enable_slow_asserts, false,
+DEFINE_BOOL(enable_slow_asserts, true,
"enable asserts that are slow to execute")
#endif
@@ -1211,9 +1243,6 @@ DEFINE_BOOL(log_function_events, false,
DEFINE_BOOL(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_BOOL(detailed_line_info, true,
- "Always generate detailed line information for CPU profiling.")
-
#if defined(ANDROID)
// Phones and tablets have processors that are much slower than desktop
// and laptop computers for which current heuristics are tuned.
@@ -1303,6 +1332,8 @@ DEFINE_BOOL(print_opt_code, false, "print optimized code")
DEFINE_STRING(print_opt_code_filter, "*", "filter for printing optimized code")
DEFINE_BOOL(print_code_verbose, false, "print more information for code")
DEFINE_BOOL(print_builtin_code, false, "print generated code for builtins")
+DEFINE_STRING(print_builtin_code_filter, "*",
+ "filter for printing builtin code")
DEFINE_BOOL(print_builtin_size, false, "print code size for builtins")
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index ef5772dce4..13046097a6 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -39,6 +39,7 @@ struct Flag {
TYPE_MAYBE_BOOL,
TYPE_INT,
TYPE_UINT,
+ TYPE_UINT64,
TYPE_FLOAT,
TYPE_SIZE_T,
TYPE_STRING,
@@ -78,6 +79,11 @@ struct Flag {
return reinterpret_cast<unsigned int*>(valptr_);
}
+ uint64_t* uint64_variable() const {
+ DCHECK(type_ == TYPE_UINT64);
+ return reinterpret_cast<uint64_t*>(valptr_);
+ }
+
double* float_variable() const {
DCHECK(type_ == TYPE_FLOAT);
return reinterpret_cast<double*>(valptr_);
@@ -121,6 +127,11 @@ struct Flag {
return *reinterpret_cast<const unsigned int*>(defptr_);
}
+ uint64_t uint64_default() const {
+ DCHECK(type_ == TYPE_UINT64);
+ return *reinterpret_cast<const uint64_t*>(defptr_);
+ }
+
double float_default() const {
DCHECK(type_ == TYPE_FLOAT);
return *reinterpret_cast<const double*>(defptr_);
@@ -152,6 +163,8 @@ struct Flag {
return *int_variable() == int_default();
case TYPE_UINT:
return *uint_variable() == uint_default();
+ case TYPE_UINT64:
+ return *uint64_variable() == uint64_default();
case TYPE_FLOAT:
return *float_variable() == float_default();
case TYPE_SIZE_T:
@@ -184,6 +197,9 @@ struct Flag {
case TYPE_UINT:
*uint_variable() = uint_default();
break;
+ case TYPE_UINT64:
+ *uint64_variable() = uint64_default();
+ break;
case TYPE_FLOAT:
*float_variable() = float_default();
break;
@@ -217,6 +233,8 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_INT: return "int";
case Flag::TYPE_UINT:
return "uint";
+ case Flag::TYPE_UINT64:
+ return "uint64";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_SIZE_T:
return "size_t";
@@ -243,6 +261,9 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
case Flag::TYPE_UINT:
os << *flag.uint_variable();
break;
+ case Flag::TYPE_UINT64:
+ os << *flag.uint64_variable();
+ break;
case Flag::TYPE_FLOAT:
os << *flag.float_variable();
break;
@@ -464,6 +485,12 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
return_code = j;
}
break;
+ case Flag::TYPE_UINT64:
+ if (!TryParseUnsigned(flag, arg, value, &endp,
+ flag->uint64_variable())) {
+ return_code = j;
+ }
+ break;
case Flag::TYPE_FLOAT:
*flag->float_variable() = strtod(value, &endp);
break;
@@ -604,13 +631,14 @@ void FlagList::PrintHelp() {
CpuFeatures::PrintTarget();
CpuFeatures::PrintFeatures();
- OFStream os(stdout);
+ StdoutStream os;
os << "Synopsis:\n"
" shell [options] [--shell] [<file>...]\n"
" d8 [options] [-e <string>] [--shell] [[--module] <file>...]\n\n"
" -e execute a string in V8\n"
" --shell run an interactive JavaScript shell\n"
" --module execute a file as a JavaScript module\n\n"
+ "Note: the --module option is implicitly enabled for *.mjs files.\n\n"
"Options:\n";
for (const Flag& f : flags) {
@@ -633,9 +661,9 @@ void ComputeFlagListHash() {
#ifdef DEBUG
modified_args_as_string << "debug";
#endif // DEBUG
-#ifdef V8_EMBEDDED_BUILTINS
- modified_args_as_string << "embedded";
-#endif // V8_EMBEDDED_BUILTINS
+ if (FLAG_embedded_builtins) {
+ modified_args_as_string << "embedded";
+ }
for (size_t i = 0; i < num_flags; ++i) {
Flag* current = &flags[i];
if (!current->IsDefault()) {
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index abc0c84639..1426c72bd7 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -234,13 +234,6 @@ class BuiltinFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(2);
};
-class InternalFrameConstants : public TypedFrameConstants {
- public:
- // FP-relative.
- static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- DEFINE_TYPED_FRAME_SIZES(1);
-};
-
class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index b3efd79780..a64404c8ed 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -209,6 +209,10 @@ inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
+inline WasmCompileLazyFrame::WasmCompileLazyFrame(
+ StackFrameIteratorBase* iterator)
+ : StandardFrame(iterator) {}
+
inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 2ba38e6b66..96930e6854 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -460,15 +460,16 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
iterator->isolate()->wasm_engine()->code_manager()->LookupCode(pc);
if (wasm_code != nullptr) {
switch (wasm_code->kind()) {
- case wasm::WasmCode::kInterpreterEntry:
- return WASM_INTERPRETER_ENTRY;
case wasm::WasmCode::kFunction:
return WASM_COMPILED;
- case wasm::WasmCode::kLazyStub:
- if (StackFrame::IsTypeMarker(marker)) break;
- return BUILTIN;
case wasm::WasmCode::kWasmToJsWrapper:
return WASM_TO_JS;
+ case wasm::WasmCode::kLazyStub:
+ return WASM_COMPILE_LAZY;
+ case wasm::WasmCode::kRuntimeStub:
+ return STUB;
+ case wasm::WasmCode::kInterpreterEntry:
+ return WASM_INTERPRETER_ENTRY;
default:
UNREACHABLE();
}
@@ -767,11 +768,11 @@ Script* StandardFrame::script() const {
}
Object* StandardFrame::receiver() const {
- return isolate()->heap()->undefined_value();
+ return ReadOnlyRoots(isolate()).undefined_value();
}
Object* StandardFrame::context() const {
- return isolate()->heap()->undefined_value();
+ return ReadOnlyRoots(isolate()).undefined_value();
}
int StandardFrame::position() const {
@@ -876,6 +877,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case WASM_TO_JS:
case WASM_COMPILED:
case WASM_INTERPRETER_ENTRY:
+ case WASM_COMPILE_LAZY:
frame_header_size = WasmCompiledFrameConstants::kFixedFrameSizeFromFp;
break;
case OPTIMIZED:
@@ -1050,7 +1052,8 @@ void JavaScriptFrame::GetFunctions(
std::vector<SharedFunctionInfo*> raw_functions;
GetFunctions(&raw_functions);
for (const auto& raw_function : raw_functions) {
- functions->push_back(Handle<SharedFunctionInfo>(raw_function));
+ functions->push_back(
+ Handle<SharedFunctionInfo>(raw_function, function()->GetIsolate()));
}
}
@@ -1074,7 +1077,7 @@ Object* JavaScriptFrame::unchecked_function() const {
// materialize some closures on the stack. The arguments marker object
// marks this case.
DCHECK(function_slot_object()->IsJSFunction() ||
- isolate()->heap()->arguments_marker() == function_slot_object());
+ ReadOnlyRoots(isolate()).arguments_marker() == function_slot_object());
return function_slot_object();
}
@@ -1248,7 +1251,7 @@ void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
kPointerSize; // Skip over return value slot.
// Only allow setting exception if previous value was the hole.
- CHECK_EQ(isolate()->heap()->the_hole_value(),
+ CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
Memory::Object_at(exception_argument_slot));
Memory::Object_at(exception_argument_slot) = exception;
}
@@ -1316,21 +1319,23 @@ WASM_SUMMARY_DISPATCH(int, byte_offset)
#undef WASM_SUMMARY_DISPATCH
int FrameSummary::WasmFrameSummary::SourcePosition() const {
- Handle<WasmSharedModuleData> shared(
- wasm_instance()->module_object()->shared(), isolate());
- return WasmSharedModuleData::GetSourcePosition(
- shared, function_index(), byte_offset(), at_to_number_conversion());
+ Handle<WasmModuleObject> module_object(wasm_instance()->module_object(),
+ isolate());
+ return WasmModuleObject::GetSourcePosition(module_object, function_index(),
+ byte_offset(),
+ at_to_number_conversion());
}
Handle<Script> FrameSummary::WasmFrameSummary::script() const {
- return handle(wasm_instance()->module_object()->shared()->script());
+ return handle(wasm_instance()->module_object()->script(),
+ wasm_instance()->GetIsolate());
}
Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
- Handle<WasmSharedModuleData> shared(
- wasm_instance()->module_object()->shared(), isolate());
- return WasmSharedModuleData::GetFunctionName(isolate(), shared,
- function_index());
+ Handle<WasmModuleObject> module_object(wasm_instance()->module_object(),
+ isolate());
+ return WasmModuleObject::GetFunctionName(isolate(), module_object,
+ function_index());
}
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
@@ -1748,35 +1753,33 @@ Address InternalFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
-Code* InternalFrame::unchecked_code() const {
- const int offset = InternalFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp() + offset);
- DCHECK_NOT_NULL(code);
- return reinterpret_cast<Code*>(code);
-}
-
+Code* InternalFrame::unchecked_code() const { UNREACHABLE(); }
void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM [");
- Script* script = this->script();
- accumulator->PrintName(script->name());
+ accumulator->PrintName(script()->name());
Address instruction_start = isolate()
->wasm_engine()
->code_manager()
->LookupCode(pc())
->instruction_start();
- int pc = static_cast<int>(this->pc() - instruction_start);
Vector<const uint8_t> raw_func_name =
- shared()->GetRawFunctionName(this->function_index());
+ module_object()->GetRawFunctionName(function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
memcpy(func_name, raw_func_name.start(), func_name_len);
func_name[func_name_len] = '\0';
- accumulator->Add("], function #%u ('%s'), pc=%p, pos=%d\n",
- this->function_index(), func_name, pc, this->position());
+ int pos = position();
+ const wasm::WasmModule* module = wasm_instance()->module_object()->module();
+ int func_index = function_index();
+ int func_code_offset = module->functions[func_index].code.offset();
+ accumulator->Add("], function #%u ('%s'), pc=%p (+0x%x), pos=%d (+%d)\n",
+ func_index, func_name, reinterpret_cast<void*>(pc()),
+ static_cast<int>(pc() - instruction_start), pos,
+ pos - func_code_offset);
if (mode != OVERVIEW) accumulator->Add("\n");
}
@@ -1802,19 +1805,15 @@ WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
return WasmInstanceObject::cast(instance);
}
-WasmSharedModuleData* WasmCompiledFrame::shared() const {
- return wasm_instance()->module_object()->shared();
-}
-
-WasmCompiledModule* WasmCompiledFrame::compiled_module() const {
- return wasm_instance()->compiled_module();
+WasmModuleObject* WasmCompiledFrame::module_object() const {
+ return wasm_instance()->module_object();
}
uint32_t WasmCompiledFrame::function_index() const {
return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Script* WasmCompiledFrame::script() const { return shared()->script(); }
+Script* WasmCompiledFrame::script() const { return module_object()->script(); }
int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
@@ -1898,16 +1897,14 @@ WasmDebugInfo* WasmInterpreterEntryFrame::debug_info() const {
return wasm_instance()->debug_info();
}
-WasmSharedModuleData* WasmInterpreterEntryFrame::shared() const {
- return wasm_instance()->module_object()->shared();
+WasmModuleObject* WasmInterpreterEntryFrame::module_object() const {
+ return wasm_instance()->module_object();
}
-WasmCompiledModule* WasmInterpreterEntryFrame::compiled_module() const {
- return wasm_instance()->compiled_module();
+Script* WasmInterpreterEntryFrame::script() const {
+ return module_object()->script();
}
-Script* WasmInterpreterEntryFrame::script() const { return shared()->script(); }
-
int WasmInterpreterEntryFrame::position() const {
return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
@@ -1920,6 +1917,27 @@ Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
+WasmInstanceObject* WasmCompileLazyFrame::wasm_instance() const {
+ return WasmInstanceObject::cast(*wasm_instance_slot());
+}
+
+Object** WasmCompileLazyFrame::wasm_instance_slot() const {
+ const int offset = WasmCompileLazyFrameConstants::kWasmInstanceOffset;
+ return &Memory::Object_at(fp() + offset);
+}
+
+void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
+ const int header_size = WasmCompileLazyFrameConstants::kFixedFrameSizeFromFp;
+ Object** base = &Memory::Object_at(sp());
+ Object** limit = &Memory::Object_at(fp() - header_size);
+ v->VisitRootPointers(Root::kTop, nullptr, base, limit);
+ v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
+}
+
+Address WasmCompileLazyFrame::GetCallerStackPointer() const {
+ return fp() + WasmCompileLazyFrameConstants::kCallerSPOffset;
+}
+
namespace {
@@ -1986,13 +2004,6 @@ void JavaScriptFrame::Print(StringStream* accumulator,
int parameters_count = ComputeParametersCount();
for (int i = 0; i < parameters_count; i++) {
accumulator->Add(",");
- // If we have a name for the parameter we print it. Nameless
- // parameters are either because we have more actual parameters
- // than formal parameters or because we have no scope information.
- if (i < scope_info->ParameterCount()) {
- accumulator->PrintName(scope_info->ParameterName(i));
- accumulator->Add("=");
- }
accumulator->Add("%o", GetParameter(i));
}
@@ -2010,26 +2021,9 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" {\n");
// Compute the number of locals and expression stack elements.
- int stack_locals_count = scope_info->StackLocalCount();
int heap_locals_count = scope_info->ContextLocalCount();
int expressions_count = ComputeExpressionsCount();
- // Print stack-allocated local variables.
- if (stack_locals_count > 0) {
- accumulator->Add(" // stack-allocated locals\n");
- }
- for (int i = 0; i < stack_locals_count; i++) {
- accumulator->Add(" var ");
- accumulator->PrintName(scope_info->StackLocalName(i));
- accumulator->Add(" = ");
- if (i < expressions_count) {
- accumulator->Add("%o", GetExpression(i));
- } else {
- accumulator->Add("// no expression found - inconsistent frame?");
- }
- accumulator->Add("\n");
- }
-
// Try to get hold of the context of this frame.
Context* context = nullptr;
if (this->context() != nullptr && this->context()->IsContext()) {
@@ -2063,11 +2057,10 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
// Print the expression stack.
- int expressions_start = stack_locals_count;
- if (expressions_start < expressions_count) {
+ if (0 < expressions_count) {
accumulator->Add(" // expression stack (top to bottom)\n");
}
- for (int i = expressions_count - 1; i >= expressions_start; i--) {
+ for (int i = expressions_count - 1; i >= 0; i--) {
accumulator->Add(" [%02d] : %o\n", i, GetExpression(i));
}
@@ -2123,21 +2116,15 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
}
void InternalFrame::Iterate(RootVisitor* v) const {
- wasm::WasmCode* wasm_code =
- isolate()->wasm_engine()->code_manager()->LookupCode(pc());
- if (wasm_code != nullptr) {
- DCHECK(wasm_code->kind() == wasm::WasmCode::kLazyStub);
- } else {
- Code* code = LookupCode();
- IteratePc(v, pc_address(), constant_pool_address(), code);
- // Internal frames typically do not receive any arguments, hence their stack
- // only contains tagged pointers.
- // We are misusing the has_tagged_params flag here to tell us whether
- // the full stack frame contains only tagged pointers or only raw values.
- // This is used for the WasmCompileLazy builtin, where we actually pass
- // untagged arguments and also store untagged values on the stack.
- if (code->has_tagged_params()) IterateExpressions(v);
- }
+ Code* code = LookupCode();
+ IteratePc(v, pc_address(), constant_pool_address(), code);
+ // Internal frames typically do not receive any arguments, hence their stack
+ // only contains tagged pointers.
+ // We are misusing the has_tagged_params flag here to tell us whether
+ // the full stack frame contains only tagged pointers or only raw values.
+ // This is used for the WasmCompileLazy builtin, where we actually pass
+ // untagged arguments and also store untagged values on the stack.
+ if (code->has_tagged_params()) IterateExpressions(v);
}
// -------------------------------------------------------------------------
@@ -2166,47 +2153,5 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
}
return entry;
}
-
-
-// -------------------------------------------------------------------------
-
-
-#define DEFINE_WRAPPER(type, field) \
-class field##_Wrapper : public ZoneObject { \
- public: /* NOLINT */ \
- field##_Wrapper(const field& original) : frame_(original) { \
- } \
- field frame_; \
-};
-STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
-#undef DEFINE_WRAPPER
-
-static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
-#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: { \
- field##_Wrapper* wrapper = \
- new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
- return &wrapper->frame_; \
- }
-
- switch (frame->type()) {
- STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: UNREACHABLE();
- }
-#undef FRAME_TYPE_CASE
- return nullptr;
-}
-
-
-Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
- ZoneVector<StackFrame*> frames(zone);
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
- frames.push_back(frame);
- }
- return Vector<StackFrame*>(frames.data(), frames.size());
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 54dcdcedfb..40fce95e7f 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -26,10 +26,9 @@ class RootVisitor;
class StackFrameIteratorBase;
class StringStream;
class ThreadLocalTop;
-class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
-class WasmSharedModuleData;
+class WasmModuleObject;
class InnerPointerToCodeCache {
public:
@@ -96,6 +95,7 @@ class StackHandler BASE_EMBEDDED {
V(JS_TO_WASM, JsToWasmFrame) \
V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
+ V(WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
V(INTERPRETED, InterpretedFrame) \
V(STUB, StubFrame) \
V(BUILTIN_CONTINUATION, BuiltinContinuationFrame) \
@@ -201,6 +201,7 @@ class StackFrame BASE_EMBEDDED {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
+ bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_wasm_interpreter_entry() const {
@@ -786,10 +787,6 @@ class JavaScriptFrame : public StandardFrame {
virtual int GetNumberOfIncomingArguments() const;
- // Garbage collection support. Iterates over incoming arguments,
- // receiver, and any callee-saved registers.
- void IterateArguments(RootVisitor* v) const;
-
virtual void PrintFrameKind(StringStream* accumulator) const {}
private:
@@ -1000,8 +997,7 @@ class WasmCompiledFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
- WasmCompiledModule* compiled_module() const;
- WasmSharedModuleData* shared() const;
+ WasmModuleObject* module_object() const;
};
class WasmInterpreterEntryFrame final : public StandardFrame {
@@ -1040,8 +1036,7 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
- WasmCompiledModule* compiled_module() const;
- WasmSharedModuleData* shared() const;
+ WasmModuleObject* module_object() const;
};
class WasmToJsFrame : public StubFrame {
@@ -1077,6 +1072,31 @@ class CWasmEntryFrame : public StubFrame {
friend class StackFrameIteratorBase;
};
+class WasmCompileLazyFrame : public StandardFrame {
+ public:
+ Type type() const override { return WASM_COMPILE_LAZY; }
+
+ Code* unchecked_code() const override { return nullptr; }
+ WasmInstanceObject* wasm_instance() const;
+ Object** wasm_instance_slot() const;
+
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override;
+
+ static WasmCompileLazyFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm_compile_lazy());
+ return static_cast<WasmCompileLazyFrame*>(frame);
+ }
+
+ protected:
+ inline explicit WasmCompileLazyFrame(StackFrameIteratorBase* iterator);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class InternalFrame: public StandardFrame {
public:
Type type() const override { return INTERNAL; }
@@ -1303,11 +1323,6 @@ class SafeStackFrameIterator: public StackFrameIteratorBase {
StackFrame::Type top_frame_type_;
ExternalCallbackScope* external_callback_scope_;
};
-
-// Reads all frames on the current stack and copies them into the current
-// zone memory.
-Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 274e09b2ea..718673e697 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+using AtomicsWaitEvent = v8::Isolate::AtomicsWaitEvent;
+
base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
LAZY_INSTANCE_INITIALIZER;
@@ -71,6 +73,17 @@ void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
node->prev_ = node->next_ = nullptr;
}
+void AtomicsWaitWakeHandle::Wake() {
+ // Adding a separate `NotifyWake()` variant that doesn't acquire the lock
+ // itself would likely just add unnecessary complexity..
+ // The split lock by itself isn’t an issue, as long as the caller properly
+ // synchronizes this with the closing `AtomicsWaitCallback`.
+ {
+ base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
+ stopped_ = true;
+ }
+ isolate_->futex_wait_list_node()->NotifyWake();
+}
Object* FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
@@ -81,14 +94,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
int32_t* p =
reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
-
- if (*p != value) {
- return isolate->heap()->not_equal();
- }
-
FutexWaitListNode* node = isolate->futex_wait_list_node();
-
node->backing_store_ = backing_store;
node->wait_addr_ = addr;
node->waiting_ = true;
@@ -112,77 +118,119 @@ Object* FutexEmulation::Wait(Isolate* isolate,
}
}
- base::TimeTicks start_time = base::TimeTicks::Now();
- base::TimeTicks timeout_time = start_time + rel_timeout;
- base::TimeTicks current_time = start_time;
+ AtomicsWaitWakeHandle stop_handle(isolate);
- wait_list_.Pointer()->AddNode(node);
+ isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
+ addr, value, rel_timeout_ms, &stop_handle);
- Object* result;
-
- while (true) {
- bool interrupted = node->interrupted_;
- node->interrupted_ = false;
-
- // Unlock the mutex here to prevent deadlock from lock ordering between
- // mutex_ and mutexes locked by HandleInterrupts.
- mutex_.Pointer()->Unlock();
-
- // Because the mutex is unlocked, we have to be careful about not dropping
- // an interrupt. The notification can happen in three different places:
- // 1) Before Wait is called: the notification will be dropped, but
- // interrupted_ will be set to 1. This will be checked below.
- // 2) After interrupted has been checked here, but before mutex_ is
- // acquired: interrupted is checked again below, with mutex_ locked.
- // Because the wakeup signal also acquires mutex_, we know it will not
- // be able to notify until mutex_ is released below, when waiting on the
- // condition variable.
- // 3) After the mutex is released in the call to WaitFor(): this
- // notification will wake up the condition variable. node->waiting() will
- // be false, so we'll loop and then check interrupts.
- if (interrupted) {
- Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
- if (interrupt_object->IsException(isolate)) {
- result = interrupt_object;
- mutex_.Pointer()->Lock();
- break;
- }
- }
+ if (isolate->has_scheduled_exception()) {
+ node->waiting_ = false;
+ return isolate->PromoteScheduledException();
+ }
- mutex_.Pointer()->Lock();
+ Object* result;
+ AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;
- if (node->interrupted_) {
- // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
- continue;
- }
+ do { // Not really a loop, just makes it easier to break out early.
+ base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ // Reset node->waiting_ = false when leaving this scope (but while
+ // still holding the lock).
+ ResetWaitingOnScopeExit reset_waiting(node);
- if (!node->waiting_) {
- result = isolate->heap()->ok();
+ if (*p != value) {
+ result = ReadOnlyRoots(isolate).not_equal();
+ callback_result = AtomicsWaitEvent::kNotEqual;
break;
}
- // No interrupts, now wait.
+ base::TimeTicks timeout_time;
+ base::TimeTicks current_time;
+
if (use_timeout) {
current_time = base::TimeTicks::Now();
- if (current_time >= timeout_time) {
- result = isolate->heap()->timed_out();
+ timeout_time = current_time + rel_timeout;
+ }
+
+ wait_list_.Pointer()->AddNode(node);
+
+ while (true) {
+ bool interrupted = node->interrupted_;
+ node->interrupted_ = false;
+
+ // Unlock the mutex here to prevent deadlock from lock ordering between
+ // mutex_ and mutexes locked by HandleInterrupts.
+ mutex_.Pointer()->Unlock();
+
+ // Because the mutex is unlocked, we have to be careful about not dropping
+ // an interrupt. The notification can happen in three different places:
+ // 1) Before Wait is called: the notification will be dropped, but
+ // interrupted_ will be set to 1. This will be checked below.
+ // 2) After interrupted has been checked here, but before mutex_ is
+ // acquired: interrupted is checked again below, with mutex_ locked.
+ // Because the wakeup signal also acquires mutex_, we know it will not
+ // be able to notify until mutex_ is released below, when waiting on
+ // the condition variable.
+ // 3) After the mutex is released in the call to WaitFor(): this
+ // notification will wake up the condition variable. node->waiting() will
+ // be false, so we'll loop and then check interrupts.
+ if (interrupted) {
+ Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ if (interrupt_object->IsException(isolate)) {
+ result = interrupt_object;
+ callback_result = AtomicsWaitEvent::kTerminatedExecution;
+ mutex_.Pointer()->Lock();
+ break;
+ }
+ }
+
+ mutex_.Pointer()->Lock();
+
+ if (node->interrupted_) {
+ // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
+ continue;
+ }
+
+ if (stop_handle.has_stopped()) {
+ node->waiting_ = false;
+ callback_result = AtomicsWaitEvent::kAPIStopped;
+ }
+
+ if (!node->waiting_) {
+ result = ReadOnlyRoots(isolate).ok();
break;
}
- base::TimeDelta time_until_timeout = timeout_time - current_time;
- DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
- bool wait_for_result =
- node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
- USE(wait_for_result);
- } else {
- node->cond_.Wait(mutex_.Pointer());
+ // No interrupts, now wait.
+ if (use_timeout) {
+ current_time = base::TimeTicks::Now();
+ if (current_time >= timeout_time) {
+ result = ReadOnlyRoots(isolate).timed_out();
+ callback_result = AtomicsWaitEvent::kTimedOut;
+ break;
+ }
+
+ base::TimeDelta time_until_timeout = timeout_time - current_time;
+ DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
+ bool wait_for_result =
+ node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
+ USE(wait_for_result);
+ } else {
+ node->cond_.Wait(mutex_.Pointer());
+ }
+
+ // Spurious wakeup, interrupt or timeout.
}
- // Spurious wakeup, interrupt or timeout.
- }
+ wait_list_.Pointer()->RemoveNode(node);
+ } while (0);
- wait_list_.Pointer()->RemoveNode(node);
- node->waiting_ = false;
+ isolate->RunAtomicsWaitCallback(callback_result, array_buffer, addr, value,
+ rel_timeout_ms, nullptr);
+
+ if (isolate->has_scheduled_exception()) {
+ CHECK_NE(callback_result, AtomicsWaitEvent::kTerminatedExecution);
+ result = isolate->PromoteScheduledException();
+ }
return result;
}
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index 801198fab8..a1580099d6 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -35,6 +35,18 @@ class Handle;
class Isolate;
class JSArrayBuffer;
+class AtomicsWaitWakeHandle {
+ public:
+ explicit AtomicsWaitWakeHandle(Isolate* isolate) : isolate_(isolate) {}
+
+ void Wake();
+ inline bool has_stopped() const { return stopped_; }
+
+ private:
+ Isolate* isolate_;
+ bool stopped_ = false;
+};
+
class FutexWaitListNode {
public:
FutexWaitListNode()
@@ -50,12 +62,17 @@ class FutexWaitListNode {
private:
friend class FutexEmulation;
friend class FutexWaitList;
+ friend class ResetWaitingOnScopeExit;
base::ConditionVariable cond_;
+ // prev_ and next_ are protected by FutexEmulation::mutex_.
FutexWaitListNode* prev_;
FutexWaitListNode* next_;
void* backing_store_;
size_t wait_addr_;
+ // waiting_ and interrupted_ are protected by FutexEmulation::mutex_
+ // if this node is currently contained in FutexEmulation::wait_list_
+ // or an AtomicsWaitWakeHandle has access to it.
bool waiting_;
bool interrupted_;
@@ -79,6 +96,16 @@ class FutexWaitList {
DISALLOW_COPY_AND_ASSIGN(FutexWaitList);
};
+class ResetWaitingOnScopeExit {
+ public:
+ explicit ResetWaitingOnScopeExit(FutexWaitListNode* node) : node_(node) {}
+ ~ResetWaitingOnScopeExit() { node_->waiting_ = false; }
+
+ private:
+ FutexWaitListNode* node_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResetWaitingOnScopeExit);
+};
class FutexEmulation : public AllStatic {
public:
@@ -109,7 +136,13 @@ class FutexEmulation : public AllStatic {
private:
friend class FutexWaitListNode;
+ friend class AtomicsWaitWakeHandle;
+ // `mutex_` protects the composition of `wait_list_` (i.e. no elements may be
+ // added or removed without holding this mutex), as well as the `waiting_`
+ // and `interrupted_` fields for each individual list node that is currently
+ // part of the list. It must be the mutex used together with the `cond_`
+ // condition variable of such nodes.
static base::LazyMutex mutex_;
static base::LazyInstance<FutexWaitList>::type wait_list_;
};
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 48a0228006..4e2587dec2 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1171,17 +1171,19 @@ class DebugInfoSection : public DebugSection {
fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
int params = scope->ParameterCount();
- int slots = scope->StackLocalCount();
int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope->StackLocalCount();
int current_abbreviation = 4;
+ EmbeddedVector<char, 256> buffer;
+ StringBuilder builder(buffer.start(), buffer.length());
+
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
- w->WriteString(
- scope->ParameterName(param)->ToCString(DISALLOW_NULLS).get());
+ builder.Reset();
+ builder.AddFormatted("param%d", param);
+ w->WriteString(builder.Finalize());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1192,16 +1194,6 @@ class DebugInfoSection : public DebugSection {
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
- EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
-
- for (int slot = 0; slot < slots; ++slot) {
- w->WriteULEB128(current_abbreviation++);
- builder.Reset();
- builder.AddFormatted("slot%d", slot);
- w->WriteString(builder.Finalize());
- }
-
// See contexts.h for more information.
DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, 4);
DCHECK_EQ(Context::SCOPE_INFO_INDEX, 0);
@@ -1226,20 +1218,6 @@ class DebugInfoSection : public DebugSection {
w->WriteString(builder.Finalize());
}
- for (int local = 0; local < locals; ++local) {
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(
- scope->StackLocalName(local)->ToCString(DISALLOW_NULLS).get());
- w->Write<uint32_t>(ty_offset);
- Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
- uintptr_t block_start = w->position();
- w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(
- JavaScriptFrameConstants::kLocal0Offset -
- kPointerSize * local);
- block_size.set(static_cast<uint32_t>(w->position() - block_start));
- }
-
{
w->WriteULEB128(current_abbreviation++);
w->WriteString("__function");
@@ -1370,13 +1348,11 @@ class DebugAbbrevSection : public DebugSection {
if (extra_info) {
ScopeInfo* scope = desc_->scope_info();
int params = scope->ParameterCount();
- int slots = scope->StackLocalCount();
int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope->StackLocalCount();
- // Total children is params + slots + context_slots + internal_slots +
- // locals + 2 (__function and __context).
+ // Total children is params + context_slots + internal_slots + 2
+ // (__function and __context).
// The extra duplication below seems to be necessary to keep
// gdb from getting upset on OSX.
@@ -1408,10 +1384,6 @@ class DebugAbbrevSection : public DebugSection {
WriteVariableAbbreviation(w, current_abbreviation++, true, true);
}
- for (int slot = 0; slot < slots; ++slot) {
- WriteVariableAbbreviation(w, current_abbreviation++, false, false);
- }
-
for (int internal_slot = 0;
internal_slot < internal_slots;
++internal_slot) {
@@ -1424,10 +1396,6 @@ class DebugAbbrevSection : public DebugSection {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
- for (int local = 0; local < locals; ++local) {
- WriteVariableAbbreviation(w, current_abbreviation++, true, false);
- }
-
// The function.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
@@ -1885,7 +1853,7 @@ extern "C" {
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(Object* object) {
- OFStream os(stdout);
+ StdoutStream os;
object->Print(os);
os << std::flush;
}
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 0e9b678ceb..c7e5e76b34 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -509,30 +509,16 @@ class GlobalHandles::NodeIterator {
class GlobalHandles::PendingPhantomCallbacksSecondPassTask
: public v8::internal::CancelableTask {
public:
- // Takes ownership of the contents of pending_phantom_callbacks, leaving it in
- // the same state it would be after a call to Clear().
- PendingPhantomCallbacksSecondPassTask(
- std::vector<PendingPhantomCallback>* pending_phantom_callbacks,
- Isolate* isolate)
- : CancelableTask(isolate), isolate_(isolate) {
- pending_phantom_callbacks_.swap(*pending_phantom_callbacks);
- }
+ PendingPhantomCallbacksSecondPassTask(GlobalHandles* global_handles,
+ Isolate* isolate)
+ : CancelableTask(isolate), global_handles_(global_handles) {}
void RunInternal() override {
- TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
- isolate()->heap()->CallGCPrologueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
- isolate()->heap()->CallGCEpilogueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ global_handles_->InvokeSecondPassPhantomCallbacksFromTask();
}
- Isolate* isolate() { return isolate_; }
-
private:
- Isolate* isolate_;
- std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
-
+ GlobalHandles* global_handles_;
DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
};
@@ -566,8 +552,7 @@ Handle<Object> GlobalHandles::Create(Object* value) {
Node* result = first_free_;
first_free_ = result->next_free();
result->Acquire(value);
- if (isolate_->heap()->InNewSpace(value) &&
- !result->is_in_new_space_list()) {
+ if (Heap::InNewSpace(value) && !result->is_in_new_space_list()) {
new_space_nodes_.push_back(result);
result->set_in_new_space_list(true);
}
@@ -577,7 +562,14 @@ Handle<Object> GlobalHandles::Create(Object* value) {
Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
DCHECK_NOT_NULL(location);
- return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
+ GlobalHandles* global_handles =
+ Node::FromLocation(location)->GetGlobalHandles();
+#ifdef VERIFY_HEAP
+ if (i::FLAG_verify_heap) {
+ (*location)->ObjectVerify(global_handles->isolate());
+ }
+#endif // VERIFY_HEAP
+ return global_handles->Create(*location);
}
@@ -641,10 +633,11 @@ void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRootsForPhantomHandles(
- WeakSlotCallback should_reset_handle) {
+ WeakSlotCallbackWithHeap should_reset_handle) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
- if (node->IsWeakRetainer() && should_reset_handle(node->location())) {
+ if (node->IsWeakRetainer() &&
+ should_reset_handle(isolate()->heap(), node->location())) {
if (node->IsPhantomResetHandle()) {
node->MarkPending();
node->ResetPhantomHandle();
@@ -658,10 +651,12 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
}
}
-void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback should_reset_handle) {
+void GlobalHandles::IdentifyWeakHandles(
+ WeakSlotCallbackWithHeap should_reset_handle) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
- if (node->IsWeak() && should_reset_handle(node->location())) {
+ if (node->IsWeak() &&
+ should_reset_handle(isolate()->heap(), node->location())) {
if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
node->MarkPending();
}
@@ -762,18 +757,27 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
}
}
-void GlobalHandles::InvokeSecondPassPhantomCallbacks(
- std::vector<PendingPhantomCallback>* callbacks, Isolate* isolate) {
- while (!callbacks->empty()) {
- auto callback = callbacks->back();
- callbacks->pop_back();
+void GlobalHandles::InvokeSecondPassPhantomCallbacksFromTask() {
+ DCHECK(second_pass_callbacks_task_posted_);
+ second_pass_callbacks_task_posted_ = false;
+ TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
+ isolate()->heap()->CallGCPrologueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ InvokeSecondPassPhantomCallbacks();
+ isolate()->heap()->CallGCEpilogueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+}
+
+void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
+ while (!second_pass_callbacks_.empty()) {
+ auto callback = second_pass_callbacks_.back();
+ second_pass_callbacks_.pop_back();
DCHECK_NULL(callback.node());
// Fire second pass callback
- callback.Invoke(isolate);
+ callback.Invoke(isolate());
}
}
-
int GlobalHandles::PostScavengeProcessing(
const int initial_post_gc_processing_count) {
int freed_nodes = 0;
@@ -840,7 +844,7 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
for (Node* node : new_space_nodes_) {
DCHECK(node->is_in_new_space_list());
if (node->IsRetainer()) {
- if (isolate_->heap()->InNewSpace(node->object())) {
+ if (Heap::InNewSpace(node->object())) {
new_space_nodes_[last++] = node;
isolate_->heap()->IncrementNodesCopiedInNewSpace();
} else {
@@ -861,28 +865,29 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
int GlobalHandles::DispatchPendingPhantomCallbacks(
bool synchronous_second_pass) {
int freed_nodes = 0;
- std::vector<PendingPhantomCallback> second_pass_callbacks;
+ // Protect against callback modifying pending_phantom_callbacks_.
+ std::vector<PendingPhantomCallback> pending_phantom_callbacks;
+ pending_phantom_callbacks.swap(pending_phantom_callbacks_);
{
// The initial pass callbacks must simply clear the nodes.
- for (auto callback : pending_phantom_callbacks_) {
+ for (auto callback : pending_phantom_callbacks) {
// Skip callbacks that have already been processed once.
if (callback.node() == nullptr) continue;
callback.Invoke(isolate());
- if (callback.callback()) second_pass_callbacks.push_back(callback);
+ if (callback.callback()) second_pass_callbacks_.push_back(callback);
freed_nodes++;
}
}
- pending_phantom_callbacks_.clear();
- if (!second_pass_callbacks.empty()) {
+ if (!second_pass_callbacks_.empty()) {
if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- InvokeSecondPassPhantomCallbacks(&second_pass_callbacks, isolate());
+ InvokeSecondPassPhantomCallbacks();
isolate()->heap()->CallGCEpilogueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- } else {
- auto task = new PendingPhantomCallbacksSecondPassTask(
- &second_pass_callbacks, isolate());
+ } else if (!second_pass_callbacks_task_posted_) {
+ second_pass_callbacks_task_posted_ = true;
+ auto task = new PendingPhantomCallbacksSecondPassTask(this, isolate());
V8::GetCurrentPlatform()->CallOnForegroundThread(
reinterpret_cast<v8::Isolate*>(isolate()), task);
}
@@ -1129,11 +1134,10 @@ void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
}
}
-
-void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
+void EternalHandles::PostGarbageCollectionProcessing() {
size_t last = 0;
for (int index : new_space_indices_) {
- if (heap->InNewSpace(*GetLocation(index))) {
+ if (Heap::InNewSpace(*GetLocation(index))) {
new_space_indices_[last++] = index;
}
}
@@ -1145,19 +1149,19 @@ void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
DCHECK_EQ(kInvalidIndex, *index);
if (object == nullptr) return;
- DCHECK_NE(isolate->heap()->the_hole_value(), object);
+ Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ DCHECK_NE(the_hole, object);
int block = size_ >> kShift;
int offset = size_ & kMask;
// need to resize
if (offset == 0) {
Object** next_block = new Object*[kSize];
- Object* the_hole = isolate->heap()->the_hole_value();
MemsetPointer(next_block, the_hole, kSize);
blocks_.push_back(next_block);
}
- DCHECK_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
+ DCHECK_EQ(the_hole, blocks_[block][offset]);
blocks_[block][offset] = object;
- if (isolate->heap()->InNewSpace(object)) {
+ if (Heap::InNewSpace(object)) {
new_space_indices_.push_back(size_);
}
*index = size_++;
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 2c2fbbd3f9..a379a74d44 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -137,11 +137,12 @@ class GlobalHandles {
// Iterates over weak roots on the heap.
void IterateWeakRootsForFinalizers(RootVisitor* v);
- void IterateWeakRootsForPhantomHandles(WeakSlotCallback should_reset_handle);
+ void IterateWeakRootsForPhantomHandles(
+ WeakSlotCallbackWithHeap should_reset_handle);
// Marks all handles that should be finalized based on the predicate
// |should_reset_handle| as pending.
- void IdentifyWeakHandles(WeakSlotCallback should_reset_handle);
+ void IdentifyWeakHandles(WeakSlotCallbackWithHeap should_reset_handle);
// NOTE: Five ...NewSpace... functions below are used during
// scavenge collections and iterate over sets of handles that are
@@ -180,6 +181,8 @@ class GlobalHandles {
void Print();
#endif // DEBUG
+ void InvokeSecondPassPhantomCallbacks();
+
private:
// Internal node structures.
class Node;
@@ -190,9 +193,7 @@ class GlobalHandles {
explicit GlobalHandles(Isolate* isolate);
- // Helpers for PostGarbageCollectionProcessing.
- static void InvokeSecondPassPhantomCallbacks(
- std::vector<PendingPhantomCallback>* callbacks, Isolate* isolate);
+ void InvokeSecondPassPhantomCallbacksFromTask();
int PostScavengeProcessing(int initial_post_gc_processing_count);
int PostMarkSweepProcessing(int initial_post_gc_processing_count);
int DispatchPendingPhantomCallbacks(bool synchronous_second_pass);
@@ -223,6 +224,8 @@ class GlobalHandles {
size_t number_of_phantom_handle_resets_;
std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
+ std::vector<PendingPhantomCallback> second_pass_callbacks_;
+ bool second_pass_callbacks_task_posted_ = false;
friend class Isolate;
@@ -300,7 +303,7 @@ class EternalHandles {
// Iterates over all handles which might be in new space.
void IterateNewSpaceRoots(RootVisitor* visitor);
// Rebuilds new space list.
- void PostGarbageCollectionProcessing(Heap* heap);
+ void PostGarbageCollectionProcessing();
private:
static const int kInvalidIndex = -1;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 2a79d32928..5fe6f232fe 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -183,7 +183,7 @@ constexpr int kDoubleSizeLog2 = 3;
// ARM64 only supports direct calls within a 128 MB range.
constexpr size_t kMaxWasmCodeMemory = 128 * MB;
#else
-constexpr size_t kMaxWasmCodeMemory = 256 * MB;
+constexpr size_t kMaxWasmCodeMemory = 512 * MB;
#endif
#if V8_HOST_ARCH_64_BIT
@@ -251,6 +251,10 @@ constexpr int kExternalAllocationSoftLimit = 64 * MB;
// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
constexpr int kMaxRegularHeapObjectSize = 507136;
+// Objects larger than kMaxNewSpaceHeapObjectSize are allocated in the new
+// large object space.
+constexpr int kMaxNewSpaceHeapObjectSize = 32 * KB;
+
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
constexpr int kBitsPerByte = 8;
@@ -373,8 +377,20 @@ constexpr int kNoSourcePosition = -1;
// This constant is used to indicate missing deoptimization information.
constexpr int kNoDeoptimizationId = -1;
-// Deoptimize bailout kind.
-enum class DeoptimizeKind : uint8_t { kEager, kSoft, kLazy };
+// Deoptimize bailout kind:
+// - Eager: a check failed in the optimized code and deoptimization happens
+// immediately.
+// - Lazy: the code has been marked as dependent on some assumption which
+// is checked elsewhere and can trigger deoptimization the next time the
+// code is executed.
+// - Soft: similar to lazy deoptimization, but does not contribute to the
+// total deopt count which can lead to disabling optimization for a function.
+enum class DeoptimizeKind : uint8_t {
+ kEager,
+ kSoft,
+ kLazy,
+ kLastDeoptimizeKind = kLazy
+};
inline size_t hash_value(DeoptimizeKind kind) {
return static_cast<size_t>(kind);
}
@@ -405,8 +421,26 @@ inline std::ostream& operator<<(std::ostream& os,
UNREACHABLE();
}
+static_assert(kSmiValueSize <= 32, "Unsupported Smi tagging scheme");
+// Smi sign bit position must be 32-bit aligned so we can use sign extension
+// instructions on 64-bit architectures without additional shifts.
+static_assert((kSmiValueSize + kSmiShiftSize + kSmiTagSize) % 32 == 0,
+ "Unsupported Smi tagging scheme");
+
+constexpr bool kIsSmiValueInUpper32Bits =
+ (kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 64;
+constexpr bool kIsSmiValueInLower32Bits =
+ (kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 32;
+static_assert(!SmiValuesAre32Bits() == SmiValuesAre31Bits(),
+ "Unsupported Smi tagging scheme");
+static_assert(SmiValuesAre32Bits() == kIsSmiValueInUpper32Bits,
+ "Unsupported Smi tagging scheme");
+static_assert(SmiValuesAre31Bits() == kIsSmiValueInLower32Bits,
+ "Unsupported Smi tagging scheme");
+
// Mask for the sign bit in a smi.
-constexpr intptr_t kSmiSignMask = kIntptrSignBit;
+constexpr intptr_t kSmiSignMask = static_cast<intptr_t>(
+ uintptr_t{1} << (kSmiValueSize + kSmiShiftSize + kSmiTagSize - 1));
constexpr int kObjectAlignmentBits = kPointerSizeLog2;
constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
@@ -506,6 +540,7 @@ class MapSpace;
class MarkCompactCollector;
class MaybeObject;
class NewSpace;
+class NewLargeObjectSpace;
class Object;
class OldSpace;
class ParameterCount;
@@ -540,18 +575,20 @@ typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
enum AllocationSpace {
// TODO(v8:7464): Actually map this space's memory as read-only.
RO_SPACE, // Immortal, immovable and immutable objects,
- NEW_SPACE, // Semispaces collected with copying collector.
- OLD_SPACE, // May contain pointers to new space.
- CODE_SPACE, // No pointers to new space, marked executable.
- MAP_SPACE, // Only and all map objects.
- LO_SPACE, // Promoted large objects.
+ NEW_SPACE, // Young generation semispaces for regular objects collected with
+ // Scavenger.
+ OLD_SPACE, // Old generation regular object space.
+ CODE_SPACE, // Old generation code object space, marked executable.
+ MAP_SPACE, // Old generation map object space, non-movable.
+ LO_SPACE, // Old generation large object space.
+ NEW_LO_SPACE, // Young generation large object space.
FIRST_SPACE = RO_SPACE,
- LAST_SPACE = LO_SPACE,
+ LAST_SPACE = NEW_LO_SPACE,
FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
};
-constexpr int kSpaceTagSize = 4;
+constexpr int kSpaceTagSize = 3;
STATIC_ASSERT(FIRST_SPACE == 0);
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
@@ -935,26 +972,26 @@ constexpr uint64_t kHoleNanInt64 =
constexpr double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
-enum VariableMode : uint8_t {
+enum class VariableMode : uint8_t {
// User declared variables:
- LET, // declared via 'let' declarations (first lexical)
+ kLet, // declared via 'let' declarations (first lexical)
- CONST, // declared via 'const' declarations (last lexical)
+ kConst, // declared via 'const' declarations (last lexical)
- VAR, // declared via 'var', and 'function' declarations
+ kVar, // declared via 'var', and 'function' declarations
// Variables introduced by the compiler:
- TEMPORARY, // temporary variables (not user-visible), stack-allocated
- // unless the scope as a whole has forced context allocation
+ kTemporary, // temporary variables (not user-visible), stack-allocated
+ // unless the scope as a whole has forced context allocation
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
+ kDynamic, // always require dynamic lookup (we don't know
+ // the declaration)
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
+ kDynamicGlobal, // requires dynamic lookup, but we know that the
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
+ kDynamicLocal // requires dynamic lookup, but we know that the
// variable is local and where it is unless it
// has been shadowed by an eval-introduced
// variable
@@ -964,19 +1001,19 @@ enum VariableMode : uint8_t {
#ifdef DEBUG
inline const char* VariableMode2String(VariableMode mode) {
switch (mode) {
- case VAR:
+ case VariableMode::kVar:
return "VAR";
- case LET:
+ case VariableMode::kLet:
return "LET";
- case CONST:
+ case VariableMode::kConst:
return "CONST";
- case DYNAMIC:
+ case VariableMode::kDynamic:
return "DYNAMIC";
- case DYNAMIC_GLOBAL:
+ case VariableMode::kDynamicGlobal:
return "DYNAMIC_GLOBAL";
- case DYNAMIC_LOCAL:
+ case VariableMode::kDynamicLocal:
return "DYNAMIC_LOCAL";
- case TEMPORARY:
+ case VariableMode::kTemporary:
return "TEMPORARY";
}
UNREACHABLE();
@@ -991,19 +1028,19 @@ enum VariableKind : uint8_t {
};
inline bool IsDynamicVariableMode(VariableMode mode) {
- return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
+ return mode >= VariableMode::kDynamic && mode <= VariableMode::kDynamicLocal;
}
-
inline bool IsDeclaredVariableMode(VariableMode mode) {
- STATIC_ASSERT(LET == 0); // Implies that mode >= LET.
- return mode <= VAR;
+ STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
+ 0); // Implies that mode >= VariableMode::kLet.
+ return mode <= VariableMode::kVar;
}
-
inline bool IsLexicalVariableMode(VariableMode mode) {
- STATIC_ASSERT(LET == 0); // Implies that mode >= LET.
- return mode <= CONST;
+ STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
+ 0); // Implies that mode >= VariableMode::kLet.
+ return mode <= VariableMode::kConst;
}
enum VariableLocation : uint8_t {
@@ -1544,6 +1581,7 @@ enum class PoisoningMitigationLevel {
kDontPoison,
kPoisonCriticalOnly
};
+
enum class LoadSensitivity {
kCritical, // Critical loads are poisoned whenever we can run untrusted
// code (i.e., when --untrusted-code-mitigations is on).
@@ -1552,6 +1590,17 @@ enum class LoadSensitivity {
kSafe // Safe loads are never poisoned.
};
+// The reason for a WebAssembly trap.
+#define FOREACH_WASM_TRAPREASON(V) \
+ V(TrapUnreachable) \
+ V(TrapMemOutOfBounds) \
+ V(TrapDivByZero) \
+ V(TrapDivUnrepresentable) \
+ V(TrapRemByZero) \
+ V(TrapFloatUnrepresentable) \
+ V(TrapFuncInvalid) \
+ V(TrapFuncSigMismatch)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 7285d9cc1e..536388476b 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -35,9 +35,6 @@ HandleScope::HandleScope(Isolate* isolate) {
}
template <typename T>
-Handle<T>::Handle(T* object) : Handle(object, object->GetIsolate()) {}
-
-template <typename T>
Handle<T>::Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
template <typename T>
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index cc79ee17ff..ff4a5f88de 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -26,7 +26,9 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object* object = *location_;
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
- Heap* heap = heap_object->GetHeap();
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(heap_object);
+ if (chunk->owner()->identity() == RO_SPACE) return true;
+ Heap* heap = chunk->heap();
Object** roots_array_start = heap->roots_array_start();
if (roots_array_start <= location_ &&
location_ < roots_array_start + Heap::kStrongRootListLength &&
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 2513193dee..714139869f 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -97,7 +97,6 @@ class Handle final : public HandleBase {
"static type violation");
}
- V8_INLINE explicit Handle(T* object);
V8_INLINE Handle(T* object, Isolate* isolate);
// Allocate a new handle for the object, do not canonicalize.
@@ -165,11 +164,6 @@ V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
return Handle<T>(object, isolate);
}
-template <typename T>
-V8_INLINE Handle<T> handle(T* object) {
- return Handle<T>(object);
-}
-
// ----------------------------------------------------------------------------
// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
// into a Handle requires checking that it does not point to nullptr. This
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index df4923c06a..32ff8e3fd8 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -7,6 +7,7 @@
#define INTERNALIZED_STRING_LIST(V) \
V(add_string, "add") \
+ V(always_string, "always") \
V(anonymous_function_string, "(anonymous function)") \
V(anonymous_string, "anonymous") \
V(apply_string, "apply") \
@@ -19,6 +20,7 @@
V(ArrayIterator_string, "Array Iterator") \
V(assign_string, "assign") \
V(async_string, "async") \
+ V(auto_string, "auto") \
V(await_string, "await") \
V(BigInt_string, "BigInt") \
V(bigint_string, "bigint") \
@@ -115,6 +117,8 @@
V(line_string, "line") \
V(LinkError_string, "LinkError") \
V(literal_string, "literal") \
+ V(locale_string, "locale") \
+ V(long_string, "long") \
V(Map_string, "Map") \
V(MapIterator_string, "Map Iterator") \
V(message_string, "message") \
@@ -128,6 +132,7 @@
V(name_string, "name") \
V(NaN_string, "NaN") \
V(nan_string, "nan") \
+ V(narrow_string, "narrow") \
V(native_string, "native") \
V(new_target_string, ".new.target") \
V(next_string, "next") \
@@ -141,6 +146,7 @@
V(Number_string, "Number") \
V(number_string, "number") \
V(number_to_string, "[object Number]") \
+ V(numeric_string, "numeric") \
V(Object_string, "Object") \
V(object_string, "object") \
V(object_to_string, "[object Object]") \
@@ -172,6 +178,8 @@
V(RuntimeError_string, "RuntimeError") \
V(Script_string, "Script") \
V(script_string, "script") \
+ V(short_string, "short") \
+ V(style_string, "style") \
V(second_string, "second") \
V(set_space_string, "set ") \
V(Set_string, "Set") \
@@ -210,6 +218,7 @@
V(undefined_string, "undefined") \
V(undefined_to_string, "[object Undefined]") \
V(unicode_string, "unicode") \
+ V(unit_string, "unit") \
V(URIError_string, "URIError") \
V(use_asm_string, "use asm") \
V(use_strict_string, "use strict") \
@@ -292,6 +301,15 @@
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+#define TOP_MC_SCOPES(F) \
+ F(MC_CLEAR) \
+ F(MC_EPILOGUE) \
+ F(MC_EVACUATE) \
+ F(MC_FINISH) \
+ F(MC_MARK) \
+ F(MC_PROLOGUE) \
+ F(MC_SWEEP)
+
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
@@ -300,7 +318,7 @@
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
- F(MC_CLEAR) \
+ TOP_MC_SCOPES(F) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
@@ -310,8 +328,6 @@
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_CLEAR_WEAK_REFERENCES) \
- F(MC_EPILOGUE) \
- F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
@@ -323,21 +339,19 @@
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_FINISH) \
- F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_MAIN) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
- F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERON) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
- F(MC_PROLOGUE) \
- F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
@@ -370,6 +384,7 @@
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS) \
F(SCAVENGER_SCAVENGE_PARALLEL) \
F(SCAVENGER_SCAVENGE_ROOTS) \
+ F(SCAVENGER_SCAVENGE_UPDATE_REFS) \
F(SCAVENGER_SCAVENGE_WEAK)
#define TRACER_BACKGROUND_SCOPES(F) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 63aa8db17e..bce22c39ba 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -12,18 +12,18 @@ namespace v8 {
namespace internal {
void ArrayBufferCollector::AddGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation>* allocations) {
+ std::vector<JSArrayBuffer::Allocation> allocations) {
base::LockGuard<base::Mutex> guard(&allocations_mutex_);
- allocations_.push_back(allocations);
+ allocations_.push_back(std::move(allocations));
}
void ArrayBufferCollector::FreeAllocations() {
base::LockGuard<base::Mutex> guard(&allocations_mutex_);
- for (std::vector<JSArrayBuffer::Allocation>* allocations : allocations_) {
- for (auto alloc : *allocations) {
+ for (const std::vector<JSArrayBuffer::Allocation>& allocations :
+ allocations_) {
+ for (JSArrayBuffer::Allocation alloc : allocations) {
JSArrayBuffer::FreeBackingStore(heap_->isolate(), alloc);
}
- delete allocations;
}
allocations_.clear();
}
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index 002eba9a43..b44af2f2ad 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -28,7 +28,7 @@ class ArrayBufferCollector {
// These allocations will begin to be freed once FreeAllocations() is called,
// or on TearDown.
void AddGarbageAllocations(
- std::vector<JSArrayBuffer::Allocation>* allocations);
+ std::vector<JSArrayBuffer::Allocation> allocations);
// Calls FreeAllocations() on a background thread.
void FreeAllocationsOnBackgroundThread();
@@ -42,7 +42,7 @@ class ArrayBufferCollector {
Heap* heap_;
base::Mutex allocations_mutex_;
- std::vector<std::vector<JSArrayBuffer::Allocation>*> allocations_;
+ std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 157c71d3ab..697d4405d8 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -53,19 +53,19 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
heap->update_external_memory(-static_cast<intptr_t>(length));
}
+Space* LocalArrayBufferTracker::space() { return page_->owner(); }
+
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
- Isolate* isolate = space_->heap()->isolate();
+ Isolate* isolate = page_->heap()->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
- const size_t length = it->second;
+ const size_t length = it->second.length;
if (should_free(buffer)) {
- JSArrayBuffer::FreeBackingStore(
- isolate, {buffer->backing_store(), length, buffer->backing_store(),
- buffer->allocation_mode(), buffer->is_wasm_memory()});
+ JSArrayBuffer::FreeBackingStore(isolate, it->second);
it = array_buffers_.erase(it);
freed_memory += length;
} else {
@@ -73,11 +73,11 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
}
}
if (freed_memory > 0) {
- // Update the Space with any freed backing-store bytes.
- space_->DecrementExternalBackingStoreBytes(freed_memory);
+ page_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, freed_memory);
// TODO(wez): Remove backing-store from external memory accounting.
- space_->heap()->update_external_memory_concurrently_freed(
+ page_->heap()->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
}
@@ -96,10 +96,13 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
}
void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
- // Track the backing-store usage against the owning Space.
- space_->IncrementExternalBackingStoreBytes(length);
+ page_->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, length);
- auto ret = array_buffers_.insert({buffer, length});
+ auto ret = array_buffers_.insert(
+ {buffer,
+ {buffer->backing_store(), length, buffer->backing_store(),
+ buffer->is_wasm_memory()}});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
@@ -107,13 +110,13 @@ void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
}
void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
- // Remove the backing-store accounting from the owning Space.
- space_->DecrementExternalBackingStoreBytes(length);
+ page_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, length);
TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
- DCHECK_EQ(length, it->second);
+ DCHECK_EQ(length, it->second.length);
array_buffers_.erase(it);
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 322b8a4aa9..4f92e7e17c 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -20,19 +20,20 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
- std::vector<JSArrayBuffer::Allocation>* backing_stores_to_free =
- new std::vector<JSArrayBuffer::Allocation>();
+ std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
+ TrackingData kept_array_buffers;
JSArrayBuffer* new_buffer = nullptr;
JSArrayBuffer* old_buffer = nullptr;
size_t freed_memory = 0;
size_t moved_memory = 0;
for (TrackingData::iterator it = array_buffers_.begin();
- it != array_buffers_.end();) {
- old_buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ it != array_buffers_.end(); ++it) {
+ old_buffer = it->first;
+ Page* old_page = Page::FromAddress(old_buffer->address());
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
- ++it;
+ kept_array_buffers.insert(*it);
} else if (result == kUpdateEntry) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
@@ -45,43 +46,45 @@ void LocalArrayBufferTracker::Process(Callback callback) {
}
DCHECK_NOT_NULL(tracker);
const size_t size = NumberToSize(new_buffer->byte_length());
+ // We should decrement before adding to avoid potential overflows in
+ // the external memory counters.
+ DCHECK_EQ(it->first->is_wasm_memory(), it->second.is_wasm_memory);
+ old_page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, it->second.length);
tracker->Add(new_buffer, size);
}
- moved_memory += it->second;
- it = array_buffers_.erase(it);
+ moved_memory += it->second.length;
+
} else if (result == kRemoveEntry) {
- freed_memory += it->second;
+ freed_memory += it->second.length;
// We pass backing_store() and stored length to the collector for freeing
// the backing store. Wasm allocations will go through their own tracker
// based on the backing store.
- backing_stores_to_free->emplace_back(
- old_buffer->backing_store(), it->second, old_buffer->backing_store(),
- old_buffer->allocation_mode(), old_buffer->is_wasm_memory());
- it = array_buffers_.erase(it);
+ backing_stores_to_free.push_back(it->second);
+ old_page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, it->second.length);
} else {
UNREACHABLE();
}
}
if (moved_memory || freed_memory) {
- // Update the Space with any moved or freed backing-store bytes.
- space_->DecrementExternalBackingStoreBytes(freed_memory + moved_memory);
-
// TODO(wez): Remove backing-store from external memory accounting.
- space_->heap()->update_external_memory_concurrently_freed(
+ page_->heap()->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
+ array_buffers_.swap(kept_array_buffers);
+
// Pass the backing stores that need to be freed to the main thread for later
// distribution.
- // ArrayBufferCollector takes ownership of this pointer.
- space_->heap()->array_buffer_collector()->AddGarbageAllocations(
- backing_stores_to_free);
+ page_->heap()->array_buffer_collector()->AddGarbageAllocations(
+ std::move(backing_stores_to_free));
}
void ArrayBufferTracker::PrepareToFreeDeadInNewSpace(Heap* heap) {
DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
- for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
- heap->new_space()->FromSpaceEnd())) {
+ for (Page* page :
+ PageRange(heap->new_space()->from_space().first_page(), nullptr)) {
bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
CHECK(empty);
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index fde4fe182e..347260dde0 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -10,11 +10,11 @@
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/globals.h"
+#include "src/objects/js-array.h"
namespace v8 {
namespace internal {
-class JSArrayBuffer;
class MarkingState;
class Page;
class Space;
@@ -67,7 +67,7 @@ class LocalArrayBufferTracker {
enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
enum FreeMode { kFreeDead, kFreeAll };
- explicit LocalArrayBufferTracker(Space* space) : space_(space) {}
+ explicit LocalArrayBufferTracker(Page* page) : page_(page) {}
~LocalArrayBufferTracker();
inline void Add(JSArrayBuffer* buffer, size_t length);
@@ -110,9 +110,12 @@ class LocalArrayBufferTracker {
// HeapNumber. The reason for tracking the length is that in the case of
// length being a HeapNumber, the buffer and its length may be stored on
// different memory pages, making it impossible to guarantee order of freeing.
- typedef std::unordered_map<JSArrayBuffer*, size_t, Hasher> TrackingData;
+ typedef std::unordered_map<JSArrayBuffer*, JSArrayBuffer::Allocation, Hasher>
+ TrackingData;
- Space* space_;
+ inline Space* space();
+
+ Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
// processing the tracker through its owning page.
TrackingData array_buffers_;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 0f117386e0..4d41df88af 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -19,6 +19,7 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
+#include "src/objects/hash-table-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -156,8 +157,9 @@ class ConcurrentMarkingVisitor final
Object* object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
if (!object->IsHeapObject()) continue;
- MarkObject(HeapObject::cast(object));
- MarkCompactCollector::RecordSlot(host, slot, object);
+ HeapObject* heap_object = HeapObject::cast(object);
+ MarkObject(heap_object);
+ MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
}
@@ -352,9 +354,59 @@ class ConcurrentMarkingVisitor final
}
int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
- // TODO(ulan): implement iteration of strong fields.
- bailout_.Push(object);
- return 0;
+ return VisitJSObjectSubclass(map, object);
+ }
+
+ int VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ if (!ShouldVisit(table)) return 0;
+ weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
+
+ for (int i = 0; i < table->Capacity(); i++) {
+ Object** key_slot =
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
+ HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ MarkCompactCollector::RecordSlot(table, key_slot, key);
+
+ Object** value_slot =
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+
+ if (marking_state_.IsBlackOrGrey(key)) {
+ VisitPointer(table, value_slot);
+
+ } else {
+ Object* value_obj = table->ValueAt(i);
+
+ if (value_obj->IsHeapObject()) {
+ HeapObject* value = HeapObject::cast(value_obj);
+ MarkCompactCollector::RecordSlot(table, value_slot, value);
+
+ // Revisit ephemerons with both key and value unreachable at end
+ // of concurrent marking cycle.
+ if (marking_state_.IsWhite(value)) {
+ weak_objects_->discovered_ephemerons.Push(task_id_,
+ Ephemeron{key, value});
+ }
+ }
+ }
+ }
+
+ return table->SizeFromMap(map);
+ }
+
+ // Implements ephemeron semantics: Marks value if key is already reachable.
+ // Returns true if value was actually marked.
+ bool VisitEphemeron(HeapObject* key, HeapObject* value) {
+ if (marking_state_.IsBlackOrGrey(key)) {
+ if (marking_state_.WhiteToGrey(value)) {
+ shared_.Push(value);
+ return true;
+ }
+
+ } else if (marking_state_.IsWhite(value)) {
+ weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
+ }
+
+ return false;
}
void MarkObject(HeapObject* object) {
@@ -535,9 +587,21 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
heap_->isolate()->PrintWithTimestamp(
"Starting concurrent marking task %d\n", task_id);
}
+ bool ephemeron_marked = false;
+
{
TimedScope scope(&time_ms);
+ {
+ Ephemeron ephemeron;
+
+ while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
+ if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ ephemeron_marked = true;
+ }
+ }
+ }
+
bool done = false;
while (!done) {
size_t current_marked_bytes = 0;
@@ -563,21 +627,41 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->preemption_request.Value()) {
+ if (task_state->preemption_request) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ConcurrentMarking::Run Preempted");
break;
}
}
+
+ if (done) {
+ Ephemeron ephemeron;
+
+ while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
+ if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ ephemeron_marked = true;
+ }
+ }
+ }
+
shared_->FlushToGlobal(task_id);
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
+ weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
+ weak_objects_->current_ephemerons.FlushToGlobal(task_id);
+ weak_objects_->next_ephemerons.FlushToGlobal(task_id);
+ weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
+
+ if (ephemeron_marked) {
+ set_ephemeron_marked(true);
+ }
+
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
is_pending_[task_id] = false;
@@ -619,7 +703,7 @@ void ConcurrentMarking::ScheduleTasks() {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
- task_state_[i].preemption_request.SetValue(false);
+ task_state_[i].preemption_request = false;
is_pending_[i] = true;
++pending_task_count_;
auto task =
@@ -637,7 +721,9 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
}
- if (!shared_->IsGlobalPoolEmpty()) {
+ if (!shared_->IsGlobalPoolEmpty() ||
+ !weak_objects_->current_ephemerons.IsEmpty() ||
+ !weak_objects_->discovered_ephemerons.IsEmpty()) {
ScheduleTasks();
}
}
@@ -658,7 +744,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
is_pending_[i] = false;
--pending_task_count_;
} else if (stop_request == StopRequest::PREEMPT_TASKS) {
- task_state_[i].preemption_request.SetValue(true);
+ task_state_[i].preemption_request = true;
}
}
}
@@ -672,6 +758,13 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
return true;
}
+bool ConcurrentMarking::IsStopped() {
+ if (!FLAG_concurrent_marking) return true;
+
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ return pending_task_count_ == 0;
+}
+
void ConcurrentMarking::FlushLiveBytes(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 2ef9d58997..0b8ffd9336 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -81,13 +81,21 @@ class ConcurrentMarking {
int TaskCount() { return task_count_; }
+ // Checks if all threads are stopped.
+ bool IsStopped();
+
size_t TotalMarkedBytes();
+ void set_ephemeron_marked(bool ephemeron_marked) {
+ ephemeron_marked_.store(ephemeron_marked);
+ }
+ bool ephemeron_marked() { return ephemeron_marked_.load(); }
+
private:
struct TaskState {
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the worker thread.
- base::AtomicValue<bool> preemption_request;
+ std::atomic<bool> preemption_request;
LiveBytesMap live_bytes;
size_t marked_bytes = 0;
@@ -102,6 +110,7 @@ class ConcurrentMarking {
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
+ std::atomic<bool> ephemeron_marked_{false};
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
int pending_task_count_ = 0;
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 1d20918ef3..d8659ec889 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -45,11 +45,10 @@ bool LocalEmbedderHeapTracer::Trace(
return remote_tracer_->AdvanceTracing(deadline, actions);
}
-size_t LocalEmbedderHeapTracer::NumberOfWrappersToTrace() {
- return (InUse())
- ? cached_wrappers_to_trace_.size() +
- remote_tracer_->NumberOfWrappersToTrace()
- : 0;
+bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
+ return (InUse()) ? cached_wrappers_to_trace_.empty() &&
+ remote_tracer_->IsTracingDone()
+ : true;
}
void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 8146a1281c..994695942b 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -18,10 +18,23 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
typedef std::pair<void*, void*> WrapperInfo;
- LocalEmbedderHeapTracer()
- : remote_tracer_(nullptr), num_v8_marking_worklist_was_empty_(0) {}
+ explicit LocalEmbedderHeapTracer(Isolate* isolate)
+ : isolate_(isolate),
+ remote_tracer_(nullptr),
+ num_v8_marking_worklist_was_empty_(0) {}
+
+ ~LocalEmbedderHeapTracer() {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+ }
+
+ void SetRemoteTracer(EmbedderHeapTracer* tracer) {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+
+ remote_tracer_ = tracer;
+ if (remote_tracer_)
+ remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
+ }
- void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
bool InUse() { return remote_tracer_ != nullptr; }
void TracePrologue();
@@ -30,8 +43,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void EnterFinalPause();
bool Trace(double deadline,
EmbedderHeapTracer::AdvanceTracingActions actions);
+ bool IsRemoteTracingDone();
- size_t NumberOfWrappersToTrace();
size_t NumberOfCachedWrappersToTrace() {
return cached_wrappers_to_trace_.size();
}
@@ -51,13 +64,14 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool ShouldFinalizeIncrementalMarking() {
static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
- NumberOfWrappersToTrace() == 0 ||
+ IsRemoteTracingDone() ||
num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
private:
typedef std::vector<WrapperInfo> WrapperCache;
+ Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_;
WrapperCache cached_wrappers_to_trace_;
size_t num_v8_marking_worklist_was_empty_;
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 84dd29eb4c..85f2679b3f 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -30,6 +30,14 @@ ROOT_LIST(ROOT_ACCESSOR)
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
+ }
+ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
+#undef ALLOCATION_SITE_MAP_ACCESSOR
+
#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
Handle<Map> Factory::name##_map() { \
return Handle<Map>(bit_cast<Map**>( \
@@ -109,23 +117,37 @@ Handle<Object> Factory::NewNumberFromInt64(int64_t value,
return NewNumber(static_cast<double>(value), pretenure);
}
-Handle<HeapNumber> Factory::NewHeapNumber(double value, MutableMode mode,
+Handle<HeapNumber> Factory::NewHeapNumber(double value,
PretenureFlag pretenure) {
- Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ Handle<HeapNumber> heap_number = NewHeapNumber(pretenure);
heap_number->set_value(value);
return heap_number;
}
+Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
+ double value, PretenureFlag pretenure) {
+ Handle<MutableHeapNumber> number = NewMutableHeapNumber(pretenure);
+ number->set_value(value);
+ return number;
+}
+
Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
- MutableMode mode,
PretenureFlag pretenure) {
- Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+ Handle<HeapNumber> heap_number = NewHeapNumber(pretenure);
heap_number->set_value_as_bits(bits);
return heap_number;
}
-Handle<HeapNumber> Factory::NewMutableHeapNumber(PretenureFlag pretenure) {
- return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
+Handle<MutableHeapNumber> Factory::NewMutableHeapNumberFromBits(
+ uint64_t bits, PretenureFlag pretenure) {
+ Handle<MutableHeapNumber> number = NewMutableHeapNumber(pretenure);
+ number->set_value_as_bits(bits);
+ return number;
+}
+
+Handle<MutableHeapNumber> Factory::NewMutableHeapNumberWithHoleNaN(
+ PretenureFlag pretenure) {
+ return NewMutableHeapNumberFromBits(kHoleNanInt64, pretenure);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index ff1b773bbb..a04e2e734b 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -17,11 +17,15 @@
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
#include "src/unicode-cache.h"
@@ -47,9 +51,9 @@ int ComputeCodeObjectSize(const CodeDesc& desc) {
return object_size;
}
-void InitializeCode(Handle<Code> code, int object_size, const CodeDesc& desc,
- Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index,
+void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
+ const CodeDesc& desc, Code::Kind kind,
+ Handle<Object> self_ref, int32_t builtin_index,
Handle<ByteArray> source_position_table,
Handle<DeoptimizationData> deopt_data,
Handle<ByteArray> reloc_info,
@@ -57,17 +61,17 @@ void InitializeCode(Handle<Code> code, int object_size, const CodeDesc& desc,
bool is_turbofanned, int stack_slots,
int safepoint_table_offset, int handler_table_offset) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
- DCHECK(
- !code->GetIsolate()->heap()->memory_allocator()->code_range()->valid() ||
- code->GetIsolate()->heap()->memory_allocator()->code_range()->contains(
- code->address()) ||
- object_size <= code->GetIsolate()->heap()->code_space()->AreaSize());
+ DCHECK(!heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(code->address()) ||
+ object_size <= heap->code_space()->AreaSize());
bool has_unwinding_info = desc.unwinding_info != nullptr;
code->set_raw_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
+ const bool is_off_heap_trampoline = false;
+ code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots,
+ is_off_heap_trampoline);
code->set_safepoint_table_offset(safepoint_table_offset);
code->set_handler_table_offset(handler_table_offset);
code->set_code_data_container(*data_container);
@@ -82,10 +86,10 @@ void InitializeCode(Handle<Code> code, int object_size, const CodeDesc& desc,
if (!self_ref.is_null()) {
DCHECK(self_ref->IsOddball());
DCHECK(Oddball::cast(*self_ref)->kind() == Oddball::kSelfReferenceMarker);
-#ifdef V8_EMBEDDED_BUILTINS
- auto builder = code->GetIsolate()->builtins_constants_table_builder();
- if (builder != nullptr) builder->PatchSelfReference(self_ref, code);
-#endif // V8_EMBEDDED_BUILTINS
+ if (FLAG_embedded_builtins) {
+ auto builder = heap->isolate()->builtins_constants_table_builder();
+ if (builder != nullptr) builder->PatchSelfReference(self_ref, code);
+ }
*(self_ref.location()) = *code;
}
@@ -94,12 +98,12 @@ void InitializeCode(Handle<Code> code, int object_size, const CodeDesc& desc,
// that are dereferenced during the copy to point directly to the actual heap
// objects. These pointers can include references to the code object itself,
// through the self_reference parameter.
- code->CopyFromNoFlush(desc);
+ code->CopyFromNoFlush(heap, desc);
code->clear_padding();
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify();
+ if (FLAG_verify_heap) code->ObjectVerify(heap->isolate());
#endif
}
@@ -139,7 +143,6 @@ void Factory::InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site) {
memento->set_map_after_allocation(*allocation_memento_map(),
SKIP_WRITE_BARRIER);
- DCHECK(allocation_site->map() == *allocation_site_map());
memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
if (FLAG_allocation_site_pretenuring) {
allocation_site->IncrementMementoCreateCount();
@@ -202,7 +205,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
- result->set_prototype_users(FixedArrayOfWeakCells::Empty());
+ result->set_prototype_users(*empty_weak_array_list());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
return result;
@@ -233,12 +236,13 @@ Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
return result;
}
-Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
+Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
- Handle<ConstantElementsPair> result =
- Handle<ConstantElementsPair>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ Handle<ArrayBoilerplateDescription> result =
+ Handle<ArrayBoilerplateDescription>::cast(
+ NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, TENURED));
result->set_elements_kind(elements_kind);
- result->set_constant_values(*constant_values);
+ result->set_constant_elements(*constant_values);
return result;
}
@@ -331,6 +335,10 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(Heap::RootListIndex map_root_index,
template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
Heap::RootListIndex, int, PretenureFlag);
+template Handle<DescriptorArray>
+Factory::NewWeakFixedArrayWithMap<DescriptorArray>(Heap::RootListIndex, int,
+ PretenureFlag);
+
Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -371,7 +379,8 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(), heap->undefined_value(), length);
+ MemsetPointer(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
+ length);
return array;
}
@@ -418,7 +427,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
return vector;
}
-Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
+Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
DCHECK_GE(boilerplate, 0);
DCHECK_GE(all_properties, index_keys);
@@ -429,21 +438,18 @@ Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
DCHECK_GE(backing_store_size, 0);
bool has_different_size_backing_store = boilerplate != backing_store_size;
- // Space for name and value for every boilerplate property.
- int size = 2 * boilerplate;
+ // Space for name and value for every boilerplate property + LiteralType flag.
+ int size =
+ 2 * boilerplate + ObjectBoilerplateDescription::kDescriptionStartIndex;
if (has_different_size_backing_store) {
// An extra entry for the backing store size.
size++;
}
- if (size == 0) {
- return isolate()->factory()->empty_boilerplate_description();
- }
-
- Handle<BoilerplateDescription> description =
- Handle<BoilerplateDescription>::cast(NewFixedArrayWithMap(
- Heap::kBoilerplateDescriptionMapRootIndex, size, TENURED));
+ Handle<ObjectBoilerplateDescription> description =
+ Handle<ObjectBoilerplateDescription>::cast(NewFixedArrayWithMap(
+ Heap::kObjectBoilerplateDescriptionMapRootIndex, size, TENURED));
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
@@ -451,6 +457,8 @@ Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
description->set_backing_store_size(isolate(), backing_store_size);
}
+ description->set_flags(0);
+
return description;
}
@@ -566,7 +574,7 @@ Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
Handle<String> Factory::InternalizeOneByteString(
Handle<SeqOneByteString> string, int from, int length) {
- SeqOneByteSubStringKey key(string, from, length);
+ SeqOneByteSubStringKey key(isolate(), string, from, length);
return InternalizeStringWithKey(&key);
}
@@ -905,7 +913,7 @@ MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
MaybeHandle<Map> Factory::InternalizedStringMapForString(
Handle<String> string) {
// If the string is in new space it cannot be used as internalized.
- if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
+ if (Heap::InNewSpace(*string)) return MaybeHandle<Map>();
return GetInternalizedStringMap(this, string);
}
@@ -1137,8 +1145,8 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
result->set_hash_field(String::kEmptyHashField);
result->set_length(length);
- result->set_first(*left, mode);
- result->set_second(*right, mode);
+ result->set_first(isolate(), *left, mode);
+ result->set_second(isolate(), *right, mode);
return result;
}
@@ -1159,11 +1167,11 @@ Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
int end) {
#if VERIFY_HEAP
- if (FLAG_verify_heap) str->StringVerify();
+ if (FLAG_verify_heap) str->StringVerify(isolate());
#endif
DCHECK(begin > 0 || end < str->length());
- str = String::Flatten(str);
+ str = String::Flatten(isolate(), str);
int length = end - begin;
if (length <= 0) return empty_string();
@@ -1218,7 +1226,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
- slice->set_parent(*str);
+ slice->set_parent(isolate(), *str);
slice->set_offset(offset);
return slice;
}
@@ -1243,6 +1251,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
+ isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
}
@@ -1275,6 +1284,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
+ isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
}
@@ -1290,6 +1300,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
+ isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
}
@@ -1297,7 +1308,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
Handle<Map> map(isolate()->native_context()->string_iterator_map(),
isolate());
- Handle<String> flat_string = String::Flatten(string);
+ Handle<String> flat_string = String::Flatten(isolate(), string);
Handle<JSStringIterator> iterator =
Handle<JSStringIterator>::cast(NewJSObjectFromMap(map));
iterator->set_string(*flat_string);
@@ -1471,6 +1482,17 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
return context;
}
+Handle<Context> Factory::NewBuiltinContext(Handle<Context> native_context,
+ int length) {
+ DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
+ Handle<Context> context =
+ NewFixedArrayWithMap<Context>(Heap::kFunctionContextMapRootIndex, length);
+ context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
+ context->set_extension(*the_hole_value());
+ context->set_native_context(*native_context);
+ return context;
+}
+
Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
Map* map;
switch (type) {
@@ -1509,29 +1531,62 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
}
Handle<Script> Factory::NewScript(Handle<String> source, PretenureFlag tenure) {
+ return NewScriptWithId(source, isolate()->heap()->NextScriptId(), tenure);
+}
+
+Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
+ PretenureFlag tenure) {
DCHECK(tenure == TENURED || tenure == TENURED_READ_ONLY);
// Create and initialize script object.
Heap* heap = isolate()->heap();
+ ReadOnlyRoots roots(heap);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE, tenure));
script->set_source(*source);
- script->set_name(heap->undefined_value());
- script->set_id(isolate()->heap()->NextScriptId());
+ script->set_name(roots.undefined_value());
+ script->set_id(script_id);
script->set_line_offset(0);
script->set_column_offset(0);
- script->set_context_data(heap->undefined_value());
+ script->set_context_data(roots.undefined_value());
script->set_type(Script::TYPE_NORMAL);
- script->set_wrapper(heap->undefined_value());
- script->set_line_ends(heap->undefined_value());
- script->set_eval_from_shared_or_wrapped_arguments(heap->undefined_value());
+ script->set_line_ends(roots.undefined_value());
+ script->set_eval_from_shared_or_wrapped_arguments(roots.undefined_value());
script->set_eval_from_position(0);
script->set_shared_function_infos(*empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
script->set_flags(0);
script->set_host_defined_options(*empty_fixed_array());
- heap->set_script_list(*FixedArrayOfWeakCells::Add(script_list(), script));
+ heap->set_script_list(
+ *FixedArrayOfWeakCells::Add(isolate(), script_list(), script));
+ LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return script;
}
+Handle<Script> Factory::CloneScript(Handle<Script> script) {
+ Heap* heap = isolate()->heap();
+ int script_id = isolate()->heap()->NextScriptId();
+ Handle<Script> new_script =
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, TENURED));
+ new_script->set_source(script->source());
+ new_script->set_name(script->name());
+ new_script->set_id(script_id);
+ new_script->set_line_offset(script->line_offset());
+ new_script->set_column_offset(script->column_offset());
+ new_script->set_context_data(script->context_data());
+ new_script->set_type(script->type());
+ new_script->set_line_ends(ReadOnlyRoots(heap).undefined_value());
+ new_script->set_eval_from_shared_or_wrapped_arguments(
+ script->eval_from_shared_or_wrapped_arguments());
+ new_script->set_shared_function_infos(*empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ new_script->set_eval_from_position(script->eval_from_position());
+ new_script->set_flags(script->flags());
+ new_script->set_host_defined_options(script->host_defined_options());
+ heap->set_script_list(
+ *FixedArrayOfWeakCells::Add(isolate(), script_list(), new_script));
+ LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
+ return new_script;
+}
+
Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<Context> context) {
DCHECK(callable->IsCallable());
@@ -1598,7 +1653,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
// Bytecode array is pretenured, so constant pool array should be too.
- DCHECK(!isolate()->heap()->InNewSpace(*constant_pool));
+ DCHECK(!Heap::InNewSpace(*constant_pool));
int size = BytecodeArray::SizeFor(length);
HeapObject* result =
@@ -1746,15 +1801,18 @@ Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
return array;
}
-Handle<AllocationSite> Factory::NewAllocationSite() {
- Handle<Map> map = allocation_site_map();
+Handle<AllocationSite> Factory::NewAllocationSite(bool with_weak_next) {
+ Handle<Map> map = with_weak_next ? allocation_site_map()
+ : allocation_site_without_weaknext_map();
Handle<AllocationSite> site(AllocationSite::cast(New(map, TENURED)),
isolate());
site->Initialize();
- // Link the site
- site->set_weak_next(isolate()->heap()->allocation_sites_list());
- isolate()->heap()->set_allocation_sites_list(*site);
+ if (with_weak_next) {
+ // Link the site
+ site->set_weak_next(isolate()->heap()->allocation_sites_list());
+ isolate()->heap()->set_allocation_sites_list(*site);
+ }
return site;
}
@@ -1847,7 +1905,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
HeapObject* raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
adjusted_object_size, NEW_SPACE);
- SLOW_DCHECK(isolate()->heap()->InNewSpace(raw_clone));
+ SLOW_DCHECK(Heap::InNewSpace(raw_clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
Heap::CopyBlock(raw_clone->address(), source->address(), object_size);
@@ -1980,7 +2038,7 @@ Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->Set(i, src->Get(i), mode);
HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(isolate()->heap()->undefined_value());
+ HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
MemsetPointer(result->data_start() + old_len, undefined_reference, grow_by);
return Handle<WeakFixedArray>(result, isolate());
}
@@ -2002,7 +2060,7 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_capacity; i++) result->Set(i, src->Get(i), mode);
HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(isolate()->heap()->undefined_value());
+ HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
MemsetPointer(result->data_start() + old_capacity, undefined_reference,
grow_by);
return Handle<WeakArrayList>(result, isolate());
@@ -2039,7 +2097,7 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
Handle<FixedArray> array) {
- DCHECK(isolate()->heap()->InNewSpace(*array));
+ DCHECK(Heap::InNewSpace(*array));
Handle<FixedArray> result =
CopyFixedArrayUpTo(array, array->length(), TENURED);
@@ -2096,16 +2154,14 @@ Handle<Object> Factory::NewNumber(double value, PretenureFlag pretenure) {
if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
}
-
- // Materialize the value in the heap.
- return NewHeapNumber(value, IMMUTABLE, pretenure);
+ return NewHeapNumber(value, pretenure);
}
Handle<Object> Factory::NewNumberFromInt(int32_t value,
PretenureFlag pretenure) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
// Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure);
+ return NewHeapNumber(FastI2D(value), pretenure);
}
Handle<Object> Factory::NewNumberFromUint(uint32_t value,
@@ -2114,18 +2170,26 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
if (int32v >= 0 && Smi::IsValid(int32v)) {
return handle(Smi::FromInt(int32v), isolate());
}
- return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
+ return NewHeapNumber(FastUI2D(value), pretenure);
}
-Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
- PretenureFlag pretenure) {
+Handle<HeapNumber> Factory::NewHeapNumber(PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map* map = mode == MUTABLE ? *mutable_heap_number_map() : *heap_number_map();
+ Map* map = *heap_number_map();
HeapObject* result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
map, kDoubleUnaligned);
return handle(HeapNumber::cast(result), isolate());
}
+Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
+ PretenureFlag pretenure) {
+ STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
+ Map* map = *mutable_heap_number_map();
+ HeapObject* result = AllocateRawWithImmortalMap(
+ MutableHeapNumber::kSize, pretenure, map, kDoubleUnaligned);
+ return handle(MutableHeapNumber::cast(result), isolate());
+}
+
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
PretenureFlag pretenure) {
if (length < 0 || length > BigInt::kMaxLength) {
@@ -2328,27 +2392,32 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
- Handle<Context> native_context(function->context()->native_context());
+ Handle<Context> native_context(function->context()->native_context(),
+ isolate());
Handle<Map> new_map;
if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared()->kind()))) {
- new_map = handle(native_context->async_generator_object_prototype_map());
+ new_map = handle(native_context->async_generator_object_prototype_map(),
+ isolate());
} else if (IsResumableFunction(function->shared()->kind())) {
// Generator and async function prototypes can share maps since they
// don't have "constructor" properties.
- new_map = handle(native_context->generator_object_prototype_map());
+ new_map =
+ handle(native_context->generator_object_prototype_map(), isolate());
} else {
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
- Handle<JSFunction> object_function(native_context->object_function());
+ Handle<JSFunction> object_function(native_context->object_function(),
+ isolate());
DCHECK(object_function->has_initial_map());
- new_map = handle(object_function->initial_map());
+ new_map = handle(object_function->initial_map(), isolate());
}
DCHECK(!new_map->is_prototype_map());
Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
if (!IsResumableFunction(function->shared()->kind())) {
- JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
+ JSObject::AddProperty(isolate(), prototype, constructor_string(), function,
+ DONT_ENUM);
}
return prototype;
@@ -2358,7 +2427,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
PretenureFlag pretenure) {
Handle<Map> initial_map(
- Map::cast(context->native_context()->get(info->function_map_index())));
+ Map::cast(context->native_context()->get(info->function_map_index())),
+ isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
pretenure);
}
@@ -2367,7 +2437,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure) {
Handle<Map> initial_map(
- Map::cast(context->native_context()->get(info->function_map_index())));
+ Map::cast(context->native_context()->get(info->function_map_index())),
+ isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
feedback_cell, pretenure);
}
@@ -2427,11 +2498,50 @@ Handle<ModuleInfo> Factory::NewModuleInfo() {
ModuleInfo::kLength, TENURED);
}
-Handle<PreParsedScopeData> Factory::NewPreParsedScopeData() {
- Handle<PreParsedScopeData> result =
- Handle<PreParsedScopeData>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+Handle<PreParsedScopeData> Factory::NewPreParsedScopeData(int length) {
+ int size = PreParsedScopeData::SizeFor(length);
+ Handle<PreParsedScopeData> result(
+ PreParsedScopeData::cast(AllocateRawWithImmortalMap(
+ size, TENURED, *pre_parsed_scope_data_map())),
+ isolate());
result->set_scope_data(PodArray<uint8_t>::cast(*empty_byte_array()));
- result->set_child_data(*empty_fixed_array());
+ result->set_length(length);
+ MemsetPointer(result->child_data_start(), *null_value(), length);
+
+ result->clear_padding();
+ return result;
+}
+
+Handle<UncompiledDataWithoutPreParsedScope>
+Factory::NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+ int32_t end_position,
+ int32_t function_literal_id) {
+ Handle<UncompiledDataWithoutPreParsedScope> result(
+ UncompiledDataWithoutPreParsedScope::cast(
+ New(uncompiled_data_without_pre_parsed_scope_map(), TENURED)),
+ isolate());
+ result->set_start_position(start_position);
+ result->set_end_position(end_position);
+ result->set_function_literal_id(function_literal_id);
+
+ result->clear_padding();
+ return result;
+}
+
+Handle<UncompiledDataWithPreParsedScope>
+Factory::NewUncompiledDataWithPreParsedScope(
+ int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<PreParsedScopeData> pre_parsed_scope_data) {
+ Handle<UncompiledDataWithPreParsedScope> result(
+ UncompiledDataWithPreParsedScope::cast(
+ New(uncompiled_data_with_pre_parsed_scope_map(), TENURED)),
+ isolate());
+ result->set_start_position(start_position);
+ result->set_end_position(end_position);
+ result->set_function_literal_id(function_literal_id);
+ result->set_pre_parsed_scope_data(*pre_parsed_scope_data);
+
+ result->clear_padding();
return result;
}
@@ -2475,7 +2585,7 @@ MaybeHandle<Code> Factory::TryNewCode(
Heap* heap = isolate()->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject* result =
- heap->AllocateRawWithLigthRetry(object_size, CODE_SPACE);
+ heap->AllocateRawWithLightRetry(object_size, CODE_SPACE);
// Return an empty handle if we cannot allocate the code object.
if (!result) return MaybeHandle<Code>();
@@ -2491,7 +2601,7 @@ MaybeHandle<Code> Factory::TryNewCode(
result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
code = handle(Code::cast(result), isolate());
- InitializeCode(code, object_size, desc, kind, self_ref, builtin_index,
+ InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
data_container, stub_key, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
@@ -2539,7 +2649,7 @@ Handle<Code> Factory::NewCode(
result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
code = handle(Code::cast(result), isolate());
- InitializeCode(code, object_size, desc, kind, self_ref, builtin_index,
+ InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
data_container, stub_key, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
@@ -2566,13 +2676,12 @@ Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
return handle(Code::cast(result), isolate());
}
-#ifdef V8_EMBEDDED_BUILTINS
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
- DCHECK(isolate()->serializer_enabled());
- DCHECK_NOT_NULL(isolate()->embedded_blob());
- DCHECK_NE(0, isolate()->embedded_blob_size());
- DCHECK(Builtins::IsEmbeddedBuiltin(*code));
+ CHECK(isolate()->serializer_enabled());
+ CHECK_NOT_NULL(isolate()->embedded_blob());
+ CHECK_NE(0, isolate()->embedded_blob_size());
+ CHECK(Builtins::IsIsolateIndependentBuiltin(*code));
Handle<Code> result =
Builtins::GenerateOffHeapTrampolineFor(isolate(), off_heap_entry);
@@ -2580,9 +2689,11 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
// The trampoline code object must inherit specific flags from the original
// builtin (e.g. the safepoint-table offset). We set them manually here.
+ const bool set_is_off_heap_trampoline = true;
const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
result->initialize_flags(code->kind(), code->has_unwinding_info(),
- code->is_turbofanned(), stack_slots);
+ code->is_turbofanned(), stack_slots,
+ set_is_off_heap_trampoline);
result->set_builtin_index(code->builtin_index());
result->set_handler_table_offset(code->handler_table_offset());
result->code_data_container()->set_kind_specific_flags(
@@ -2594,7 +2705,6 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
return result;
}
-#endif
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container =
@@ -2621,7 +2731,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
heap->RecordWritesIntoCode(*new_code);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) new_code->ObjectVerify();
+ if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
DCHECK(
@@ -2656,16 +2766,16 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
- Handle<Map> map(constructor->initial_map());
+ Handle<Map> map(constructor->initial_map(), isolate());
return NewJSObjectFromMap(map, pretenure);
}
Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
Handle<JSObject> result =
NewJSObject(isolate()->object_function(), pretenure);
- Handle<Map> new_map =
- Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
- Map::SetPrototype(new_map, null_value());
+ Handle<Map> new_map = Map::Copy(
+ isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
+ Map::SetPrototype(isolate(), new_map, null_value());
JSObject::MigrateToMap(result, new_map);
return result;
}
@@ -2673,7 +2783,7 @@ Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
Handle<JSGlobalObject> Factory::NewJSGlobalObject(
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
- Handle<Map> map(constructor->initial_map());
+ Handle<Map> map(constructor->initial_map(), isolate());
DCHECK(map->is_dictionary_map());
// Make sure no field properties are described in the initial map.
@@ -2698,18 +2808,18 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
- Handle<DescriptorArray> descs(map->instance_descriptors());
+ Handle<DescriptorArray> descs(map->instance_descriptors(), isolate());
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
DCHECK_EQ(kAccessor, details.kind());
PropertyDetails d(kAccessor, details.attributes(),
PropertyCellType::kMutable);
- Handle<Name> name(descs->GetKey(i));
+ Handle<Name> name(descs->GetKey(i), isolate());
Handle<PropertyCell> cell = NewPropertyCell(name);
- cell->set_value(descs->GetValue(i));
+ cell->set_value(descs->GetStrongValue(i));
// |dictionary| already contains enough space for all properties.
- USE(GlobalDictionary::Add(dictionary, name, cell, d));
+ USE(GlobalDictionary::Add(isolate(), dictionary, name, cell, d));
}
// Allocate the global object and initialize it with the backing store.
@@ -2718,7 +2828,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
InitializeJSObjectFromMap(global, dictionary, map);
// Create a new map for the global object.
- Handle<Map> new_map = Map::CopyDropDescriptors(map);
+ Handle<Map> new_map = Map::CopyDropDescriptors(isolate(), map);
new_map->set_may_have_interesting_symbols(true);
new_map->set_is_dictionary_map(true);
@@ -2769,7 +2879,7 @@ void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
}
obj->InitializeBody(*map, start_offset, *undefined_value(), filler);
if (in_progress) {
- map->FindRootMap()->InobjectSlackTrackingStep();
+ map->FindRootMap(isolate())->InobjectSlackTrackingStep(isolate());
}
}
@@ -2814,7 +2924,8 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
JSFunction* array_function = native_context->array_function();
map = array_function->initial_map();
}
- return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
+ return Handle<JSArray>::cast(
+ NewJSObjectFromMap(handle(map, isolate()), pretenure));
}
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
@@ -2875,8 +2986,9 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
Handle<JSWeakMap> Factory::NewJSWeakMap() {
Context* native_context = isolate()->raw_native_context();
- Handle<Map> map(native_context->js_weak_map_fun()->initial_map());
- Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)));
+ Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
+ Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
+ isolate());
{
// Do not leak handles for the hash table, it would make entries strong.
HandleScope scope(isolate());
@@ -2892,7 +3004,7 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
FieldIndex index = FieldIndex::ForDescriptor(
*map, JSModuleNamespace::kToStringTagFieldIndex);
module_namespace->FastPropertyAtPut(index,
- isolate()->heap()->Module_string());
+ ReadOnlyRoots(isolate()).Module_string());
return module_namespace;
}
@@ -2900,7 +3012,7 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
DCHECK(IsResumableFunction(function->shared()->kind()));
JSFunction::EnsureHasInitialMap(function);
- Handle<Map> map(function->initial_map());
+ Handle<Map> map(function->initial_map(), isolate());
DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -2922,18 +3034,19 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
+ ReadOnlyRoots roots(isolate());
Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE, TENURED));
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
module->set_regular_imports(*regular_imports);
module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
- module->set_module_namespace(isolate()->heap()->undefined_value());
+ module->set_module_namespace(roots.undefined_value());
module->set_requested_modules(*requested_modules);
module->set_script(Script::cast(code->script()));
module->set_status(Module::kUninstantiated);
- module->set_exception(isolate()->heap()->the_hole_value());
- module->set_import_meta(isolate()->heap()->the_hole_value());
+ module->set_exception(roots.the_hole_value());
+ module->set_import_meta(roots.the_hole_value());
module->set_dfs_index(-1);
module->set_dfs_ancestor_index(-1);
return module;
@@ -2944,14 +3057,16 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
Handle<JSFunction> array_buffer_fun(
shared == SharedFlag::kShared
? isolate()->native_context()->shared_array_buffer_fun()
- : isolate()->native_context()->array_buffer_fun());
+ : isolate()->native_context()->array_buffer_fun(),
+ isolate());
Handle<Map> map(array_buffer_fun->initial_map(), isolate());
return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, pretenure));
}
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
bool done) {
- Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map(),
+ isolate());
Handle<JSIteratorResult> js_iter_result =
Handle<JSIteratorResult>::cast(NewJSObjectFromMap(map));
js_iter_result->set_value(*value);
@@ -2961,7 +3076,8 @@ Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
Handle<JSReceiver> sync_iterator, Handle<Object> next) {
- Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
+ Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map(),
+ isolate());
Handle<JSAsyncFromSyncIterator> iterator =
Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
@@ -2971,14 +3087,14 @@ Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
}
Handle<JSMap> Factory::NewJSMap() {
- Handle<Map> map(isolate()->native_context()->js_map_map());
+ Handle<Map> map(isolate()->native_context()->js_map_map(), isolate());
Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
JSMap::Initialize(js_map, isolate());
return js_map;
}
Handle<JSSet> Factory::NewJSSet() {
- Handle<Map> map(isolate()->native_context()->js_set_map());
+ Handle<Map> map(isolate()->native_context()->js_set_map(), isolate());
Handle<JSSet> js_set = Handle<JSSet>::cast(NewJSObjectFromMap(map));
JSSet::Initialize(js_set, isolate());
return js_set;
@@ -3096,7 +3212,8 @@ void SetupArrayBufferView(i::Isolate* isolate,
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
PretenureFlag pretenure) {
- Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()));
+ Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()),
+ isolate());
Handle<Map> map(typed_array_fun->initial_map(), isolate());
return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
}
@@ -3222,7 +3339,7 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
? isolate()->bound_function_with_constructor_map()
: isolate()->bound_function_without_constructor_map();
if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(map, prototype);
+ map = Map::TransitionToPrototype(isolate(), map, prototype);
}
DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
@@ -3278,11 +3395,11 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
isolate());
if (old_map->is_prototype_map()) {
- map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
+ map = Map::Copy(isolate(), map, "CopyAsPrototypeForJSGlobalProxy");
map->set_is_prototype_map(true);
}
JSObject::NotifyMapChange(old_map, map, isolate());
- old_map->NotifyLeafMapLayoutChange();
+ old_map->NotifyLeafMapLayoutChange(isolate());
// Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
@@ -3306,13 +3423,14 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfoForBuiltin(
literal->name(), Builtins::kCompileLazy, kind);
SharedFunctionInfo::InitFromFunctionLiteral(shared, literal, is_toplevel);
- SharedFunctionInfo::SetScript(shared, script, false);
+ SharedFunctionInfo::SetScript(shared, script, literal->function_literal_id(),
+ false);
return shared;
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
MessageTemplate::Template message, Handle<Object> argument,
- int start_position, int end_position, Handle<Object> script,
+ int start_position, int end_position, Handle<Script> script,
Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj(
@@ -3354,7 +3472,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> shared_name;
bool has_shared_name = maybe_name.ToHandle(&shared_name);
if (has_shared_name) {
- shared_name = String::Flatten(shared_name, TENURED);
+ shared_name = String::Flatten(isolate(), shared_name, TENURED);
}
Handle<Map> map = shared_function_info_map();
@@ -3392,9 +3510,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
- share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
+ share->set_function_identifier_or_debug_info(*undefined_value(),
+ SKIP_WRITE_BARRIER);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3403,9 +3520,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_length(0);
share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
- share->set_raw_start_position_and_type(0);
- share->set_raw_end_position(0);
- share->set_function_token_position(0);
+ share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
share->CalculateConstructAsBuiltin();
@@ -3414,13 +3529,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->clear_padding();
}
// Link into the list.
- Handle<Object> new_noscript_list =
- FixedArrayOfWeakCells::Add(noscript_shared_function_infos(), share);
+ Handle<Object> new_noscript_list = FixedArrayOfWeakCells::Add(
+ isolate(), noscript_shared_function_infos(), share);
isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
- DCHECK_EQ(SharedFunctionInfo::kNoDebuggingId, share->debugging_id());
#ifdef VERIFY_HEAP
- share->SharedFunctionInfoVerify();
+ share->SharedFunctionInfoVerify(isolate());
#endif
return share;
}
@@ -3497,12 +3611,17 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, TENURED));
debug_info->set_flags(DebugInfo::kNone);
debug_info->set_shared(*shared);
- debug_info->set_debugger_hints(shared->debugger_hints());
- debug_info->set_debug_bytecode_array(heap->undefined_value());
- debug_info->set_break_points(heap->empty_fixed_array());
+ debug_info->set_debugger_hints(0);
+ DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
+ DCHECK(!shared->HasDebugInfo());
+ debug_info->set_function_identifier(
+ shared->function_identifier_or_debug_info());
+ debug_info->set_original_bytecode_array(
+ ReadOnlyRoots(heap).undefined_value());
+ debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
// Link debug info to function.
- shared->set_debug_info(*debug_info);
+ shared->SetDebugInfo(*debug_info);
return debug_info;
}
@@ -3578,10 +3697,12 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
DCHECK(!isolate()->has_pending_exception());
Handle<JSObject> result = NewJSObjectFromMap(map);
Handle<Smi> value(Smi::FromInt(length), isolate());
- Object::SetProperty(result, length_string(), value, LanguageMode::kStrict)
+ Object::SetProperty(isolate(), result, length_string(), value,
+ LanguageMode::kStrict)
.Assert();
if (!strict_mode_callee) {
- Object::SetProperty(result, callee_string(), callee, LanguageMode::kStrict)
+ Object::SetProperty(isolate(), result, callee_string(), callee,
+ LanguageMode::kStrict)
.Assert();
}
return result;
@@ -3722,9 +3843,11 @@ Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
}
Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
- if (Name::Equals(name, undefined_string())) return undefined_value();
- if (Name::Equals(name, NaN_string())) return nan_value();
- if (Name::Equals(name, Infinity_string())) return infinity_value();
+ if (Name::Equals(isolate(), name, undefined_string())) {
+ return undefined_value();
+ }
+ if (Name::Equals(isolate(), name, NaN_string())) return nan_value();
+ if (Name::Equals(isolate(), name, Infinity_string())) return infinity_value();
return Handle<Object>::null();
}
@@ -3761,13 +3884,13 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
map->set_is_callable(true);
Handle<JSFunction> empty_function;
if (maybe_empty_function.ToHandle(&empty_function)) {
- Map::SetPrototype(map, empty_function);
+ Map::SetPrototype(isolate(), map, empty_function);
}
//
// Setup descriptors array.
//
- Map::EnsureDescriptorSlack(map, descriptors_count);
+ Map::EnsureDescriptorSlack(isolate(), map, descriptors_count);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -3838,12 +3961,12 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
map->set_is_callable(true);
- Map::SetPrototype(map, empty_function);
+ Map::SetPrototype(isolate(), map, empty_function);
//
// Setup descriptors array.
//
- Map::EnsureDescriptorSlack(map, descriptors_count);
+ Map::EnsureDescriptorSlack(isolate(), map, descriptors_count);
PropertyAttributes rw_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
@@ -3903,12 +4026,12 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
map->set_is_constructor(true);
map->set_is_prototype_map(true);
map->set_is_callable(true);
- Map::SetPrototype(map, empty_function);
+ Map::SetPrototype(isolate(), map, empty_function);
//
// Setup descriptors array.
//
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(isolate(), map, 2);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -3954,7 +4077,7 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
: side_effect_call_handler_info_map();
Handle<CallHandlerInfo> info(CallHandlerInfo::cast(New(map, TENURED)),
isolate());
- Object* undefined_value = isolate()->heap()->undefined_value();
+ Object* undefined_value = ReadOnlyRoots(isolate()).undefined_value();
info->set_callback(undefined_value);
info->set_js_callback(undefined_value);
info->set_data(undefined_value);
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 073f0fbcf5..b73e8a922a 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -23,13 +23,14 @@ namespace internal {
// Forward declarations.
class AliasedArgumentsEntry;
-class BoilerplateDescription;
+class ObjectBoilerplateDescription;
class BreakPoint;
class BreakPointInfo;
class CallableTask;
class CallbackTask;
class CallHandlerInfo;
-class ConstantElementsPair;
+class Expression;
+class ArrayBoilerplateDescription;
class CoverageInfo;
class DebugInfo;
class EnumCache;
@@ -50,6 +51,8 @@ class RegExpMatchInfo;
class ScriptContextTable;
class StoreHandler;
class TemplateObjectDescription;
+class UncompiledDataWithoutPreParsedScope;
+class UncompiledDataWithPreParsedScope;
class WasmExportedFunctionData;
struct SourceRange;
template <typename T>
@@ -143,10 +146,8 @@ class V8_EXPORT_PRIVATE Factory {
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
- Handle<BoilerplateDescription> NewBoilerplateDescription(int boilerplate,
- int all_properties,
- int index_keys,
- bool has_seen_proto);
+ Handle<ObjectBoilerplateDescription> NewObjectBoilerplateDescription(
+ int boilerplate, int all_properties, int index_keys, bool has_seen_proto);
// Allocate a new uninitialized fixed double array.
// The function returns a pre-allocated empty fixed array for length = 0,
@@ -190,8 +191,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
Handle<Object> value3, PretenureFlag pretenure);
- // Create a new ConstantElementsPair struct.
- Handle<ConstantElementsPair> NewConstantElementsPair(
+ // Create a new ArrayBoilerplateDescription struct.
+ Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
// Create a new TemplateObjectDescription struct.
@@ -391,6 +392,13 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Context> NewBlockContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info);
+ // Create a context that's used by builtin functions.
+ //
+ // These are similar to function context but don't have a previous
+ // context or any scope info. These are used to store spec defined
+ // context values.
+ Handle<Context> NewBuiltinContext(Handle<Context> native_context, int length);
+
Handle<Struct> NewStruct(InstanceType type,
PretenureFlag pretenure = NOT_TENURED);
@@ -401,6 +409,9 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Script> NewScript(Handle<String> source,
PretenureFlag tenure = TENURED);
+ Handle<Script> NewScriptWithId(Handle<String> source, int script_id,
+ PretenureFlag tenure = TENURED);
+ Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
@@ -454,7 +465,7 @@ class V8_EXPORT_PRIVATE Factory {
int slack = 0);
// Allocate a tenured AllocationSite. Its payload is null.
- Handle<AllocationSite> NewAllocationSite();
+ Handle<AllocationSite> NewAllocationSite(bool with_weak_next);
// Allocates and initializes a new Map.
Handle<Map> NewMap(InstanceType type, int instance_size,
@@ -525,18 +536,21 @@ class V8_EXPORT_PRIVATE Factory {
inline Handle<Object> NewNumberFromInt64(
int64_t value, PretenureFlag pretenure = NOT_TENURED);
inline Handle<HeapNumber> NewHeapNumber(
- double value, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
+ double value, PretenureFlag pretenure = NOT_TENURED);
inline Handle<HeapNumber> NewHeapNumberFromBits(
- uint64_t bits, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
- // Creates mutable heap number object with value field set to hole NaN.
- inline Handle<HeapNumber> NewMutableHeapNumber(
- PretenureFlag pretenure = NOT_TENURED);
+ uint64_t bits, PretenureFlag pretenure = NOT_TENURED);
// Creates heap number object with not yet set value field.
- Handle<HeapNumber> NewHeapNumber(MutableMode mode,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewHeapNumber(PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<MutableHeapNumber> NewMutableHeapNumber(
+ PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<MutableHeapNumber> NewMutableHeapNumber(
+ double value, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<MutableHeapNumber> NewMutableHeapNumberFromBits(
+ uint64_t bits, PretenureFlag pretenure = NOT_TENURED);
+ inline Handle<MutableHeapNumber> NewMutableHeapNumberWithHoleNaN(
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
@@ -710,7 +724,16 @@ class V8_EXPORT_PRIVATE Factory {
Handle<ModuleInfo> NewModuleInfo();
- Handle<PreParsedScopeData> NewPreParsedScopeData();
+ Handle<PreParsedScopeData> NewPreParsedScopeData(int length);
+
+ Handle<UncompiledDataWithoutPreParsedScope>
+ NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+ int32_t end_position,
+ int32_t function_literal_id);
+
+ Handle<UncompiledDataWithPreParsedScope> NewUncompiledDataWithPreParsedScope(
+ int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<PreParsedScopeData>);
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -755,12 +778,10 @@ class V8_EXPORT_PRIVATE Factory {
// initialization by the caller.
Handle<Code> NewCodeForDeserialization(uint32_t size);
-#ifdef V8_EMBEDDED_BUILTINS
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry);
-#endif
Handle<Code> CopyCode(Handle<Code> code);
@@ -809,6 +830,11 @@ class V8_EXPORT_PRIVATE Factory {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Handle<Map> name##_map();
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
+#undef ALLOCATION_SITE_MAP_ACCESSOR
+
#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
inline Handle<Map> name##_map();
DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
@@ -873,7 +899,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Object> argument,
int start_position,
int end_position,
- Handle<Object> script,
+ Handle<Script> script,
Handle<Object> stack_frames);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 9900b343fd..60a3b256c8 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -253,9 +253,6 @@ void GCTracer::Start(GarbageCollector collector,
current_.scopes[i] = 0;
}
- size_t committed_memory = heap_->CommittedMemory() / KB;
- size_t used_memory = current_.start_object_size / KB;
-
Counters* counters = heap_->isolate()->counters();
if (Heap::IsYoungGenerationCollector(collector)) {
@@ -263,9 +260,6 @@ void GCTracer::Start(GarbageCollector collector,
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
}
- counters->aggregated_memory_heap_committed()->AddSample(start_time,
- committed_memory);
- counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
}
void GCTracer::ResetIncrementalMarkingCounters() {
@@ -283,9 +277,11 @@ void GCTracer::ResetIncrementalMarkingCounters() {
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- heap_->isolate()->PrintWithTimestamp("[Finished reentrant %s during %s.]\n",
- Heap::CollectorName(collector),
- current_.TypeName(false));
+ if (FLAG_trace_gc_verbose) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[Finished reentrant %s during %s.]\n",
+ Heap::CollectorName(collector), current_.TypeName(false));
+ }
return;
}
@@ -305,13 +301,6 @@ void GCTracer::Stop(GarbageCollector collector) {
AddAllocation(current_.end_time);
- size_t committed_memory = heap_->CommittedMemory() / KB;
- size_t used_memory = current_.end_object_size / KB;
- heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
- current_.end_time, committed_memory);
- heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
- current_.end_time, used_memory);
-
double duration = current_.end_time - current_.start_time;
switch (current_.type) {
@@ -532,6 +521,7 @@ void GCTracer::PrintNVP() const {
"scavenge.weak_global_handles.identify=%.2f "
"scavenge.weak_global_handles.process=%.2f "
"scavenge.parallel=%.2f "
+ "scavenge.update_refs=%.2f "
"background.scavenge.parallel=%.2f "
"background.array_buffer_free=%.2f "
"background.store_buffer=%.2f "
@@ -580,6 +570,7 @@ void GCTracer::PrintNVP() const {
current_
.scopes[Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL],
+ current_.scopes[Scope::SCAVENGER_SCAVENGE_UPDATE_REFS],
current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL],
current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
@@ -697,7 +688,9 @@ void GCTracer::PrintNVP() const {
"mark.roots=%.1f "
"mark.main=%.1f "
"mark.weak_closure=%.1f "
- "mark.weak_closure.ephemeral=%.1f "
+ "mark.weak_closure.ephemeron=%.1f "
+ "mark.weak_closure.ephemeron.marking=%.1f "
+ "mark.weak_closure.ephemeron.linear=%.1f "
"mark.weak_closure.weak_handles=%.1f "
"mark.weak_closure.weak_roots=%.1f "
"mark.weak_closure.harmony=%.1f "
@@ -791,7 +784,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_MAIN],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
@@ -1100,5 +1095,27 @@ void GCTracer::AddBackgroundScopeSample(
}
}
+void GCTracer::RecordMarkCompactHistograms(HistogramTimer* gc_timer) {
+ Counters* counters = heap_->isolate()->counters();
+ if (gc_timer == counters->gc_finalize()) {
+ DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
+ counters->gc_finalize_clear()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_CLEAR]));
+ counters->gc_finalize_epilogue()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_EPILOGUE]));
+ counters->gc_finalize_evacuate()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_EVACUATE]));
+ counters->gc_finalize_finish()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_FINISH]));
+ counters->gc_finalize_mark()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_MARK]));
+ counters->gc_finalize_prologue()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_PROLOGUE]));
+ counters->gc_finalize_sweep()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_SWEEP]));
+ DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index f35fa50d45..62e077be50 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -79,6 +79,8 @@ class V8_EXPORT_PRIVATE GCTracer {
LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
+ FIRST_TOP_MC_SCOPE = MC_CLEAR,
+ LAST_TOP_MC_SCOPE = MC_SWEEP,
FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
};
@@ -319,6 +321,8 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter);
+ void RecordMarkCompactHistograms(HistogramTimer* gc_timer);
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -334,6 +338,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
+ FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
struct BackgroundCounter {
double total_duration_ms;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
new file mode 100644
index 0000000000..41f1a6bb3a
--- /dev/null
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -0,0 +1,160 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/heap-controller.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+const double HeapController::kMinHeapGrowingFactor = 1.1;
+const double HeapController::kMaxHeapGrowingFactor = 4.0;
+const double HeapController::kConservativeHeapGrowingFactor = 1.3;
+const double HeapController::kTargetMutatorUtilization = 0.97;
+
+// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
+// (mutator speed), this function returns the heap growing factor that will
+// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
+// remain the same until the next GC.
+//
+// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
+// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
+// time spent in the garbage collector.
+//
+// Let MU be kTargetMutatorUtilisation, the desired mutator utilization for the
+// time-frame from the end of the current GC to the end of the next GC. Based
+// on the MU we can compute the heap growing factor F as
+//
+// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
+//
+// This formula can be derived as follows.
+//
+// F = Limit / Live by definition, where the Limit is the allocation limit,
+// and the Live is size of live objects.
+// Let’s assume that we already know the Limit. Then:
+// TG = Limit / gc_speed
+// TM = (TM + TG) * MU, by definition of MU.
+// TM = TG * MU / (1 - MU)
+// TM = Limit * MU / (gc_speed * (1 - MU))
+// On the other hand, if the allocation throughput remains constant:
+// Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
+// Solving it for TM, we get
+// TM = (Limit - Live) / mutator_speed
+// Combining the two equation for TM:
+// (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
+// (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
+// substitute R = gc_speed / mutator_speed
+// (Limit - Live) = Limit * MU / (R * (1 - MU))
+// substitute F = Limit / Live
+// F - 1 = F * MU / (R * (1 - MU))
+// F - F * MU / (R * (1 - MU)) = 1
+// F * (1 - MU / (R * (1 - MU))) = 1
+// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
+// F = R * (1 - MU) / (R * (1 - MU) - MU)
+double HeapController::HeapGrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor) {
+ DCHECK_LE(kMinHeapGrowingFactor, max_factor);
+ DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
+ if (gc_speed == 0 || mutator_speed == 0) return max_factor;
+
+ const double speed_ratio = gc_speed / mutator_speed;
+ const double mu = kTargetMutatorUtilization;
+
+ const double a = speed_ratio * (1 - mu);
+ const double b = speed_ratio * (1 - mu) - mu;
+
+ // The factor is a / b, but we need to check for small b first.
+ double factor = (a < b * max_factor) ? a / b : max_factor;
+ factor = Min(factor, max_factor);
+ factor = Max(factor, kMinHeapGrowingFactor);
+ return factor;
+}
+
+double HeapController::MaxHeapGrowingFactor(size_t max_old_generation_size) {
+ const double min_small_factor = 1.3;
+ const double max_small_factor = 2.0;
+ const double high_factor = 4.0;
+
+ size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
+ max_old_generation_size_in_mb =
+ Max(max_old_generation_size_in_mb,
+ static_cast<size_t>(kMinOldGenerationSize));
+
+ // If we are on a device with lots of memory, we allow a high heap
+ // growing factor.
+ if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
+ return high_factor;
+ }
+
+ DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
+ DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
+
+ // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+ double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
+ (max_small_factor - min_small_factor) /
+ (kMaxOldGenerationSize - kMinOldGenerationSize) +
+ min_small_factor;
+ return factor;
+}
+
+size_t HeapController::CalculateOldGenerationAllocationLimit(
+ size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
+ double mutator_speed, size_t new_space_capacity,
+ Heap::HeapGrowingMode growing_mode) {
+ double max_factor = MaxHeapGrowingFactor(max_old_generation_size);
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
+
+ if (FLAG_trace_gc_verbose) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "(gc=%.f, mutator=%.f)\n",
+ factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
+ mutator_speed);
+ }
+
+ if (growing_mode == Heap::HeapGrowingMode::kConservative ||
+ growing_mode == Heap::HeapGrowingMode::kSlow) {
+ factor = Min(factor, kConservativeHeapGrowingFactor);
+ }
+
+ if (growing_mode == Heap::HeapGrowingMode::kMinimal) {
+ factor = kMinHeapGrowingFactor;
+ }
+
+ if (FLAG_heap_growing_percent > 0) {
+ factor = 1.0 + FLAG_heap_growing_percent / 100.0;
+ }
+
+ CHECK_LT(1.0, factor);
+ CHECK_LT(0, old_gen_size);
+ uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
+ limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+ MinimumAllocationLimitGrowingStep(growing_mode));
+ limit += new_space_capacity;
+ uint64_t halfway_to_the_max =
+ (static_cast<uint64_t>(old_gen_size) + max_old_generation_size) / 2;
+ size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
+
+ if (FLAG_trace_gc_verbose) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Heap Controller Limit: old size: %" PRIuS " KB, new limit: %" PRIuS
+ " KB (%.1f)\n",
+ old_gen_size / KB, result / KB, factor);
+ }
+
+ return result;
+}
+
+size_t HeapController::MinimumAllocationLimitGrowingStep(
+ Heap::HeapGrowingMode growing_mode) {
+ const size_t kRegularAllocationLimitGrowingStep = 8;
+ const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+ size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+ return limit * (growing_mode == Heap::HeapGrowingMode::kConservative
+ ? kLowMemoryAllocationLimitGrowingStep
+ : kRegularAllocationLimitGrowingStep);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
new file mode 100644
index 0000000000..717c97a5b8
--- /dev/null
+++ b/deps/v8/src/heap/heap-controller.h
@@ -0,0 +1,55 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_CONTROLLER_H_
+#define V8_HEAP_HEAP_CONTROLLER_H_
+
+#include <cstddef>
+#include "src/allocation.h"
+#include "src/heap/heap.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace internal {
+
+class HeapController {
+ public:
+ explicit HeapController(Heap* heap) : heap_(heap) {}
+
+ // Computes the allocation limit to trigger the next full garbage collection.
+ V8_EXPORT_PRIVATE size_t CalculateOldGenerationAllocationLimit(
+ size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
+ double mutator_speed, size_t new_space_capacity,
+ Heap::HeapGrowingMode growing_mode);
+
+ size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
+
+ // The old space size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const size_t kMinOldGenerationSize = 128 * Heap::kPointerMultiplier;
+ static const size_t kMaxOldGenerationSize = 1024 * Heap::kPointerMultiplier;
+
+ private:
+ FRIEND_TEST(HeapController, HeapGrowingFactor);
+ FRIEND_TEST(HeapController, MaxHeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
+
+ V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kConservativeHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
+ size_t max_old_generation_size);
+ V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
+ double mutator_speed,
+ double max_factor);
+
+ static const double kTargetMutatorUtilization;
+
+ Heap* heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_CONTROLLER_H_
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 836923f31a..5ad1a1bdd6 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -47,14 +47,9 @@ HeapObject* AllocationResult::ToObjectChecked() {
#define ROOT_ACCESSOR(type, name, camel_name) \
type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
-ROOT_LIST(ROOT_ACCESSOR)
+MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
-STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
Map* Heap::name##_map() { \
return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
@@ -62,22 +57,6 @@ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
#undef DATA_HANDLER_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
-INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) \
- Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) \
- Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
AccessorInfo* Heap::accessor_name##_accessor() { \
return AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]); \
@@ -147,13 +126,19 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
#endif
bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
+ bool new_large_object = FLAG_young_generation_large_objects &&
+ size_in_bytes > kMaxNewSpaceHeapObjectSize;
HeapObject* object = nullptr;
AllocationResult allocation;
if (NEW_SPACE == space) {
if (large_object) {
space = LO_SPACE;
} else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
+ if (new_large_object) {
+ allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+ } else {
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
+ }
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -314,53 +299,68 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
+// static
bool Heap::InNewSpace(Object* object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
}
+// static
bool Heap::InNewSpace(MaybeObject* object) {
HeapObject* heap_object;
return object->ToStrongOrWeakHeapObject(&heap_object) &&
InNewSpace(heap_object);
}
+// static
bool Heap::InNewSpace(HeapObject* heap_object) {
// Inlined check from NewSpace::Contains.
bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
- DCHECK(!result || // Either not in new space
- gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(heap_object)); // ... or in to-space (where we allocate).
+#ifdef DEBUG
+ // If in NEW_SPACE, then check we're either not in the middle of GC or the
+ // object is in to-space.
+ if (result) {
+ // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
+ Heap* heap = Heap::FromWritableHeapObject(heap_object);
+ DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
+ }
+#endif
return result;
}
+// static
bool Heap::InFromSpace(Object* object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
+// static
bool Heap::InFromSpace(MaybeObject* object) {
HeapObject* heap_object;
return object->ToStrongOrWeakHeapObject(&heap_object) &&
InFromSpace(heap_object);
}
+// static
bool Heap::InFromSpace(HeapObject* heap_object) {
return MemoryChunk::FromHeapObject(heap_object)
->IsFlagSet(Page::IN_FROM_SPACE);
}
+// static
bool Heap::InToSpace(Object* object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
}
+// static
bool Heap::InToSpace(MaybeObject* object) {
HeapObject* heap_object;
return object->ToStrongOrWeakHeapObject(&heap_object) &&
InToSpace(heap_object);
}
+// static
bool Heap::InToSpace(HeapObject* heap_object) {
return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
}
@@ -379,6 +379,19 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
+// static
+Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
+ // find a heap. The exception is when the ReadOnlySpace is writeable, during
+ // bootstrapping, so explicitly allow this case.
+ SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
+ static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+ Heap* heap = chunk->heap();
+ SLOW_DCHECK(heap != nullptr);
+ return heap;
+}
+
bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
@@ -440,7 +453,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
- if (candidate_map != allocation_memento_map()) {
+ if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
return nullptr;
}
@@ -489,13 +502,12 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
- DCHECK(InFromSpace(object) ||
- (InToSpace(object) &&
- Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
- (!InNewSpace(object) &&
- Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
+ DCHECK(
+ InFromSpace(object) ||
+ (InToSpace(object) && Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
+ (!InNewSpace(object) && Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(map->instance_type()))
return;
@@ -518,7 +530,7 @@ Isolate* Heap::isolate() {
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
- if (heap_->InNewSpace(string)) {
+ if (InNewSpace(string)) {
new_space_strings_.push_back(string);
} else {
old_space_strings_.push_back(string);
@@ -526,16 +538,17 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
Oddball* Heap::ToBoolean(bool condition) {
- return condition ? true_value() : false_value();
+ ReadOnlyRoots roots(this);
+ return condition ? roots.true_value() : roots.false_value();
}
-uint32_t Heap::HashSeed() {
- uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+uint64_t Heap::HashSeed() {
+ uint64_t seed;
+ hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
-
int Heap::NextScriptId() {
int last_id = last_script_id()->value();
if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
@@ -546,8 +559,8 @@ int Heap::NextScriptId() {
int Heap::NextDebuggingId() {
int last_id = last_debugging_id()->value();
- if (last_id == SharedFunctionInfo::DebuggingIdBits::kMax) {
- last_id = SharedFunctionInfo::kNoDebuggingId;
+ if (last_id == DebugInfo::DebuggingIdBits::kMax) {
+ last_id = DebugInfo::kNoDebuggingId;
}
last_id++;
set_last_debugging_id(Smi::FromInt(last_id));
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index d8fad6dd6f..6fd93f659f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -30,6 +30,7 @@
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
@@ -164,6 +165,7 @@ Heap::Heap()
code_space_(nullptr),
map_space_(nullptr),
lo_space_(nullptr),
+ new_lo_space_(nullptr),
read_only_space_(nullptr),
write_protect_code_memory_(false),
code_space_memory_modification_scope_depth_(0),
@@ -240,7 +242,6 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(nullptr);
set_allocation_sites_list(Smi::kZero);
- set_encountered_weak_collections(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(kNullAddress, false);
@@ -252,6 +253,15 @@ size_t Heap::MaxReserved() {
(2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
}
+size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
+ const size_t old_space_physical_memory_factor = 4;
+ size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
+ old_space_physical_memory_factor *
+ kPointerMultiplier);
+ return Max(Min(computed_size, HeapController::kMaxOldGenerationSize),
+ HeapController::kMinOldGenerationSize);
+}
+
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
@@ -281,6 +291,13 @@ size_t Heap::CommittedOldGenerationMemory() {
return total + lo_space_->Size();
}
+size_t Heap::CommittedMemoryOfHeapAndUnmapper() {
+ if (!HasBeenSetUp()) return 0;
+
+ return CommittedMemory() +
+ memory_allocator()->unmapper()->CommittedBufferedMemory();
+}
+
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
@@ -336,9 +353,8 @@ bool Heap::CanExpandOldGeneration(size_t size) {
}
bool Heap::HasBeenSetUp() {
- return old_space_ != nullptr && code_space_ != nullptr &&
- map_space_ != nullptr && lo_space_ != nullptr &&
- read_only_space_ != nullptr;
+ // We will always have a new space when the heap is set up.
+ return new_space_ != nullptr;
}
@@ -382,13 +398,14 @@ void Heap::SetGCState(HeapState state) {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
- " KB,"
- " available: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_,
+ "Memory allocator, used: %6" PRIuS
+ " KB,"
+ " available: %6" PRIuS " KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
PrintIsolate(isolate_,
- "Read-only space, used: %6" PRIuS
+ "Read-only space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
@@ -396,48 +413,67 @@ void Heap::PrintShortHeapStatistics() {
read_only_space_->Size() / KB,
read_only_space_->Available() / KB,
read_only_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "New space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_,
+ "New space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
new_space_->Size() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Old space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_,
+ "New large object space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
+ new_lo_space_->SizeOfObjects() / KB,
+ new_lo_space_->Available() / KB,
+ new_lo_space_->CommittedMemory() / KB);
+ PrintIsolate(isolate_,
+ "Old space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Code space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS "KB\n",
+ PrintIsolate(isolate_,
+ "Code space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS "KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Map space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_,
+ "Map space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_,
+ "Large object space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS "KB\n",
+ PrintIsolate(isolate_,
+ "All spaces, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS "KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
+ PrintIsolate(isolate_,
+ "Unmapper buffering %d chunks of committed: %6" PRIuS " KB\n",
+ memory_allocator()->unmapper()->NumberOfChunks(),
+ CommittedMemoryOfHeapAndUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
@@ -479,7 +515,7 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
} else {
int index = 0;
Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
- handle(retaining_path_targets(), isolate()), object, &index);
+ isolate(), handle(retaining_path_targets(), isolate()), object, &index);
set_retaining_path_targets(*array);
retaining_path_target_option_[index] = option;
}
@@ -508,16 +544,16 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
HeapObject* object = target;
std::vector<std::pair<HeapObject*, bool>> retaining_path;
Root root = Root::kUnknown;
- bool ephemeral = false;
+ bool ephemeron = false;
while (true) {
- retaining_path.push_back(std::make_pair(object, ephemeral));
- if (option == RetainingPathOption::kTrackEphemeralPath &&
- ephemeral_retainer_.count(object)) {
- object = ephemeral_retainer_[object];
- ephemeral = true;
+ retaining_path.push_back(std::make_pair(object, ephemeron));
+ if (option == RetainingPathOption::kTrackEphemeronPath &&
+ ephemeron_retainer_.count(object)) {
+ object = ephemeron_retainer_[object];
+ ephemeron = true;
} else if (retainer_.count(object)) {
object = retainer_[object];
- ephemeral = false;
+ ephemeron = false;
} else {
if (retaining_root_.count(object)) {
root = retaining_root_[object];
@@ -528,11 +564,11 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
int distance = static_cast<int>(retaining_path.size());
for (auto node : retaining_path) {
HeapObject* object = node.first;
- bool ephemeral = node.second;
+ bool ephemeron = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Distance from root %d%s: ", distance,
- ephemeral ? " (ephemeral)" : "");
+ ephemeron ? " (ephemeron)" : "");
object->ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
@@ -553,20 +589,20 @@ void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
RetainingPathOption option = RetainingPathOption::kDefault;
if (IsRetainingPathTarget(object, &option)) {
// Check if the retaining path was already printed in
- // AddEphemeralRetainer().
- if (ephemeral_retainer_.count(object) == 0 ||
+ // AddEphemeronRetainer().
+ if (ephemeron_retainer_.count(object) == 0 ||
option == RetainingPathOption::kDefault) {
PrintRetainingPath(object, option);
}
}
}
-void Heap::AddEphemeralRetainer(HeapObject* retainer, HeapObject* object) {
- if (ephemeral_retainer_.count(object)) return;
- ephemeral_retainer_[object] = retainer;
+void Heap::AddEphemeronRetainer(HeapObject* retainer, HeapObject* object) {
+ if (ephemeron_retainer_.count(object)) return;
+ ephemeron_retainer_[object] = retainer;
RetainingPathOption option = RetainingPathOption::kDefault;
if (IsRetainingPathTarget(object, &option) &&
- option == RetainingPathOption::kTrackEphemeralPath) {
+ option == RetainingPathOption::kTrackEphemeronPath) {
// Check if the retaining path was already printed in AddRetainer().
if (retainer_.count(object) == 0) {
PrintRetainingPath(object, option);
@@ -627,7 +663,7 @@ void Heap::GarbageCollectionPrologue() {
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
- ephemeral_retainer_.clear();
+ ephemeron_retainer_.clear();
retaining_root_.clear();
}
}
@@ -654,6 +690,8 @@ const char* Heap::GetSpaceName(int idx) {
return "code_space";
case LO_SPACE:
return "large_object_space";
+ case NEW_LO_SPACE:
+ return "new_large_object_space";
case RO_SPACE:
return "read_only_space";
default:
@@ -851,17 +889,16 @@ void Heap::ProcessPretenuringFeedback() {
// Step 2: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) {
- Object* list_element = allocation_sites_list();
- while (list_element->IsAllocationSite()) {
- site = AllocationSite::cast(list_element);
- DCHECK(site->IsAllocationSite());
- allocation_sites++;
- if (site->IsMaybeTenure()) {
- site->set_deopt_dependent_code(true);
- trigger_deoptimization = true;
- }
- list_element = site->weak_next();
- }
+ ForeachAllocationSite(
+ allocation_sites_list(),
+ [&allocation_sites, &trigger_deoptimization](AllocationSite* site) {
+ DCHECK(site->IsAllocationSite());
+ allocation_sites++;
+ if (site->IsMaybeTenure()) {
+ site->set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
+ }
+ });
}
if (trigger_deoptimization) {
@@ -888,36 +925,34 @@ void Heap::ProcessPretenuringFeedback() {
void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
CodePageMemoryModificationScope modification_scope(chunk);
- code->InvalidateEmbeddedObjects();
+ code->InvalidateEmbeddedObjects(this);
}
void Heap::InvalidateCodeDeoptimizationData(Code* code) {
MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
CodePageMemoryModificationScope modification_scope(chunk);
- code->set_deoptimization_data(empty_fixed_array());
+ code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
- Object* list_element = allocation_sites_list();
- while (list_element->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(list_element);
+
+ ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite* site) {
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
site->set_deopt_dependent_code(false);
}
- list_element = site->weak_next();
- }
+ });
+
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
- // In release mode, we only zap the from space under heap verification.
- if (Heap::ShouldZapGarbage()) {
+ if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
ZapFromSpace();
}
@@ -1058,7 +1093,7 @@ void Heap::HandleGCRequest() {
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
- FinalizeIncrementalMarking(
+ FinalizeIncrementalMarkingIncrementally(
GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
}
}
@@ -1068,41 +1103,6 @@ void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}
-void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
- if (FLAG_trace_incremental_marking) {
- isolate()->PrintWithTimestamp(
- "[IncrementalMarking] (%s).\n",
- Heap::GarbageCollectionReasonToString(gc_reason));
- }
-
- HistogramTimerScope incremental_marking_scope(
- isolate()->counters()->gc_incremental_marking_finalize());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
-
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
- incremental_marking()->FinalizeIncrementally();
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
-}
-
HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
if (isolate_->IsIsolateInBackground()) {
@@ -1175,7 +1175,8 @@ intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
return 0;
}
-void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
+void ReportDuplicates(Isolate* isolate, int size,
+ std::vector<HeapObject*>& objects) {
if (objects.size() == 0) return;
sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
@@ -1273,7 +1274,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(it->first, it->second);
+ ReportDuplicates(isolate(), it->first, it->second);
}
}
}
@@ -1342,6 +1343,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
InvokeNearHeapLimitCallback();
}
+ // Ensure that all pending phantom callbacks are invoked.
+ isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
+
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
@@ -1384,11 +1388,18 @@ bool Heap::CollectGarbage(AllocationSpace space,
TRACE_EVENT0("v8", gc_type_timer->name());
HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
- HistogramTimerScope histogram_timer_priority_scope(
- gc_type_priority_timer);
+ OptionalHistogramTimerScopeMode mode =
+ isolate_->IsMemorySavingsModeActive()
+ ? OptionalHistogramTimerScopeMode::DONT_TAKE_TIME
+ : OptionalHistogramTimerScopeMode::TAKE_TIME;
+ OptionalHistogramTimerScope histogram_timer_priority_scope(
+ gc_type_priority_timer, mode);
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
+ if (collector == MARK_COMPACTOR) {
+ tracer()->RecordMarkCompactHistograms(gc_type_timer);
+ }
}
GarbageCollectionEpilogue();
@@ -1414,7 +1425,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
- memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
}
tracer()->Stop(collector);
@@ -1492,7 +1502,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len, WriteBarrierMode mode) {
if (len == 0) return;
- DCHECK(array->map() != fixed_cow_array_map());
+ DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
Object** dst = array->data_start() + dst_index;
Object** src = array->data_start() + src_index;
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
@@ -1519,15 +1529,16 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
+ explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
+
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
DCHECK(!HasWeakHeapObjectTag(*p));
if ((*p)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*p);
- Isolate* isolate = object->GetIsolate();
// Check that the string is actually internalized.
- CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
+ CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
object->IsInternalizedString());
}
}
@@ -1536,12 +1547,14 @@ class StringTableVerifier : public ObjectVisitor {
MaybeObject** end) override {
UNREACHABLE();
}
-};
+ private:
+ Isolate* isolate_;
+};
-static void VerifyStringTable(Heap* heap) {
- StringTableVerifier verifier;
- heap->string_table()->IterateElements(&verifier);
+static void VerifyStringTable(Isolate* isolate) {
+ StringTableVerifier verifier(isolate);
+ isolate->heap()->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -1555,7 +1568,10 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
space < SerializerDeserializer::kNumberOfSpaces; space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
- if (reservation->at(0).size == 0) continue;
+ if (reservation->at(0).size == 0) {
+ DCHECK_EQ(1, reservation->size());
+ continue;
+ }
bool perform_gc = false;
if (space == MAP_SPACE) {
// We allocate each map individually to avoid fragmentation.
@@ -1690,7 +1706,7 @@ bool Heap::PerformGarbageCollection(
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable(this);
+ VerifyStringTable(this->isolate());
}
#endif
@@ -1775,7 +1791,7 @@ bool Heap::PerformGarbageCollection(
}
gc_post_processing_depth_--;
- isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
+ isolate_->eternal_handles()->PostGarbageCollectionProcessing();
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
@@ -1788,12 +1804,22 @@ bool Heap::PerformGarbageCollection(
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
- SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+
+ size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
+ new_space()->Capacity(), CurrentHeapGrowingMode());
+ old_generation_allocation_limit_ = new_limit;
+
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
- DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+ size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
+ new_space()->Capacity(), CurrentHeapGrowingMode());
+ if (new_limit < old_generation_allocation_limit_) {
+ old_generation_allocation_limit_ = new_limit;
+ }
}
{
@@ -1809,7 +1835,7 @@ bool Heap::PerformGarbageCollection(
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable(this);
+ VerifyStringTable(this->isolate());
}
#endif
@@ -1936,16 +1962,14 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return heap->InFromSpace(*p) &&
+ return Heap::InFromSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
- explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
-
virtual Object* RetainAs(Object* object) {
- if (!heap_->InFromSpace(object)) {
+ if (!Heap::InFromSpace(object)) {
return object;
}
@@ -1955,9 +1979,6 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
}
return nullptr;
}
-
- private:
- Heap* heap_;
};
void Heap::EvacuateYoungGeneration() {
@@ -1975,10 +1996,10 @@ void Heap::EvacuateYoungGeneration() {
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Move pages from new->old generation.
- PageRange range(new_space()->bottom(), new_space()->top());
+ PageRange range(new_space()->first_allocatable_address(), new_space()->top());
for (auto it = range.begin(); it != range.end();) {
Page* p = (*++it)->prev_page();
- p->Unlink();
+ new_space()->from_space().RemovePage(p);
Page::ConvertNewToOld(p);
if (incremental_marking()->IsMarking())
mark_compact_collector()->RecordLiveSlotsOnPage(p);
@@ -2137,7 +2158,7 @@ void Heap::Scavenge() {
job.AddItem(new PageScavengingItem(chunk));
});
- RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
+ RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
{
// Identify weak unmodified handles. Requires an unmodified graph.
@@ -2153,16 +2174,11 @@ void Heap::Scavenge() {
IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
}
{
- // Weak collections are held strongly by the Scavenger.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
- IterateEncounteredWeakCollections(&root_scavenge_visitor);
- }
- {
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
job.Run(isolate()->async_counters());
- DCHECK(copied_list.IsGlobalEmpty());
- DCHECK(promotion_list.IsGlobalEmpty());
+ DCHECK(copied_list.IsEmpty());
+ DCHECK(promotion_list.IsEmpty());
}
{
// Scavenge weak global handles.
@@ -2176,8 +2192,8 @@ void Heap::Scavenge() {
&root_scavenge_visitor);
scavengers[kMainThreadId]->Process();
- DCHECK(copied_list.IsGlobalEmpty());
- DCHECK(promotion_list.IsGlobalEmpty());
+ DCHECK(copied_list.IsEmpty());
+ DCHECK(promotion_list.IsEmpty());
isolate()
->global_handles()
->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
@@ -2190,21 +2206,24 @@ void Heap::Scavenge() {
}
}
- UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
+ {
+ // Update references into new space
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
+ UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateNewSpaceReferenceInExternalStringTableEntry);
- incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ }
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
// going to be unmapped.
- for (Page* p : PageRange(new_space()->FromSpaceStart(),
- new_space()->FromSpaceEnd())) {
+ for (Page* p : PageRange(new_space()->from_space().first_page(), nullptr)) {
concurrent_marking()->ClearLiveness(p);
}
}
- ScavengeWeakObjectRetainer weak_object_retainer(this);
+ ScavengeWeakObjectRetainer weak_object_retainer;
ProcessYoungWeakReferences(&weak_object_retainer);
// Set age mark.
@@ -2243,7 +2262,7 @@ void Heap::ComputeFastPromotionMode() {
!FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
- if (FLAG_trace_gc_verbose) {
+ if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
PrintIsolate(
isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
@@ -2304,12 +2323,12 @@ void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
- DCHECK(heap_->InNewSpace(obj));
+ DCHECK(InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
}
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
- DCHECK(!heap_->InNewSpace(obj));
+ DCHECK(!InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
}
#endif
@@ -2330,7 +2349,7 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
DCHECK(target->IsExternalString());
- if (heap_->InNewSpace(target)) {
+ if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
@@ -2425,20 +2444,37 @@ void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}
+void Heap::ForeachAllocationSite(Object* list,
+ std::function<void(AllocationSite*)> visitor) {
+ DisallowHeapAllocation disallow_heap_allocation;
+ Object* current = list;
+ while (current->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(current);
+ visitor(site);
+ Object* current_nested = site->nested_site();
+ while (current_nested->IsAllocationSite()) {
+ AllocationSite* nested_site = AllocationSite::cast(current_nested);
+ visitor(nested_site);
+ current_nested = nested_site->nested_site();
+ }
+ current = site->weak_next();
+ }
+}
+
void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
DisallowHeapAllocation no_allocation_scope;
- Object* cur = allocation_sites_list();
bool marked = false;
- while (cur->IsAllocationSite()) {
- AllocationSite* casted = AllocationSite::cast(cur);
- if (casted->GetPretenureMode() == flag) {
- casted->ResetPretenureDecision();
- casted->set_deopt_dependent_code(true);
- marked = true;
- RemoveAllocationSitePretenuringFeedback(casted);
- }
- cur = casted->weak_next();
- }
+
+ ForeachAllocationSite(allocation_sites_list(),
+ [&marked, flag, this](AllocationSite* site) {
+ if (site->GetPretenureMode() == flag) {
+ site->ResetPretenureDecision();
+ site->set_deopt_dependent_code(true);
+ marked = true;
+ RemoveAllocationSitePretenuringFeedback(site);
+ return;
+ }
+ });
if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
@@ -2473,20 +2509,21 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
class ExternalStringTableVisitorAdapter : public RootVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor)
- : visitor_(visitor) {}
+ Isolate* isolate, v8::ExternalResourceVisitor* visitor)
+ : isolate_(isolate), visitor_(visitor) {}
virtual void VisitRootPointers(Root root, const char* description,
Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
- Utils::ToLocal(Handle<String>(String::cast(*p))));
+ Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
}
}
private:
+ Isolate* isolate_;
v8::ExternalResourceVisitor* visitor_;
- } external_string_table_visitor(visitor);
+ } external_string_table_visitor(isolate(), visitor);
external_string_table_.IterateAll(&external_string_table_visitor);
}
@@ -2559,7 +2596,8 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
- Max(MinimumAllocationLimitGrowingStep(),
+ Max(heap_controller()->MinimumAllocationLimitGrowingStep(
+ CurrentHeapGrowingMode()),
static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
@@ -2789,6 +2827,29 @@ bool Heap::IsImmovable(HeapObject* object) {
return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
}
+#ifdef ENABLE_SLOW_DCHECKS
+namespace {
+
+class LeftTrimmerVerifierRootVisitor : public RootVisitor {
+ public:
+ explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
+ : to_check_(to_check) {}
+
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
+ for (Object** p = start; p < end; ++p) {
+ DCHECK_NE(*p, to_check_);
+ }
+ }
+
+ private:
+ FixedArrayBase* to_check_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
+};
+} // namespace
+#endif // ENABLE_SLOW_DCHECKS
+
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
@@ -2804,7 +2865,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
DCHECK(!lo_space()->Contains(object));
- DCHECK(object->map() != fixed_cow_array_map());
+ DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
@@ -2844,6 +2905,16 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
+
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ // Make sure the stack or other roots (e.g., Handles) don't contain pointers
+ // to the original FixedArray (which is now the filler object).
+ LeftTrimmerVerifierRootVisitor root_visitor(object);
+ IterateRoots(&root_visitor, VISIT_ALL);
+ }
+#endif // ENABLE_SLOW_DCHECKS
+
return new_object;
}
@@ -2859,9 +2930,11 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
bytes_to_trim = ByteArray::SizeFor(len) - new_size;
DCHECK_GE(bytes_to_trim, 0);
} else if (object->IsFixedArray()) {
+ CHECK_NE(elements_to_trim, len);
bytes_to_trim = elements_to_trim * kPointerSize;
} else {
DCHECK(object->IsFixedDoubleArray());
+ CHECK_NE(elements_to_trim, len);
bytes_to_trim = elements_to_trim * kDoubleSize;
}
@@ -2870,6 +2943,10 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
int elements_to_trim) {
+ // This function is safe to use only at the end of the mark compact
+ // collection: When marking, we record the weak slots, and shrinking
+ // invalidates them.
+ DCHECK_EQ(gc_state(), MARK_COMPACT);
CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
elements_to_trim * kPointerSize);
}
@@ -2881,7 +2958,7 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
object->IsWeakFixedArray());
// For now this trick is only applied to objects in new and paged space.
- DCHECK(object->map() != fixed_cow_array_map());
+ DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
if (bytes_to_trim == 0) {
DCHECK_EQ(elements_to_trim, 0);
@@ -3042,7 +3119,8 @@ bool Heap::HasHighFragmentation(size_t used, size_t committed) {
bool Heap::ShouldOptimizeForMemoryUsage() {
const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
- HighMemoryPressure() || !CanExpandOldGeneration(kOldGenerationSlack);
+ isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
+ !CanExpandOldGeneration(kOldGenerationSlack);
}
void Heap::ActivateMemoryReducerIfNeeded() {
@@ -3084,7 +3162,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
- FinalizeIncrementalMarking(gc_reason);
+ FinalizeIncrementalMarkingIncrementally(gc_reason);
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()
@@ -3093,6 +3171,48 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
}
}
+void Heap::FinalizeIncrementalMarkingAtomically(
+ GarbageCollectionReason gc_reason) {
+ DCHECK(!incremental_marking()->IsStopped());
+ CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
+}
+
+void Heap::FinalizeIncrementalMarkingIncrementally(
+ GarbageCollectionReason gc_reason) {
+ if (FLAG_trace_incremental_marking) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] (%s).\n",
+ Heap::GarbageCollectionReasonToString(gc_reason));
+ }
+
+ HistogramTimerScope incremental_marking_scope(
+ isolate()->counters()->gc_incremental_marking_finalize());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+ }
+ incremental_marking()->FinalizeIncrementally();
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+ }
+}
+
void Heap::RegisterDeserializedObjectsForBlackAllocation(
Reservation* reservations, const std::vector<HeapObject*>& large_objects,
const std::vector<Address>& maps) {
@@ -3268,16 +3388,6 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
contexts_disposed_ = 0;
- if (deadline_in_ms - start_ms >
- GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
- int committed_memory = static_cast<int>(CommittedMemory() / KB);
- int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
- isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
- start_ms, committed_memory);
- isolate()->counters()->aggregated_memory_heap_used()->AddSample(
- start_ms, used_memory);
- }
-
if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
FLAG_trace_idle_notification_verbose) {
isolate_->PrintWithTimestamp(
@@ -3363,9 +3473,14 @@ void Heap::CheckMemoryPressure() {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
- if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
+ MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
+ // Reset the memory pressure level to avoid recursive GCs triggered by
+ // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
+ // the finalizers.
+ memory_pressure_level_ = MemoryPressureLevel::kNone;
+ if (memory_pressure_level == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure();
- } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
+ } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
StartIncrementalMarking(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure);
@@ -3417,8 +3532,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
- MemoryPressureLevel previous = memory_pressure_level_.Value();
- memory_pressure_level_.SetValue(level);
+ MemoryPressureLevel previous = memory_pressure_level_;
+ memory_pressure_level_ = level;
if ((previous != MemoryPressureLevel::kCritical &&
level == MemoryPressureLevel::kCritical) ||
(previous == MemoryPressureLevel::kNone &&
@@ -3550,6 +3665,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "snapshot creator";
case GarbageCollectionReason::kTesting:
return "testing";
+ case GarbageCollectionReason::kExternalFinalize:
+ return "external finalize";
case GarbageCollectionReason::kUnknown:
return "unknown";
}
@@ -3594,6 +3711,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
+ case NEW_LO_SPACE:
+ return new_lo_space_->Contains(value);
case RO_SPACE:
return read_only_space_->Contains(value);
}
@@ -3617,13 +3736,14 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
+ case NEW_LO_SPACE:
+ return new_lo_space_->ContainsSlow(addr);
case RO_SPACE:
return read_only_space_->ContainsSlow(addr);
}
UNREACHABLE();
}
-
bool Heap::IsValidAllocationSpace(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
@@ -3631,6 +3751,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case CODE_SPACE:
case MAP_SPACE:
case LO_SPACE:
+ case NEW_LO_SPACE:
case RO_SPACE:
return true;
default:
@@ -3658,19 +3779,22 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
#ifdef VERIFY_HEAP
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ public:
+ explicit VerifyReadOnlyPointersVisitor(Heap* heap)
+ : VerifyPointersVisitor(heap) {}
+
protected:
void VerifyPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) override {
- Heap* heap = host->GetIsolate()->heap();
if (host != nullptr) {
- CHECK(heap->InReadOnlySpace(host->map()));
+ CHECK(heap_->InReadOnlySpace(host->map()));
}
VerifyPointersVisitor::VerifyPointers(host, start, end);
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
if ((*current)->ToStrongOrWeakHeapObject(&object)) {
- CHECK(heap->InReadOnlySpace(object));
+ CHECK(heap_->InReadOnlySpace(object));
}
}
}
@@ -3683,24 +3807,24 @@ void Heap::Verify() {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
- VerifyPointersVisitor visitor;
+ VerifyPointersVisitor visitor(this);
IterateRoots(&visitor, VISIT_ONLY_STRONG);
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
- new_space_->Verify();
+ new_space_->Verify(isolate());
- old_space_->Verify(&visitor);
- map_space_->Verify(&visitor);
+ old_space_->Verify(isolate(), &visitor);
+ map_space_->Verify(isolate(), &visitor);
- VerifyPointersVisitor no_dirty_regions_visitor;
- code_space_->Verify(&no_dirty_regions_visitor);
+ VerifyPointersVisitor no_dirty_regions_visitor(this);
+ code_space_->Verify(isolate(), &no_dirty_regions_visitor);
- lo_space_->Verify();
+ lo_space_->Verify(isolate());
- VerifyReadOnlyPointersVisitor read_only_visitor;
- read_only_space_->Verify(&read_only_visitor);
+ VerifyReadOnlyPointersVisitor read_only_visitor(this);
+ read_only_space_->Verify(isolate(), &read_only_visitor);
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -3760,20 +3884,17 @@ class SlotVerifyingVisitor : public ObjectVisitor {
class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
public:
- OldToNewSlotVerifyingVisitor(Heap* heap, std::set<Address>* untyped,
- std::set<std::pair<SlotType, Address> >* typed)
- : SlotVerifyingVisitor(untyped, typed), heap_(heap) {}
+ OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
+ std::set<std::pair<SlotType, Address>>* typed)
+ : SlotVerifyingVisitor(untyped, typed) {}
bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
DCHECK_IMPLIES(
- target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target),
- heap_->InToSpace(target));
- return target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target) &&
- !heap_->InNewSpace(host);
+ target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target),
+ Heap::InToSpace(target));
+ return target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target) &&
+ !Heap::InNewSpace(host);
}
-
- private:
- Heap* heap_;
};
template <RememberedSetType direction>
@@ -3810,7 +3931,7 @@ void Heap::VerifyRememberedSetFor(HeapObject* object) {
if (!InNewSpace(object)) {
store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
- OldToNewSlotVerifyingVisitor visitor(this, &old_to_new, &typed_old_to_new);
+ OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
object->IterateBody(&visitor);
}
// TODO(ulan): Add old to old slot set verification once all weak objects
@@ -3838,12 +3959,10 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
void Heap::ZapFromSpace() {
if (!new_space_->IsFromSpaceCommitted()) return;
- for (Page* page :
- PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
- for (Address cursor = page->area_start(), limit = page->area_end();
- cursor < limit; cursor += kPointerSize) {
- Memory::Address_at(cursor) = static_cast<Address>(kFromSpaceZapValue);
- }
+ for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
+ memory_allocator()->ZapBlock(page->area_start(),
+ page->HighWaterMark() - page->area_start(),
+ ZapValue());
}
}
@@ -3855,6 +3974,26 @@ void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
#endif
}
+Code* Heap::builtin(int index) {
+ DCHECK(Builtins::IsBuiltinId(index));
+ // Code::cast cannot be used here since we access builtins
+ // during the marking phase of mark sweep. See IC::Clear.
+ return reinterpret_cast<Code*>(builtins_[index]);
+}
+
+Address Heap::builtin_address(int index) {
+ DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
+ return reinterpret_cast<Address>(&builtins_[index]);
+}
+
+void Heap::set_builtin(int index, HeapObject* builtin) {
+ DCHECK(Builtins::IsBuiltinId(index));
+ DCHECK(Internals::HasHeapObjectTag(builtin));
+ // The given builtin may be completely uninitialized thus we cannot check its
+ // type here.
+ builtins_[index] = builtin;
+}
+
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -3886,11 +4025,6 @@ void Heap::IterateSmiRoots(RootVisitor* v) {
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
-void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
- visitor->VisitRootPointer(Root::kWeakCollections, nullptr,
- &encountered_weak_collections_);
-}
-
// We cannot avoid stale handles to left-trimmed objects, but can only make
// sure all handles still needed are updated. Filter out a stale pointer
// and clear the slot to allow post processing of handles (needed because
@@ -3921,9 +4055,10 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
// We need to find a FixedArrayBase map after walking the fillers.
while (current->IsFiller()) {
Address next = reinterpret_cast<Address>(current);
- if (current->map() == heap_->one_pointer_filler_map()) {
+ if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kPointerSize;
- } else if (current->map() == heap_->two_pointer_filler_map()) {
+ } else if (current->map() ==
+ ReadOnlyRoots(heap_).two_pointer_filler_map()) {
next += 2 * kPointerSize;
} else {
next += current->Size();
@@ -3970,7 +4105,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
if (!isMinorGC) {
- isolate_->builtins()->IterateBuiltins(v);
+ IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
isolate_->interpreter()->IterateDispatchTable(v);
v->Synchronize(VisitorSynchronization::kDispatchTable);
@@ -4034,14 +4169,18 @@ void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
isolate_->global_handles()->IterateWeakRoots(v);
}
+void Heap::IterateBuiltins(RootVisitor* v) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ v->VisitRootPointer(Root::kBuiltins, Builtins::name(i), &builtins_[i]);
+ }
+}
+
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
+void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
size_t max_old_generation_size_in_mb,
size_t code_range_size_in_mb) {
- if (HasBeenSetUp()) return false;
-
// Overwrite default configuration.
if (max_semi_space_size_in_kb != 0) {
max_semi_space_size_ =
@@ -4129,7 +4268,6 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
code_range_size_ = code_range_size_in_mb * MB;
configured_ = true;
- return true;
}
@@ -4156,7 +4294,7 @@ void Heap::GetFromRingBuffer(char* buffer) {
memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}
-bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0); }
+void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
@@ -4218,176 +4356,6 @@ uint64_t Heap::PromotedExternalMemorySize() {
external_memory_at_last_mark_compact_);
}
-
-const double Heap::kMinHeapGrowingFactor = 1.1;
-const double Heap::kMaxHeapGrowingFactor = 4.0;
-const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
-const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
-const double Heap::kConservativeHeapGrowingFactor = 1.3;
-const double Heap::kTargetMutatorUtilization = 0.97;
-
-// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
-// (mutator speed), this function returns the heap growing factor that will
-// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
-// remain the same until the next GC.
-//
-// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
-// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
-// time spent in the garbage collector.
-//
-// Let MU be kTargetMutatorUtilisation, the desired mutator utilization for the
-// time-frame from the end of the current GC to the end of the next GC. Based
-// on the MU we can compute the heap growing factor F as
-//
-// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
-//
-// This formula can be derived as follows.
-//
-// F = Limit / Live by definition, where the Limit is the allocation limit,
-// and the Live is size of live objects.
-// Let’s assume that we already know the Limit. Then:
-// TG = Limit / gc_speed
-// TM = (TM + TG) * MU, by definition of MU.
-// TM = TG * MU / (1 - MU)
-// TM = Limit * MU / (gc_speed * (1 - MU))
-// On the other hand, if the allocation throughput remains constant:
-// Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
-// Solving it for TM, we get
-// TM = (Limit - Live) / mutator_speed
-// Combining the two equation for TM:
-// (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
-// (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
-// substitute R = gc_speed / mutator_speed
-// (Limit - Live) = Limit * MU / (R * (1 - MU))
-// substitute F = Limit / Live
-// F - 1 = F * MU / (R * (1 - MU))
-// F - F * MU / (R * (1 - MU)) = 1
-// F * (1 - MU / (R * (1 - MU))) = 1
-// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
-// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
- double max_factor) {
- DCHECK_LE(kMinHeapGrowingFactor, max_factor);
- DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
- if (gc_speed == 0 || mutator_speed == 0) return max_factor;
-
- const double speed_ratio = gc_speed / mutator_speed;
- const double mu = kTargetMutatorUtilization;
-
- const double a = speed_ratio * (1 - mu);
- const double b = speed_ratio * (1 - mu) - mu;
-
- // The factor is a / b, but we need to check for small b first.
- double factor = (a < b * max_factor) ? a / b : max_factor;
- factor = Min(factor, max_factor);
- factor = Max(factor, kMinHeapGrowingFactor);
- return factor;
-}
-
-double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
- const double min_small_factor = 1.3;
- const double max_small_factor = 2.0;
- const double high_factor = 4.0;
-
- size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
- max_old_generation_size_in_mb =
- Max(max_old_generation_size_in_mb,
- static_cast<size_t>(kMinOldGenerationSize));
-
- // If we are on a device with lots of memory, we allow a high heap
- // growing factor.
- if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
- return high_factor;
- }
-
- DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
- DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
-
- // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
- (max_small_factor - min_small_factor) /
- (kMaxOldGenerationSize - kMinOldGenerationSize) +
- min_small_factor;
- return factor;
-}
-
-size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
- size_t old_gen_size) {
- CHECK_LT(1.0, factor);
- CHECK_LT(0, old_gen_size);
- uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
- limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
- MinimumAllocationLimitGrowingStep());
- limit += new_space_->Capacity();
- uint64_t halfway_to_the_max =
- (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
- return static_cast<size_t>(Min(limit, halfway_to_the_max));
-}
-
-size_t Heap::MinimumAllocationLimitGrowingStep() {
- const size_t kRegularAllocationLimitGrowingStep = 8;
- const size_t kLowMemoryAllocationLimitGrowingStep = 2;
- size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
- return limit * (ShouldOptimizeForMemoryUsage()
- ? kLowMemoryAllocationLimitGrowingStep
- : kRegularAllocationLimitGrowingStep);
-}
-
-void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
- double mutator_speed) {
- double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
- double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
-
- if (FLAG_trace_gc_verbose) {
- isolate_->PrintWithTimestamp(
- "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
- "(gc=%.f, mutator=%.f)\n",
- factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
- mutator_speed);
- }
-
- if (memory_reducer_->ShouldGrowHeapSlowly() ||
- ShouldOptimizeForMemoryUsage()) {
- factor = Min(factor, kConservativeHeapGrowingFactor);
- }
-
- if (FLAG_stress_compaction || ShouldReduceMemory()) {
- factor = kMinHeapGrowingFactor;
- }
-
- if (FLAG_heap_growing_percent > 0) {
- factor = 1.0 + FLAG_heap_growing_percent / 100.0;
- }
-
- old_generation_allocation_limit_ =
- CalculateOldGenerationAllocationLimit(factor, old_gen_size);
-
- if (FLAG_trace_gc_verbose) {
- isolate_->PrintWithTimestamp(
- "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
- old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
- }
-}
-
-void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
- double gc_speed,
- double mutator_speed) {
- double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
- double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
- size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
- if (limit < old_generation_allocation_limit_) {
- if (FLAG_trace_gc_verbose) {
- isolate_->PrintWithTimestamp(
- "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
- " KB, "
- "new limit: %" PRIuS " KB (%.1f)\n",
- old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
- factor);
- }
- old_generation_allocation_limit_ = limit;
- }
-}
-
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4420,6 +4388,22 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
return true;
}
+Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
+ if (ShouldReduceMemory() || FLAG_stress_compaction) {
+ return Heap::HeapGrowingMode::kMinimal;
+ }
+
+ if (ShouldOptimizeForMemoryUsage()) {
+ return Heap::HeapGrowingMode::kConservative;
+ }
+
+ if (memory_reducer()->ShouldGrowHeapSlowly()) {
+ return Heap::HeapGrowingMode::kSlow;
+ }
+
+ return Heap::HeapGrowingMode::kDefault;
+}
+
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
// The kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
@@ -4530,7 +4514,7 @@ HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
DCHECK_GE(object_size, 0);
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
- code_space_->FirstPage()->Contains(heap_object->address())) {
+ code_space_->first_page()->Contains(heap_object->address())) {
MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
@@ -4546,12 +4530,12 @@ HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
return heap_object;
}
-HeapObject* Heap::AllocateRawWithLigthRetry(int size, AllocationSpace space,
+HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
AllocationAlignment alignment) {
HeapObject* result;
AllocationResult alloc = AllocateRaw(size, space, alignment);
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// Two GCs before panicking. In newspace will almost always succeed.
@@ -4560,7 +4544,7 @@ HeapObject* Heap::AllocateRawWithLigthRetry(int size, AllocationSpace space,
GarbageCollectionReason::kAllocationFailure);
alloc = AllocateRaw(size, space, alignment);
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
}
@@ -4570,7 +4554,7 @@ HeapObject* Heap::AllocateRawWithLigthRetry(int size, AllocationSpace space,
HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
AllocationAlignment alignment) {
AllocationResult alloc;
- HeapObject* result = AllocateRawWithLigthRetry(size, space, alignment);
+ HeapObject* result = AllocateRawWithLightRetry(size, space, alignment);
if (result) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
@@ -4580,7 +4564,7 @@ HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
alloc = AllocateRaw(size, space, alignment);
}
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// TODO(1181417): Fix this.
@@ -4594,7 +4578,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
HeapObject* result;
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// Two GCs before panicking.
@@ -4603,7 +4587,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
GarbageCollectionReason::kAllocationFailure);
alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
}
@@ -4614,7 +4598,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
}
if (alloc.To(&result)) {
- DCHECK(result != exception());
+ DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// TODO(1181417): Fix this.
@@ -4622,33 +4606,31 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
return nullptr;
}
-bool Heap::SetUp() {
+void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
#endif
- // Initialize heap spaces and initial maps and objects. Whenever something
- // goes wrong, just return false. The caller should check the results and
- // call Heap::TearDown() to release allocated memory.
+ // Initialize heap spaces and initial maps and objects.
//
// If the heap is not yet configured (e.g. through the API), configure it.
// Configuration is based on the flags new-space-size (really the semispace
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
- if (!configured_) {
- if (!ConfigureHeapDefault()) return false;
- }
+ if (!configured_) ConfigureHeapDefault();
mmap_region_base_ =
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
- memory_allocator_ = new MemoryAllocator(isolate_);
- if (!memory_allocator_->SetUp(MaxReserved(), code_range_size_)) return false;
+ memory_allocator_ =
+ new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
store_buffer_ = new StoreBuffer(this);
+ heap_controller_ = new HeapController(this);
+
mark_compact_collector_ = new MarkCompactCollector(this);
incremental_marking_ =
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
@@ -4665,37 +4647,18 @@ bool Heap::SetUp() {
new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
}
- for (int i = 0; i <= LAST_SPACE; i++) {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
- space_[NEW_SPACE] = new_space_ = new NewSpace(this);
- if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
- return false;
- }
-
+ space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
+ space_[NEW_SPACE] = new_space_ =
+ new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
space_[OLD_SPACE] = old_space_ = new OldSpace(this);
- if (!old_space_->SetUp()) return false;
-
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
- if (!code_space_->SetUp()) return false;
-
- space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
- if (!map_space_->SetUp()) return false;
-
- // The large object code space may contain code or data. We set the memory
- // to be non-executable here for safety, but this means we need to enable it
- // explicitly when allocating large code objects.
- space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
- if (!lo_space_->SetUp()) return false;
-
- space_[RO_SPACE] = read_only_space_ =
- new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
- if (!read_only_space_->SetUp()) return false;
-
- // Set up the seed that is used to randomize the string hash function.
- DCHECK_EQ(Smi::kZero, hash_seed());
- if (FLAG_randomize_hashes) InitializeHashSeed();
+ space_[MAP_SPACE] = map_space_ = new MapSpace(this);
+ space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
+ space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
i++) {
@@ -4716,7 +4679,7 @@ bool Heap::SetUp() {
dead_object_stats_ = new ObjectStats(this);
}
scavenge_job_ = new ScavengeJob();
- local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
+ local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -4751,17 +4714,17 @@ bool Heap::SetUp() {
write_protect_code_memory_ = FLAG_write_protect_code_memory;
external_reference_table_.Init(isolate_);
-
- return true;
}
void Heap::InitializeHashSeed() {
+ uint64_t new_hash_seed;
if (FLAG_hash_seed == 0) {
- int rnd = isolate()->random_number_generator()->NextInt();
- set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
+ int64_t rnd = isolate()->random_number_generator()->NextInt64();
+ new_hash_seed = static_cast<uint64_t>(rnd);
} else {
- set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+ new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
}
+ hash_seed()->copy_in(0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
void Heap::SetStackLimits() {
@@ -4840,8 +4803,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
DCHECK(js_object->WasConstructedFromApiFunction());
if (js_object->GetEmbedderFieldCount() >= 2 &&
js_object->GetEmbedderField(0) &&
- js_object->GetEmbedderField(0) != undefined_value() &&
- js_object->GetEmbedderField(1) != undefined_value()) {
+ js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
+ js_object->GetEmbedderField(1) != ReadOnlyRoots(this).undefined_value()) {
DCHECK_EQ(0,
reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
@@ -4905,6 +4868,11 @@ void Heap::TearDown() {
stress_scavenge_observer_ = nullptr;
}
+ if (heap_controller_ != nullptr) {
+ delete heap_controller_;
+ heap_controller_ = nullptr;
+ }
+
if (mark_compact_collector_ != nullptr) {
mark_compact_collector_->TearDown();
delete mark_compact_collector_;
@@ -4967,34 +4935,9 @@ void Heap::TearDown() {
delete tracer_;
tracer_ = nullptr;
- new_space_->TearDown();
- delete new_space_;
- new_space_ = nullptr;
-
- if (old_space_ != nullptr) {
- delete old_space_;
- old_space_ = nullptr;
- }
-
- if (code_space_ != nullptr) {
- delete code_space_;
- code_space_ = nullptr;
- }
-
- if (map_space_ != nullptr) {
- delete map_space_;
- map_space_ = nullptr;
- }
-
- if (lo_space_ != nullptr) {
- lo_space_->TearDown();
- delete lo_space_;
- lo_space_ = nullptr;
- }
-
- if (read_only_space_ != nullptr) {
- delete read_only_space_;
- read_only_space_ = nullptr;
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ delete space_[i];
+ space_[i] = nullptr;
}
store_buffer()->TearDown();
@@ -5062,30 +5005,40 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
}
namespace {
-void CompactFixedArrayOfWeakCells(Object* object) {
+void CompactFixedArrayOfWeakCells(Isolate* isolate, Object* object) {
if (object->IsFixedArrayOfWeakCells()) {
FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
- array->Compact<FixedArrayOfWeakCells::NullCallback>();
+ array->Compact<FixedArrayOfWeakCells::NullCallback>(isolate);
}
}
} // anonymous namespace
void Heap::CompactFixedArraysOfWeakCells() {
- // Find known FixedArrayOfWeakCells and compact them.
- HeapIterator iterator(this);
- for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
- if (o->IsPrototypeInfo()) {
- Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
- if (prototype_users->IsFixedArrayOfWeakCells()) {
- FixedArrayOfWeakCells* array =
- FixedArrayOfWeakCells::cast(prototype_users);
- array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
+ // Find known PrototypeUsers and compact them.
+ std::vector<Handle<PrototypeInfo>> prototype_infos;
+ {
+ HeapIterator iterator(this);
+ for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
+ if (o->IsPrototypeInfo()) {
+ PrototypeInfo* prototype_info = PrototypeInfo::cast(o);
+ if (prototype_info->prototype_users()->IsWeakArrayList()) {
+ prototype_infos.emplace_back(handle(prototype_info, isolate()));
+ }
}
}
}
- CompactFixedArrayOfWeakCells(noscript_shared_function_infos());
- CompactFixedArrayOfWeakCells(script_list());
- CompactFixedArrayOfWeakCells(weak_stack_trace_list());
+ for (auto& prototype_info : prototype_infos) {
+ Handle<WeakArrayList> array(
+ WeakArrayList::cast(prototype_info->prototype_users()), isolate());
+ WeakArrayList* new_array = PrototypeUsers::Compact(
+ array, this, JSObject::PrototypeRegistryCompactionCallback);
+ prototype_info->set_prototype_users(new_array);
+ }
+
+ // Find known FixedArrayOfWeakCells and compact them.
+ CompactFixedArrayOfWeakCells(isolate(), noscript_shared_function_infos());
+ CompactFixedArrayOfWeakCells(isolate(), script_list());
+ CompactFixedArrayOfWeakCells(isolate(), weak_stack_trace_list());
}
void Heap::AddRetainedMap(Handle<Map> map) {
@@ -5097,7 +5050,10 @@ void Heap::AddRetainedMap(Handle<Map> map) {
CompactRetainedMaps(*array);
}
array =
- WeakArrayList::Add(array, map, Smi::FromInt(FLAG_retain_maps_for_n_gc));
+ WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
+ array = WeakArrayList::AddToEnd(
+ isolate(), array,
+ MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
if (*array != retained_maps()) {
set_retained_maps(*array);
}
@@ -5130,7 +5086,7 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
new_length += 2;
}
number_of_disposed_maps_ = new_number_of_disposed_maps;
- HeapObject* undefined = undefined_value();
+ HeapObject* undefined = ReadOnlyRoots(this).undefined_value();
for (int i = new_length; i < length; i++) {
retained_maps->Set(i, HeapObjectReference::Strong(undefined));
}
@@ -5221,7 +5177,7 @@ void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
if (rinfo->IsInConstantPool()) {
addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTarget(rmode)) {
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rmode));
@@ -5454,12 +5410,11 @@ void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
if (o->IsTheHole(isolate)) {
continue;
}
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+ // The real external string is already in one of these vectors and was or
+ // will be processed. Re-processing it will add a duplicate to the vector.
+ if (o->IsThinString()) continue;
DCHECK(o->IsExternalString());
- if (heap_->InNewSpace(o)) {
+ if (InNewSpace(o)) {
new_space_strings_[last++] = o;
} else {
old_space_strings_.push_back(o);
@@ -5477,12 +5432,11 @@ void Heap::ExternalStringTable::CleanUpAll() {
if (o->IsTheHole(isolate)) {
continue;
}
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+ // The real external string is already in one of these vectors and was or
+ // will be processed. Re-processing it will add a duplicate to the vector.
+ if (o->IsThinString()) continue;
DCHECK(o->IsExternalString());
- DCHECK(!heap_->InNewSpace(o));
+ DCHECK(!InNewSpace(o));
old_space_strings_[last++] = o;
}
old_space_strings_.resize(last);
@@ -5648,6 +5602,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
+ case NEW_LO_SPACE:
+ return "NEW_LO_SPACE";
case RO_SPACE:
return "RO_SPACE";
default:
@@ -5680,7 +5636,7 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
if ((*current)->ToStrongOrWeakHeapObject(&object)) {
- CHECK(object->GetIsolate()->heap()->Contains(object));
+ CHECK(heap_->Contains(object));
CHECK(object->map()->IsMap());
} else {
CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
@@ -5708,7 +5664,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
//
// Since this function is used for debugging only, we do not place
// asserts here, but check everything explicitly.
- if (obj->map() == one_pointer_filler_map()) return false;
+ if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = obj->map()->instance_type();
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
AllocationSpace src = chunk->owner()->identity();
@@ -5721,6 +5677,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
+ case NEW_LO_SPACE:
case RO_SPACE:
return false;
}
@@ -5772,20 +5729,16 @@ Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
- DCHECK(map == code->GetHeap()->code_map());
-#ifdef V8_EMBEDDED_BUILTINS
+ DCHECK(map == ReadOnlyRoots(this).code_map());
if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
-#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
-#ifdef V8_EMBEDDED_BUILTINS
Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
if (code != nullptr) return code;
-#endif
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = lo_space()->FindPage(inner_pointer);
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index a051d7262e..0f3c9ea389 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -24,6 +24,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/string-table.h"
+#include "src/roots.h"
#include "src/visitors.h"
namespace v8 {
@@ -39,7 +40,7 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
-class BoilerplateDescription;
+class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
@@ -49,375 +50,120 @@ class JSArrayBuffer;
using v8::MemoryPressureLevel;
-// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
- /* The first 32 entries are most often used in the startup snapshot and */ \
- /* can use a shorter representation in the serialization format. */ \
- V(Map, free_space_map, FreeSpaceMap) \
- V(Map, one_pointer_filler_map, OnePointerFillerMap) \
- V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
- V(Oddball, uninitialized_value, UninitializedValue) \
- V(Oddball, undefined_value, UndefinedValue) \
- V(Oddball, the_hole_value, TheHoleValue) \
- V(Oddball, null_value, NullValue) \
- V(Oddball, true_value, TrueValue) \
- V(Oddball, false_value, FalseValue) \
- V(String, empty_string, empty_string) \
- V(Map, meta_map, MetaMap) \
- V(Map, byte_array_map, ByteArrayMap) \
- V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
- V(Map, hash_table_map, HashTableMap) \
- V(Map, symbol_map, SymbolMap) \
- V(Map, one_byte_string_map, OneByteStringMap) \
- V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
- V(Map, scope_info_map, ScopeInfoMap) \
- V(Map, shared_function_info_map, SharedFunctionInfoMap) \
- V(Map, code_map, CodeMap) \
- V(Map, function_context_map, FunctionContextMap) \
- V(Map, cell_map, CellMap) \
- V(Map, weak_cell_map, WeakCellMap) \
- V(Map, global_property_cell_map, GlobalPropertyCellMap) \
- V(Map, foreign_map, ForeignMap) \
- V(Map, heap_number_map, HeapNumberMap) \
- V(Map, transition_array_map, TransitionArrayMap) \
- V(Map, feedback_vector_map, FeedbackVectorMap) \
- V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
- V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- /* Entries beyond the first 32 */ \
- /* The roots above this line should be boring from a GC point of view. */ \
- /* This means they are never in new space and never on a page that is */ \
- /* being compacted.*/ \
- /* Oddballs */ \
- V(Oddball, arguments_marker, ArgumentsMarker) \
- V(Oddball, exception, Exception) \
- V(Oddball, termination_exception, TerminationException) \
- V(Oddball, optimized_out, OptimizedOut) \
- V(Oddball, stale_register, StaleRegister) \
- /* Context maps */ \
- V(Map, native_context_map, NativeContextMap) \
- V(Map, module_context_map, ModuleContextMap) \
- V(Map, eval_context_map, EvalContextMap) \
- V(Map, script_context_map, ScriptContextMap) \
- V(Map, block_context_map, BlockContextMap) \
- V(Map, catch_context_map, CatchContextMap) \
- V(Map, with_context_map, WithContextMap) \
- V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
- V(Map, script_context_table_map, ScriptContextTableMap) \
- /* Maps */ \
- V(Map, feedback_metadata_map, FeedbackMetadataArrayMap) \
- V(Map, array_list_map, ArrayListMap) \
- V(Map, bigint_map, BigIntMap) \
- V(Map, boilerplate_description_map, BoilerplateDescriptionMap) \
- V(Map, bytecode_array_map, BytecodeArrayMap) \
- V(Map, code_data_container_map, CodeDataContainerMap) \
- V(Map, descriptor_array_map, DescriptorArrayMap) \
- V(Map, external_map, ExternalMap) \
- V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
- V(Map, global_dictionary_map, GlobalDictionaryMap) \
- V(Map, many_closures_cell_map, ManyClosuresCellMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, module_info_map, ModuleInfoMap) \
- V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
- V(Map, name_dictionary_map, NameDictionaryMap) \
- V(Map, no_closures_cell_map, NoClosuresCellMap) \
- V(Map, number_dictionary_map, NumberDictionaryMap) \
- V(Map, one_closure_cell_map, OneClosureCellMap) \
- V(Map, ordered_hash_map_map, OrderedHashMapMap) \
- V(Map, ordered_hash_set_map, OrderedHashSetMap) \
- V(Map, property_array_map, PropertyArrayMap) \
- V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap) \
- V(Map, side_effect_free_call_handler_info_map, \
- SideEffectFreeCallHandlerInfoMap) \
- V(Map, next_call_side_effect_free_call_handler_info_map, \
- NextCallSideEffectFreeCallHandlerInfoMap) \
- V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
- V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
- V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
- V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
- V(Map, string_table_map, StringTableMap) \
- V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
- V(Map, weak_array_list_map, WeakArrayListMap) \
- /* String maps */ \
- V(Map, native_source_string_map, NativeSourceStringMap) \
- V(Map, string_map, StringMap) \
- V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
- V(Map, cons_string_map, ConsStringMap) \
- V(Map, thin_one_byte_string_map, ThinOneByteStringMap) \
- V(Map, thin_string_map, ThinStringMap) \
- V(Map, sliced_string_map, SlicedStringMap) \
- V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
- V(Map, external_string_map, ExternalStringMap) \
- V(Map, external_string_with_one_byte_data_map, \
- ExternalStringWithOneByteDataMap) \
- V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
- V(Map, short_external_string_map, ShortExternalStringMap) \
- V(Map, short_external_string_with_one_byte_data_map, \
- ShortExternalStringWithOneByteDataMap) \
- V(Map, internalized_string_map, InternalizedStringMap) \
- V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
- V(Map, external_internalized_string_with_one_byte_data_map, \
- ExternalInternalizedStringWithOneByteDataMap) \
- V(Map, external_one_byte_internalized_string_map, \
- ExternalOneByteInternalizedStringMap) \
- V(Map, short_external_internalized_string_map, \
- ShortExternalInternalizedStringMap) \
- V(Map, short_external_internalized_string_with_one_byte_data_map, \
- ShortExternalInternalizedStringWithOneByteDataMap) \
- V(Map, short_external_one_byte_internalized_string_map, \
- ShortExternalOneByteInternalizedStringMap) \
- V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
- /* Array element maps */ \
- V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
- V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
- V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
- V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
- V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
- V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
- V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
- V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
- V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
- V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
- V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
- /* Oddball maps */ \
- V(Map, undefined_map, UndefinedMap) \
- V(Map, the_hole_map, TheHoleMap) \
- V(Map, null_map, NullMap) \
- V(Map, boolean_map, BooleanMap) \
- V(Map, uninitialized_map, UninitializedMap) \
- V(Map, arguments_marker_map, ArgumentsMarkerMap) \
- V(Map, exception_map, ExceptionMap) \
- V(Map, termination_exception_map, TerminationExceptionMap) \
- V(Map, optimized_out_map, OptimizedOutMap) \
- V(Map, stale_register_map, StaleRegisterMap) \
- V(Map, self_reference_marker_map, SelfReferenceMarkerMap) \
- /* Canonical empty values */ \
- V(EnumCache, empty_enum_cache, EmptyEnumCache) \
- V(PropertyArray, empty_property_array, EmptyPropertyArray) \
- V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(BoilerplateDescription, empty_boilerplate_description, \
- EmptyBoilerplateDescription) \
- V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
- V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
- V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
- V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
- V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
- V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
- V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
- V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
- V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
- EmptyFixedUint8ClampedArray) \
- V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
- EmptyFixedBigUint64Array) \
- V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
- V(Script, empty_script, EmptyScript) \
- V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
- V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
- V(NumberDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
- V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
- V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
- V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
- V(WeakCell, empty_weak_cell, EmptyWeakCell) \
- V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
- V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
- V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
- V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
- /* Protectors */ \
- V(Cell, array_constructor_protector, ArrayConstructorProtector) \
- V(PropertyCell, no_elements_protector, NoElementsProtector) \
- V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
- V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
- V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
- V(Cell, string_length_protector, StringLengthProtector) \
- V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
- V(PropertyCell, array_buffer_neutering_protector, \
- ArrayBufferNeuteringProtector) \
- V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
- V(Cell, promise_resolve_protector, PromiseResolveProtector) \
- V(PropertyCell, promise_then_protector, PromiseThenProtector) \
- /* Special numbers */ \
- V(HeapNumber, nan_value, NanValue) \
- V(HeapNumber, hole_nan_value, HoleNanValue) \
- V(HeapNumber, infinity_value, InfinityValue) \
- V(HeapNumber, minus_zero_value, MinusZeroValue) \
- V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
- /* Marker for self-references during code-generation */ \
- V(HeapObject, self_reference_marker, SelfReferenceMarker) \
- /* Caches */ \
- V(FixedArray, number_string_cache, NumberStringCache) \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
- V(FixedArray, string_split_cache, StringSplitCache) \
- V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- /* Lists and dictionaries */ \
- V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
- V(NameDictionary, public_symbol_table, PublicSymbolTable) \
- V(NameDictionary, api_symbol_table, ApiSymbolTable) \
- V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
- V(Object, script_list, ScriptList) \
- V(SimpleNumberDictionary, code_stubs, CodeStubs) \
- V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(FixedArray, detached_contexts, DetachedContexts) \
- V(HeapObject, retaining_path_targets, RetainingPathTargets) \
- V(WeakArrayList, retained_maps, RetainedMaps) \
- /* Indirection lists for isolate-independent builtins */ \
- V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
- /* Feedback vectors that we need for code coverage or type profile */ \
- V(Object, feedback_vectors_for_profiling_tools, \
- FeedbackVectorsForProfilingTools) \
- V(Object, weak_stack_trace_list, WeakStackTraceList) \
- V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
- V(FixedArray, serialized_objects, SerializedObjects) \
- V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
- V(TemplateList, message_listeners, MessageListeners) \
- /* DeserializeLazy handlers for lazy bytecode deserialization */ \
- V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
- V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
- V(Object, deserialize_lazy_handler_extra_wide, \
- DeserializeLazyHandlerExtraWide) \
- /* JS Entries */ \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
- V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
-
-// Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V) \
- V(Smi, stack_limit, StackLimit) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(Smi, last_script_id, LastScriptId) \
- V(Smi, last_debugging_id, LastDebuggingId) \
- V(Smi, hash_seed, HashSeed) \
- /* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the native context. */ \
- V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_create_deopt_pc_offset, \
- ConstructStubCreateDeoptPCOffset) \
- V(Smi, construct_stub_invoke_deopt_pc_offset, \
- ConstructStubInvokeDeoptPCOffset) \
- V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
-
-#define ROOT_LIST(V) \
- STRONG_ROOT_LIST(V) \
- SMI_ROOT_LIST(V) \
- V(StringTable, string_table, StringTable)
-
-
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
- V(ArgumentsMarker) \
- V(ArgumentsMarkerMap) \
- V(ArrayBufferNeuteringProtector) \
- V(ArrayIteratorProtector) \
- V(BigIntMap) \
- V(BlockContextMap) \
- V(BoilerplateDescriptionMap) \
- V(BooleanMap) \
- V(ByteArrayMap) \
- V(BytecodeArrayMap) \
- V(CatchContextMap) \
- V(CellMap) \
- V(CodeMap) \
- V(DebugEvaluateContextMap) \
- V(DescriptorArrayMap) \
- V(EmptyByteArray) \
- V(EmptyDescriptorArray) \
- V(EmptyFixedArray) \
- V(EmptyFixedFloat32Array) \
- V(EmptyFixedFloat64Array) \
- V(EmptyFixedInt16Array) \
- V(EmptyFixedInt32Array) \
- V(EmptyFixedInt8Array) \
- V(EmptyFixedUint16Array) \
- V(EmptyFixedUint32Array) \
- V(EmptyFixedUint8Array) \
- V(EmptyFixedUint8ClampedArray) \
- V(EmptyOrderedHashMap) \
- V(EmptyOrderedHashSet) \
- V(EmptyPropertyCell) \
- V(EmptyScopeInfo) \
- V(EmptyScript) \
- V(EmptySloppyArgumentsElements) \
- V(EmptySlowElementDictionary) \
- V(EmptyWeakCell) \
- V(EvalContextMap) \
- V(Exception) \
- V(FalseValue) \
- V(FixedArrayMap) \
- V(FixedCOWArrayMap) \
- V(FixedDoubleArrayMap) \
- V(ForeignMap) \
- V(FreeSpaceMap) \
- V(FunctionContextMap) \
- V(GlobalDictionaryMap) \
- V(GlobalPropertyCellMap) \
- V(HashTableMap) \
- V(HeapNumberMap) \
- V(HoleNanValue) \
- V(InfinityValue) \
- V(IsConcatSpreadableProtector) \
- V(JSMessageObjectMap) \
- V(JsConstructEntryCode) \
- V(JsEntryCode) \
- V(ManyClosuresCell) \
- V(ManyClosuresCellMap) \
- V(MetaMap) \
- V(MinusInfinityValue) \
- V(MinusZeroValue) \
- V(ModuleContextMap) \
- V(ModuleInfoMap) \
- V(MutableHeapNumberMap) \
- V(NameDictionaryMap) \
- V(NanValue) \
- V(NativeContextMap) \
- V(NoClosuresCellMap) \
- V(NoElementsProtector) \
- V(NullMap) \
- V(NullValue) \
- V(NumberDictionaryMap) \
- V(OneClosureCellMap) \
- V(OnePointerFillerMap) \
- V(OptimizedOut) \
- V(OrderedHashMapMap) \
- V(OrderedHashSetMap) \
- V(PropertyArrayMap) \
- V(ScopeInfoMap) \
- V(ScriptContextMap) \
- V(ScriptContextTableMap) \
- V(SelfReferenceMarker) \
- V(SharedFunctionInfoMap) \
- V(SimpleNumberDictionaryMap) \
- V(SloppyArgumentsElementsMap) \
- V(SmallOrderedHashMapMap) \
- V(SmallOrderedHashSetMap) \
- V(ArraySpeciesProtector) \
- V(TypedArraySpeciesProtector) \
- V(PromiseSpeciesProtector) \
- V(StaleRegister) \
- V(StringLengthProtector) \
- V(StringTableMap) \
- V(SymbolMap) \
- V(TerminationException) \
- V(TheHoleMap) \
- V(TheHoleValue) \
- V(TransitionArrayMap) \
- V(TrueValue) \
- V(TwoPointerFillerMap) \
- V(UndefinedMap) \
- V(UndefinedValue) \
- V(UninitializedMap) \
- V(UninitializedValue) \
- V(WeakCellMap) \
- V(WeakFixedArrayMap) \
- V(WeakArrayListMap) \
- V(WithContextMap) \
- V(empty_string) \
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(ArgumentsMarker) \
+ V(ArgumentsMarkerMap) \
+ V(ArrayBufferNeuteringProtector) \
+ V(ArrayIteratorProtector) \
+ V(BigIntMap) \
+ V(BlockContextMap) \
+ V(ObjectBoilerplateDescriptionMap) \
+ V(BooleanMap) \
+ V(ByteArrayMap) \
+ V(BytecodeArrayMap) \
+ V(CatchContextMap) \
+ V(CellMap) \
+ V(CodeMap) \
+ V(DebugEvaluateContextMap) \
+ V(DescriptorArrayMap) \
+ V(EphemeronHashTableMap) \
+ V(EmptyByteArray) \
+ V(EmptyDescriptorArray) \
+ V(EmptyFixedArray) \
+ V(EmptyFixedFloat32Array) \
+ V(EmptyFixedFloat64Array) \
+ V(EmptyFixedInt16Array) \
+ V(EmptyFixedInt32Array) \
+ V(EmptyFixedInt8Array) \
+ V(EmptyFixedUint16Array) \
+ V(EmptyFixedUint32Array) \
+ V(EmptyFixedUint8Array) \
+ V(EmptyFixedUint8ClampedArray) \
+ V(EmptyOrderedHashMap) \
+ V(EmptyOrderedHashSet) \
+ V(EmptyPropertyCell) \
+ V(EmptyScopeInfo) \
+ V(EmptyScript) \
+ V(EmptySloppyArgumentsElements) \
+ V(EmptySlowElementDictionary) \
+ V(EmptyWeakCell) \
+ V(EvalContextMap) \
+ V(Exception) \
+ V(FalseValue) \
+ V(FixedArrayMap) \
+ V(FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap) \
+ V(ForeignMap) \
+ V(FreeSpaceMap) \
+ V(FunctionContextMap) \
+ V(GlobalDictionaryMap) \
+ V(GlobalPropertyCellMap) \
+ V(HashTableMap) \
+ V(HeapNumberMap) \
+ V(HoleNanValue) \
+ V(InfinityValue) \
+ V(IsConcatSpreadableProtector) \
+ V(JSMessageObjectMap) \
+ V(JsConstructEntryCode) \
+ V(JsEntryCode) \
+ V(ManyClosuresCell) \
+ V(ManyClosuresCellMap) \
+ V(MetaMap) \
+ V(MinusInfinityValue) \
+ V(MinusZeroValue) \
+ V(ModuleContextMap) \
+ V(ModuleInfoMap) \
+ V(MutableHeapNumberMap) \
+ V(NameDictionaryMap) \
+ V(NanValue) \
+ V(NativeContextMap) \
+ V(NoClosuresCellMap) \
+ V(NoElementsProtector) \
+ V(NullMap) \
+ V(NullValue) \
+ V(NumberDictionaryMap) \
+ V(OneClosureCellMap) \
+ V(OnePointerFillerMap) \
+ V(OptimizedOut) \
+ V(OrderedHashMapMap) \
+ V(OrderedHashSetMap) \
+ V(PreParsedScopeDataMap) \
+ V(PropertyArrayMap) \
+ V(ScopeInfoMap) \
+ V(ScriptContextMap) \
+ V(ScriptContextTableMap) \
+ V(SelfReferenceMarker) \
+ V(SharedFunctionInfoMap) \
+ V(SimpleNumberDictionaryMap) \
+ V(SloppyArgumentsElementsMap) \
+ V(SmallOrderedHashMapMap) \
+ V(SmallOrderedHashSetMap) \
+ V(ArraySpeciesProtector) \
+ V(TypedArraySpeciesProtector) \
+ V(PromiseSpeciesProtector) \
+ V(StaleRegister) \
+ V(StringLengthProtector) \
+ V(StringTableMap) \
+ V(SymbolMap) \
+ V(TerminationException) \
+ V(TheHoleMap) \
+ V(TheHoleValue) \
+ V(TransitionArrayMap) \
+ V(TrueValue) \
+ V(TwoPointerFillerMap) \
+ V(UndefinedMap) \
+ V(UndefinedValue) \
+ V(UninitializedMap) \
+ V(UninitializedValue) \
+ V(UncompiledDataWithoutPreParsedScopeMap) \
+ V(UncompiledDataWithPreParsedScopeMap) \
+ V(WeakCellMap) \
+ V(WeakFixedArrayMap) \
+ V(WeakArrayListMap) \
+ V(WithContextMap) \
+ V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
@@ -434,6 +180,7 @@ class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
@@ -471,7 +218,7 @@ enum class FixedArrayVisitationMode { kRegular, kIncremental };
enum class TraceRetainingPathMode { kEnabled, kDisabled };
-enum class RetainingPathOption { kDefault, kTrackEphemeralPath };
+enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
enum class GarbageCollectionReason {
kUnknown = 0,
@@ -495,7 +242,8 @@ enum class GarbageCollectionReason {
kRuntime = 18,
kSamplingProfiler = 19,
kSnapshotCreator = 20,
- kTesting = 21
+ kTesting = 21,
+ kExternalFinalize = 22
// If you add new items here, then update the incremental_marking_reason,
// mark_compact_reason, and scavenge_reason counters in counters.h.
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
@@ -594,6 +342,10 @@ class Heap {
#undef DECL
#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
+ ALLOCATION_SITE_LIST(DECL)
+#undef DECL
+
+#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
DATA_HANDLER_LIST(DECL)
#undef DECL
@@ -650,21 +402,9 @@ class Heap {
static const size_t kMaxSemiSpaceSizeInKB =
16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
- // The old space size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const size_t kMinOldGenerationSize = 128 * kPointerMultiplier;
- static const size_t kMaxOldGenerationSize = 1024 * kPointerMultiplier;
-
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
- V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
- V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
- static const double kMaxHeapGrowingFactorMemoryConstrained;
- static const double kMaxHeapGrowingFactorIdle;
- static const double kConservativeHeapGrowingFactor;
- static const double kTargetMutatorUtilization;
-
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
@@ -720,6 +460,10 @@ class Heap {
#endif
}
+ static uintptr_t ZapValue() {
+ return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
+ }
+
static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
}
@@ -744,12 +488,6 @@ class Heap {
return "Unknown collector";
}
- V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
- size_t max_old_generation_size);
- V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
- double mutator_speed,
- double max_factor);
-
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -816,13 +554,10 @@ class Heap {
// Used in CreateAllocationSiteStub and the (de)serializer.
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
- void set_encountered_weak_collections(Object* weak_collection) {
- encountered_weak_collections_ = weak_collection;
- }
- Object* encountered_weak_collections() const {
- return encountered_weak_collections_;
- }
- void IterateEncounteredWeakCollections(RootVisitor* visitor);
+ // Traverse all the allocaions_sites [nested_site and weak_next] in the list
+ // and foreach call the visitor
+ void ForeachAllocationSite(Object* list,
+ std::function<void(AllocationSite*)> visitor);
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
@@ -917,7 +652,7 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- inline uint32_t HashSeed();
+ inline uint64_t HashSeed();
inline int NextScriptId();
inline int NextDebuggingId();
@@ -969,7 +704,7 @@ class Heap {
bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
- return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
+ return memory_pressure_level_ != MemoryPressureLevel::kNone;
}
void RestoreHeapLimit(size_t heap_limit) {
@@ -987,15 +722,14 @@ class Heap {
// max_semi_space_size_in_kb: maximum semi-space size in KB
// max_old_generation_size_in_mb: maximum old generation size in MB
// code_range_size_in_mb: code range size in MB
- // Return false if the heap has been set up already.
- bool ConfigureHeap(size_t max_semi_space_size_in_kb,
+ void ConfigureHeap(size_t max_semi_space_size_in_kb,
size_t max_old_generation_size_in_mb,
size_t code_range_size_in_mb);
- bool ConfigureHeapDefault();
+ void ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
// without actually creating any objects.
- bool SetUp();
+ void SetUp();
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
@@ -1027,6 +761,7 @@ class Heap {
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
@@ -1060,35 +795,19 @@ class Heap {
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
+ friend class ReadOnlyRoots;
- // Heap root getters.
+ public:
+// Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
- ROOT_LIST(ROOT_ACCESSOR)
+ MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- // Utility type maps.
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
inline Map* name##_map();
DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
#undef DATA_HANDLER_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) inline String* name();
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) inline Symbol* name();
- PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
- PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
inline AccessorInfo* accessor_name##_accessor();
ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
@@ -1119,6 +838,15 @@ class Heap {
return kRootsExternalReferenceTableOffset;
}
+ static constexpr int roots_to_builtins_offset() {
+ return kRootsBuiltinsOffset;
+ }
+
+ Address root_register_addressable_end() {
+ return reinterpret_cast<Address>(roots_array_start()) +
+ kRootRegisterAddressableEndOffset;
+ }
+
// Sets the stub_cache_ (only used when expanding the dictionary).
void SetRootCodeStubs(SimpleNumberDictionary* value);
@@ -1186,14 +914,14 @@ class Heap {
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- bool CollectGarbage(
+ V8_EXPORT_PRIVATE bool CollectGarbage(
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(
+ V8_EXPORT_PRIVATE void CollectAllGarbage(
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1216,6 +944,14 @@ class Heap {
void HandleGCRequest();
// ===========================================================================
+ // Builtins. =================================================================
+ // ===========================================================================
+
+ Code* builtin(int index);
+ Address builtin_address(int index);
+ void set_builtin(int index, HeapObject* builtin);
+
+ // ===========================================================================
// Iterators. ================================================================
// ===========================================================================
@@ -1228,6 +964,8 @@ class Heap {
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
// Iterates over weak global handles.
void IterateWeakGlobalHandles(RootVisitor* v);
+ // Iterates over builtins.
+ void IterateBuiltins(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1283,6 +1021,8 @@ class Heap {
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
+ // Synchronously finalizes incremental marking.
+ void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);
void RegisterDeserializedObjectsForBlackAllocation(
Reservation* reservations, const std::vector<HeapObject*>& large_objects,
@@ -1358,15 +1098,15 @@ class Heap {
// ===========================================================================
// Returns whether the object resides in new space.
- inline bool InNewSpace(Object* object);
- inline bool InNewSpace(MaybeObject* object);
- inline bool InNewSpace(HeapObject* heap_object);
- inline bool InFromSpace(Object* object);
- inline bool InFromSpace(MaybeObject* object);
- inline bool InFromSpace(HeapObject* heap_object);
- inline bool InToSpace(Object* object);
- inline bool InToSpace(MaybeObject* object);
- inline bool InToSpace(HeapObject* heap_object);
+ static inline bool InNewSpace(Object* object);
+ static inline bool InNewSpace(MaybeObject* object);
+ static inline bool InNewSpace(HeapObject* heap_object);
+ static inline bool InFromSpace(Object* object);
+ static inline bool InFromSpace(MaybeObject* object);
+ static inline bool InFromSpace(HeapObject* heap_object);
+ static inline bool InToSpace(Object* object);
+ static inline bool InToSpace(MaybeObject* object);
+ static inline bool InToSpace(HeapObject* heap_object);
// Returns whether the object resides in old space.
inline bool InOldSpace(Object* object);
@@ -1389,6 +1129,10 @@ class Heap {
inline bool InNewSpaceSlow(Address address);
inline bool InOldSpaceSlow(Address address);
+ // Find the heap which owns this HeapObject. Should never be called for
+ // objects in RO space.
+ static inline Heap* FromWritableHeapObject(const HeapObject* obj);
+
// ===========================================================================
// Object statistics tracking. ===============================================
// ===========================================================================
@@ -1431,14 +1175,8 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
- static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(
- physical_memory / i::MB / old_space_physical_memory_factor *
- kPointerMultiplier);
- return Max(Min(computed_size, kMaxOldGenerationSize),
- kMinOldGenerationSize);
- }
+ V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
+ uint64_t physical_memory);
static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
const uint64_t min_physical_memory = 512 * MB;
@@ -1462,6 +1200,10 @@ class Heap {
// Returns the capacity of the old generation.
size_t OldGenerationCapacity();
+ // Returns the amount of memory currently committed for the heap and memory
+ // held alive by the unmapper.
+ size_t CommittedMemoryOfHeapAndUnmapper();
+
// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();
@@ -1658,6 +1400,7 @@ class Heap {
return !allocation_trackers_.empty();
}
+ // ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -1902,9 +1645,9 @@ class Heap {
// These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(void CreateJSEntryStub());
- NO_INLINE(void CreateJSConstructEntryStub());
- NO_INLINE(void CreateJSRunMicrotasksEntryStub());
+ V8_NOINLINE void CreateJSEntryStub();
+ V8_NOINLINE void CreateJSConstructEntryStub();
+ V8_NOINLINE void CreateJSRunMicrotasksEntryStub();
void CreateFixedStubs();
@@ -1981,7 +1724,8 @@ class Heap {
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
- void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);
+ void FinalizeIncrementalMarkingIncrementally(
+ GarbageCollectionReason gc_reason);
// Returns the timer used for a given GC type.
// - GCScavenger: young generation GC
@@ -2084,6 +1828,9 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
+ HeapController* heap_controller() { return heap_controller_; }
+ MemoryReducer* memory_reducer() { return memory_reducer_; }
+
// For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
// This constant limits the effect of load RAIL mode on GC.
// The value is arbitrary and chosen as the largest load time observed in
@@ -2092,22 +1839,6 @@ class Heap {
bool ShouldOptimizeForLoadTime();
- // Decrease the allocation limit if the new limit based on the given
- // parameters is lower than the current limit.
- void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
- double mutator_speed);
-
- // Calculates the allocation limit based on a given growing factor and a
- // given old generation size.
- size_t CalculateOldGenerationAllocationLimit(double factor,
- size_t old_gen_size);
-
- // Sets the allocation limit to trigger the next full garbage collection.
- void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
- double mutator_speed);
-
- size_t MinimumAllocationLimitGrowingStep();
-
size_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
@@ -2118,6 +1849,10 @@ class Heap {
bool ShouldExpandOldGenerationOnSlowAllocation();
+ enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
+
+ HeapGrowingMode CurrentHeapGrowingMode();
+
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
@@ -2161,7 +1896,7 @@ class Heap {
// triggered and the allocation is retried. This is performed multiple times.
// If after that retry procedure the allocation still fails nullptr is
// returned.
- HeapObject* AllocateRawWithLigthRetry(
+ HeapObject* AllocateRawWithLightRetry(
int size, AllocationSpace space,
AllocationAlignment alignment = kWordAligned);
@@ -2202,7 +1937,7 @@ class Heap {
// ===========================================================================
void AddRetainer(HeapObject* retainer, HeapObject* object);
- void AddEphemeralRetainer(HeapObject* retainer, HeapObject* object);
+ void AddEphemeronRetainer(HeapObject* retainer, HeapObject* object);
void AddRetainingRoot(Root root, HeapObject* object);
// Returns true if the given object is a target of retaining path tracking.
// Stores the option corresponding to the object in the provided *option.
@@ -2234,6 +1969,20 @@ class Heap {
kRootListLength * kPointerSize;
ExternalReferenceTable external_reference_table_;
+ // As external references above, builtins are accessed through an offset from
+ // the roots register. Its offset from roots_ must remain static. This is
+ // verified in Isolate::Init() using runtime checks.
+ static constexpr int kRootsBuiltinsOffset =
+ kRootsExternalReferenceTableOffset +
+ ExternalReferenceTable::SizeInBytes();
+ Object* builtins_[Builtins::builtin_count];
+
+ // kRootRegister may be used to address any location that starts at the
+ // Isolate and ends at this point. Fields past this point are not guaranteed
+ // to live at a static offset from kRootRegister.
+ static constexpr int kRootRegisterAddressableEndOffset =
+ kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;
+
size_t code_range_size_;
size_t max_semi_space_size_;
size_t initial_semispace_size_;
@@ -2256,7 +2005,7 @@ class Heap {
// Stores the memory pressure level that set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
- base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+ std::atomic<MemoryPressureLevel> memory_pressure_level_;
std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
near_heap_limit_callbacks_;
@@ -2274,6 +2023,7 @@ class Heap {
CodeSpace* code_space_;
MapSpace* map_space_;
LargeObjectSpace* lo_space_;
+ NewLargeObjectSpace* new_lo_space_;
ReadOnlySpace* read_only_space_;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
@@ -2346,11 +2096,6 @@ class Heap {
Object* native_contexts_list_;
Object* allocation_sites_list_;
- // List of encountered weak collections (JSWeakMap and JSWeakSet) during
- // marking. It is initialized during marking, destroyed after marking and
- // contains Smi(0) while marking is not active.
- Object* encountered_weak_collections_;
-
std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
std::vector<GCCallbackTuple> gc_prologue_callbacks_;
@@ -2394,6 +2139,8 @@ class Heap {
StoreBuffer* store_buffer_;
+ HeapController* heap_controller_;
+
IncrementalMarking* incremental_marking_;
ConcurrentMarking* concurrent_marking_;
@@ -2489,7 +2236,7 @@ class Heap {
std::map<HeapObject*, Root> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
// ephemeron is stored in this map.
- std::map<HeapObject*, HeapObject*> ephemeral_retainer_;
+ std::map<HeapObject*, HeapObject*> ephemeron_retainer_;
// For each index inthe retaining_path_targets_ array this map
// stores the option of the corresponding target.
std::map<int, RetainingPathOption> retaining_path_target_option_;
@@ -2499,8 +2246,10 @@ class Heap {
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ConcurrentMarking;
+ friend class EphemeronHashTableMarkingTask;
friend class GCCallbacksScope;
friend class GCTracer;
+ friend class HeapController;
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
@@ -2530,6 +2279,8 @@ class Heap {
// Used in cctest.
friend class heap::HeapTester;
+ FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
+
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2624,6 +2375,7 @@ class CodePageMemoryModificationScope {
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
+ explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
void VisitPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) override;
@@ -2633,6 +2385,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
protected:
virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end);
+
+ Heap* heap_;
};
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index cdbd5b10ed..2b84a45999 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -16,6 +16,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
+#include "src/objects/hash-table-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
@@ -42,7 +43,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
// Ensure that the new object is marked black.
HeapObject* object = HeapObject::FromAddress(addr);
if (incremental_marking_.marking_state()->IsWhite(object) &&
- !heap->InNewSpace(object)) {
+ !(Heap::InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
if (heap->lo_space()->Contains(object)) {
incremental_marking_.marking_state()->WhiteToBlack(object);
} else {
@@ -98,14 +99,16 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
Object* value) {
if (BaseRecordWrite(obj, value) && slot != nullptr) {
// Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
+ heap_->mark_compact_collector()->RecordSlot(obj, slot,
+ HeapObject::cast(value));
}
}
-int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
+int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
- isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
+ isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
+ *slot);
// Called by RecordWriteCodeStubAssembler, which doesnt accept void type
return 0;
}
@@ -226,33 +229,10 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
Heap* heap_;
};
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking) {
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
-}
-
-
-void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
- bool is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
-}
-
-
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
PagedSpace* space) {
for (Page* p : *space) {
- SetOldSpacePageFlags(p, false);
+ p->SetOldGenerationPageFlags(false);
}
}
@@ -260,7 +240,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
NewSpace* space) {
for (Page* p : *space) {
- SetNewSpacePageFlags(p, false);
+ p->SetYoungGenerationPageFlags(false);
}
}
@@ -271,22 +251,22 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
- for (LargePage* lop : *heap_->lo_space()) {
- SetOldSpacePageFlags(lop, false);
+ for (LargePage* p : *heap_->lo_space()) {
+ p->SetOldGenerationPageFlags(false);
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
for (Page* p : *space) {
- SetOldSpacePageFlags(p, true);
+ p->SetOldGenerationPageFlags(true);
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
for (Page* p : *space) {
- SetNewSpacePageFlags(p, true);
+ p->SetYoungGenerationPageFlags(true);
}
}
@@ -297,8 +277,8 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
- for (LargePage* lop : *heap_->lo_space()) {
- SetOldSpacePageFlags(lop, true);
+ for (LargePage* p : *heap_->lo_space()) {
+ p->SetOldGenerationPageFlags(true);
}
}
@@ -485,16 +465,14 @@ void IncrementalMarking::MarkRoots() {
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
-bool ShouldRetainMap(Map* map, int age) {
+bool IncrementalMarking::ShouldRetainMap(Map* map, int age) {
if (age == 0) {
// The map has aged. Do not retain this map.
return false;
}
Object* constructor = map->GetConstructor();
- Heap* heap = map->GetHeap();
if (!constructor->IsHeapObject() ||
- heap->incremental_marking()->marking_state()->IsWhite(
- HeapObject::cast(constructor))) {
+ marking_state()->IsWhite(HeapObject::cast(constructor))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
return false;
@@ -587,7 +565,7 @@ void IncrementalMarking::FinalizeIncrementally() {
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
if (!IsMarking()) return;
- Map* filler_map = heap_->one_pointer_filler_map();
+ Map* filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
@@ -600,7 +578,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
HeapObject* obj, HeapObject** out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
- if (heap_->InFromSpace(obj)) {
+ if (Heap::InFromSpace(obj)) {
MapWord map_word = obj->map_word();
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist anymore,
@@ -614,7 +592,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
*out = dest;
return true;
- } else if (heap_->InToSpace(obj)) {
+ } else if (Heap::InToSpace(obj)) {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
@@ -652,43 +630,84 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
UpdateWeakReferencesAfterScavenge();
}
+namespace {
+template <typename T>
+T* ForwardingAddress(T* heap_obj) {
+ MapWord map_word = heap_obj->map_word();
+
+ if (map_word.IsForwardingAddress()) {
+ return T::cast(map_word.ToForwardingAddress());
+ } else if (Heap::InNewSpace(heap_obj)) {
+ return nullptr;
+ } else {
+ return heap_obj;
+ }
+}
+} // namespace
+
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->weak_references.Update(
[](std::pair<HeapObject*, HeapObjectReference**> slot_in,
std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
+ HeapObject* forwarded = ForwardingAddress(heap_obj);
+
+ if (forwarded) {
ptrdiff_t distance_to_slot =
reinterpret_cast<Address>(slot_in.second) -
reinterpret_cast<Address>(slot_in.first);
Address new_slot =
- reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
- distance_to_slot;
- slot_out->first = map_word.ToForwardingAddress();
+ reinterpret_cast<Address>(forwarded) + distance_to_slot;
+ slot_out->first = forwarded;
slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
return true;
}
- if (heap_obj->GetHeap()->InNewSpace(heap_obj)) {
- // The new space object containing the weak reference died.
- return false;
- }
- *slot_out = slot_in;
- return true;
+
+ return false;
});
weak_objects_->weak_objects_in_code.Update(
[](std::pair<HeapObject*, Code*> slot_in,
std::pair<HeapObject*, Code*>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- slot_out->first = map_word.ToForwardingAddress();
+ HeapObject* forwarded = ForwardingAddress(heap_obj);
+
+ if (forwarded) {
+ slot_out->first = forwarded;
slot_out->second = slot_in.second;
- } else {
- *slot_out = slot_in;
+ return true;
}
- return true;
+
+ return false;
});
+ weak_objects_->ephemeron_hash_tables.Update(
+ [](EphemeronHashTable* slot_in, EphemeronHashTable** slot_out) -> bool {
+ EphemeronHashTable* forwarded = ForwardingAddress(slot_in);
+
+ if (forwarded) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+
+ auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
+ HeapObject* key = slot_in.key;
+ HeapObject* value = slot_in.value;
+ HeapObject* forwarded_key = ForwardingAddress(key);
+ HeapObject* forwarded_value = ForwardingAddress(value);
+
+ if (forwarded_key && forwarded_value) {
+ *slot_out = Ephemeron{forwarded_key, forwarded_value};
+ return true;
+ }
+
+ return false;
+ };
+
+ weak_objects_->current_ephemerons.Update(ephemeron_updater);
+ weak_objects_->next_ephemerons.Update(ephemeron_updater);
+ weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index ec0a8fe269..1a916693ba 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -196,7 +196,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
inline void RestartIfNotMarking();
- static int RecordWriteFromCode(HeapObject* obj, Object** slot,
+ static int RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
@@ -226,14 +226,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// the concurrent marker.
void MarkBlackAndPush(HeapObject* obj);
- inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
- SetOldSpacePageFlags(chunk, IsMarking());
- }
-
- inline void SetNewSpacePageFlags(Page* chunk) {
- SetNewSpacePageFlags(chunk, IsMarking());
- }
-
bool IsCompacting() { return IsMarking() && is_compacting_; }
void ActivateGeneratedStub(Code* stub);
@@ -279,10 +271,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
IncrementalMarking& incremental_marking_;
};
- static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-
- static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-
void StartMarking();
void StartBlackAllocation();
@@ -290,6 +278,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinishBlackAllocation();
void MarkRoots();
+ bool ShouldRetainMap(Map* map, int age);
// Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
// increase chances of reusing of map transition tree in future.
void RetainMaps();
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 51e0afd401..1ad9d22fa4 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -49,17 +49,18 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
virtual ~Item() = default;
// Marks an item as being finished.
- void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
+ void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
private:
- enum ProcessingState { kAvailable, kProcessing, kFinished };
+ enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
bool TryMarkingAsProcessing() {
- return state_.TrySetValue(kAvailable, kProcessing);
+ ProcessingState available = kAvailable;
+ return state_.compare_exchange_strong(available, kProcessing);
}
- bool IsFinished() { return state_.Value() == kFinished; }
+ bool IsFinished() { return state_ == kFinished; }
- base::AtomicValue<ProcessingState> state_{kAvailable};
+ std::atomic<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 249144835a..d200671955 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -9,6 +9,7 @@
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
+#include "src/objects/js-collection-inl.h"
namespace v8 {
namespace internal {
@@ -88,29 +89,38 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitJSWeakCollection(Map* map, JSWeakCollection* weak_collection) {
- // Enqueue weak collection in linked list of encountered weak collections.
- if (weak_collection->next() == heap_->undefined_value()) {
- weak_collection->set_next(heap_->encountered_weak_collections());
- heap_->set_encountered_weak_collections(weak_collection);
+ VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ collector_->AddEphemeronHashTable(table);
+
+ for (int i = 0; i < table->Capacity(); i++) {
+ Object** key_slot =
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
+ HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ collector_->RecordSlot(table, key_slot, key);
+
+ Object** value_slot =
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+
+ if (marking_state()->IsBlackOrGrey(key)) {
+ VisitPointer(table, value_slot);
+
+ } else {
+ Object* value_obj = *value_slot;
+
+ if (value_obj->IsHeapObject()) {
+ HeapObject* value = HeapObject::cast(value_obj);
+ collector_->RecordSlot(table, value_slot, value);
+
+ // Revisit ephemerons with both key and value unreachable at end
+ // of concurrent marking cycle.
+ if (marking_state()->IsWhite(value)) {
+ collector_->AddEphemeron(key, value);
+ }
+ }
+ }
}
- // Skip visiting the backing hash table containing the mappings and the
- // pointer to the other enqueued weak collections, both are post-processed.
- int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
- JSWeakCollection::BodyDescriptorWeak::IterateBody(map, weak_collection, size,
- this);
-
- // Partially initialized weak collection is enqueued, but table is ignored.
- if (!weak_collection->table()->IsHashTable()) return size;
-
- // Mark the backing hash table without pushing it on the marking stack.
- Object** slot =
- HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
- HeapObject* obj = HeapObject::cast(*slot);
- collector_->RecordSlot(weak_collection, slot, obj);
- MarkObjectWithoutPush(weak_collection, obj);
- return size;
+ return table->SizeFromMap(map);
}
template <FixedArrayVisitationMode fixed_array_mode,
@@ -162,7 +172,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- collector_->RecordSlot(weak_cell, slot, *slot);
+ collector_->RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
} else {
// If we do not know about liveness of values of weak cells, we have to
// process them when we know the liveness of the whole transitive
@@ -250,7 +260,7 @@ template <FixedArrayVisitationMode fixed_array_mode,
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitCodeTarget(Code* host,
RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
collector_->RecordRelocSlot(host, rinfo, target);
MarkObject(host, target);
@@ -383,7 +393,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
#ifdef ENABLE_MINOR_MC
void MinorMarkCompactCollector::MarkRootObject(HeapObject* obj) {
- if (heap_->InNewSpace(obj) && non_atomic_marking_state_.WhiteToGrey(obj)) {
+ if (Heap::InNewSpace(obj) && non_atomic_marking_state_.WhiteToGrey(obj)) {
worklist_->Push(kMainThread, obj);
}
}
@@ -400,13 +410,13 @@ void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
}
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
- Object* target) {
+ HeapObject* target) {
RecordSlot(object, reinterpret_cast<HeapObjectReference**>(slot), target);
}
void MarkCompactCollector::RecordSlot(HeapObject* object,
HeapObjectReference** slot,
- Object* target) {
+ HeapObject* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
@@ -420,9 +430,11 @@ template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
Address start)
: chunk_(chunk),
- one_word_filler_map_(chunk->heap()->one_pointer_filler_map()),
- two_word_filler_map_(chunk->heap()->two_pointer_filler_map()),
- free_space_map_(chunk->heap()->free_space_map()),
+ one_word_filler_map_(
+ ReadOnlyRoots(chunk->heap()).one_pointer_filler_map()),
+ two_word_filler_map_(
+ ReadOnlyRoots(chunk->heap()).two_pointer_filler_map()),
+ free_space_map_(ReadOnlyRoots(chunk->heap()).free_space_map()),
it_(chunk, bitmap) {
it_.Advance(Bitmap::IndexToCell(
Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(start))));
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index e69551f70e..6bc01238b8 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -103,7 +103,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
object = HeapObject::FromAddress(current);
// One word fillers at the end of a black area can be grey.
if (IsBlackOrGrey(object) &&
- object->map() != heap_->one_pointer_filler_map()) {
+ object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(this);
@@ -128,9 +128,10 @@ void MarkingVerifier::VerifyMarking(NewSpace* space) {
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
- CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
+ CHECK_EQ(space->first_allocatable_address(),
+ space->first_page()->area_start());
- PageRange range(space->bottom(), end);
+ PageRange range(space->first_allocatable_address(), end);
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address limit = it != range.end() ? page->area_end() : end;
@@ -258,7 +259,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
- PageRange range(space->bottom(), space->top());
+ PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address current = page->area_start();
@@ -298,8 +299,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- if (heap()->InNewSpace(object)) {
- CHECK(heap()->InToSpace(object));
+ if (Heap::InNewSpace(object)) {
+ CHECK(Heap::InToSpace(object));
}
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
}
@@ -309,8 +310,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
if ((*current)->ToStrongHeapObject(&object)) {
- if (heap()->InNewSpace(object)) {
- CHECK(heap()->InToSpace(object));
+ if (Heap::InNewSpace(object)) {
+ CHECK(Heap::InToSpace(object));
}
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
}
@@ -422,8 +423,7 @@ static void TraceFragmentation(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
intptr_t reserved = (number_of_pages * space->AreaSize());
intptr_t free = reserved - space->SizeOfObjects();
- PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
- AllocationSpaceName(space->identity()), number_of_pages,
+ PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
@@ -488,7 +488,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- for (Page* p : PageRange(space->bottom(), space->top())) {
+ for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
}
@@ -711,9 +711,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
"fragmentation_limit_kb=%" PRIuS
" fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
- AllocationSpaceName(space->identity()), free_bytes / KB,
- free_bytes_threshold / KB, target_fragmentation_percent,
- total_live_bytes / KB, max_evacuated_bytes / KB);
+ space->name(), free_bytes / KB, free_bytes_threshold / KB,
+ target_fragmentation_percent, total_live_bytes / KB,
+ max_evacuated_bytes / KB);
}
}
// How many pages we will allocated for the evacuated objects
@@ -735,8 +735,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
PrintIsolate(isolate(),
"compaction-selection: space=%s reduce_memory=%d pages=%d "
"total_live_bytes=%zu\n",
- AllocationSpaceName(space->identity()), reduce_memory,
- candidate_count, total_live_bytes / KB);
+ space->name(), reduce_memory, candidate_count,
+ total_live_bytes / KB);
}
}
@@ -780,7 +780,6 @@ void MarkCompactCollector::Prepare() {
FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
heap()->incremental_marking()->Deactivate();
ClearMarkbits();
- AbortWeakCollections();
AbortWeakObjects();
AbortCompaction();
heap_->local_embedder_heap_tracer()->AbortTracing();
@@ -831,9 +830,11 @@ void MarkCompactCollector::VerifyMarking() {
}
#endif
#ifdef VERIFY_HEAP
- heap()->old_space()->VerifyLiveBytes();
- heap()->map_space()->VerifyLiveBytes();
- heap()->code_space()->VerifyLiveBytes();
+ if (FLAG_verify_heap) {
+ heap()->old_space()->VerifyLiveBytes();
+ heap()->map_space()->VerifyLiveBytes();
+ heap()->code_space()->VerifyLiveBytes();
+ }
#endif
}
@@ -844,6 +845,10 @@ void MarkCompactCollector::Finish() {
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
+ CHECK(weak_objects_.current_ephemerons.IsEmpty());
+ CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ weak_objects_.next_ephemerons.Clear();
+
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
@@ -944,7 +949,7 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
- Object* the_hole = heap_->the_hole_value();
+ Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
for (Object** p = start; p < end; p++) {
@@ -957,8 +962,8 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
*p = the_hole;
} else {
// StringTable contains only old space strings.
- DCHECK(!heap_->InNewSpace(o));
- MarkCompactCollector::RecordSlot(table_, p, o);
+ DCHECK(!Heap::InNewSpace(o));
+ MarkCompactCollector::RecordSlot(table_, p, heap_object);
}
}
}
@@ -988,7 +993,7 @@ class ExternalStringTableCleaner : public RootVisitor {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- Object* the_hole = heap_->the_hole_value();
+ Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject()) {
@@ -1028,9 +1033,17 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
!(AllocationSite::cast(object)->IsZombie())) {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
- AllocationSite* site = AllocationSite::cast(object);
- site->MarkZombie();
- marking_state_->WhiteToBlack(site);
+
+ Object* nested = object;
+ while (nested->IsAllocationSite()) {
+ AllocationSite* current_site = AllocationSite::cast(nested);
+ // MarkZombie will override the nested_site, read it first before
+ // marking
+ nested = current_site->nested_site();
+ current_site->MarkZombie();
+ marking_state_->WhiteToBlack(current_site);
+ }
+
return object;
} else {
return nullptr;
@@ -1074,11 +1087,11 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
- DCHECK(!collector_->heap()->InNewSpace(target));
+ DCHECK(!Heap::InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
@@ -1136,7 +1149,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
int size) final {
if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
PROFILE(heap_->isolate(),
- CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
+ CodeMoveEvent(AbstractCode::cast(src), dst->address()));
}
heap_->OnMoveEvent(dst, src, size);
}
@@ -1365,7 +1378,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
break;
case NEW_TO_OLD: {
- page->Unlink();
+ page->heap()->new_space()->from_space().RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InNewSpace());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
@@ -1426,14 +1439,12 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
+bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
HeapObject* heap_object = HeapObject::cast(o);
- return heap_object->GetHeap()
- ->mark_compact_collector()
- ->non_atomic_marking_state()
- ->IsWhite(HeapObject::cast(o));
+ return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
+ heap_object);
}
void MarkCompactCollector::MarkStringTable(
@@ -1457,7 +1468,185 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
ProcessTopOptimizedFrame(custom_root_body_visitor);
}
+void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
+ bool work_to_do = true;
+ int iterations = 0;
+ int max_iterations = FLAG_ephemeron_fixpoint_iterations;
+
+ while (work_to_do) {
+ PerformWrapperTracing();
+
+ if (iterations >= max_iterations) {
+ // Give up fixpoint iteration and switch to linear algorithm.
+ ProcessEphemeronsLinear();
+ break;
+ }
+
+ // Move ephemerons from next_ephemerons into current_ephemerons to
+ // drain them in this iteration.
+ weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+ heap()->concurrent_marking()->set_ephemeron_marked(false);
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
+
+ if (FLAG_parallel_marking) {
+ DCHECK(FLAG_concurrent_marking);
+ heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ }
+
+ work_to_do = ProcessEphemerons();
+ FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ }
+
+ CHECK(weak_objects_.current_ephemerons.IsEmpty());
+ CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+
+ work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
+ heap()->concurrent_marking()->ephemeron_marked() ||
+ !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
+ ++iterations;
+ }
+
+ CHECK(marking_worklist()->IsEmpty());
+ CHECK(weak_objects_.current_ephemerons.IsEmpty());
+ CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+}
+
+bool MarkCompactCollector::ProcessEphemerons() {
+ Ephemeron ephemeron;
+ bool ephemeron_marked = false;
+
+ // Drain current_ephemerons and push ephemerons where key and value are still
+ // unreachable into next_ephemerons.
+ while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
+ if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ ephemeron_marked = true;
+ }
+ }
+
+ // Drain marking worklist and push discovered ephemerons into
+ // discovered_ephemerons.
+ ProcessMarkingWorklist();
+
+ // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
+ // before) and push ephemerons where key and value are still unreachable into
+ // next_ephemerons.
+ while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
+ if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ ephemeron_marked = true;
+ }
+ }
+
+ // Flush local ephemerons for main task to global pool.
+ weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
+ weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
+
+ return ephemeron_marked;
+}
+
+void MarkCompactCollector::ProcessEphemeronsLinear() {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
+ CHECK(heap()->concurrent_marking()->IsStopped());
+ std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
+ Ephemeron ephemeron;
+
+ DCHECK(weak_objects_.current_ephemerons.IsEmpty());
+ weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+
+ while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
+ VisitEphemeron(ephemeron.key, ephemeron.value);
+
+ if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
+ key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
+ }
+ }
+
+ ephemeron_marking_.newly_discovered_limit = key_to_values.size();
+ bool work_to_do = true;
+
+ while (work_to_do) {
+ PerformWrapperTracing();
+
+ ResetNewlyDiscovered();
+ ephemeron_marking_.newly_discovered_limit = key_to_values.size();
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
+ // Drain marking worklist and push all discovered objects into
+ // newly_discovered.
+ ProcessMarkingWorklistInternal<
+ MarkCompactCollector::MarkingWorklistProcessingMode::
+ kTrackNewlyDiscoveredObjects>();
+ }
+
+ while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
+ VisitEphemeron(ephemeron.key, ephemeron.value);
+
+ if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
+ key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
+ }
+ }
+
+ if (ephemeron_marking_.newly_discovered_overflowed) {
+ // If newly_discovered was overflowed just visit all ephemerons in
+ // next_ephemerons.
+ weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
+ if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
+ non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
+ marking_worklist()->Push(ephemeron.value);
+ }
+ });
+
+ } else {
+ // This is the good case: newly_discovered stores all discovered
+ // objects. Now use key_to_values to see if discovered objects keep more
+ // objects alive due to ephemeron semantics.
+ for (HeapObject* object : ephemeron_marking_.newly_discovered) {
+ auto range = key_to_values.equal_range(object);
+ for (auto it = range.first; it != range.second; ++it) {
+ HeapObject* value = it->second;
+ MarkObject(object, value);
+ }
+ }
+ }
+
+ // Do NOT drain marking worklist here, otherwise the current checks
+ // for work_to_do are not sufficient for determining if another iteration
+ // is necessary.
+
+ work_to_do = !marking_worklist()->IsEmpty() ||
+ !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
+ CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ }
+
+ ResetNewlyDiscovered();
+ ephemeron_marking_.newly_discovered.shrink_to_fit();
+
+ CHECK(marking_worklist()->IsEmpty());
+}
+
+void MarkCompactCollector::PerformWrapperTracing() {
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ heap_->local_embedder_heap_tracer()->Trace(
+ 0, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ }
+}
+
void MarkCompactCollector::ProcessMarkingWorklist() {
+ ProcessMarkingWorklistInternal<
+ MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
+}
+
+template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
+void MarkCompactCollector::ProcessMarkingWorklistInternal() {
HeapObject* object;
MarkCompactMarkingVisitor visitor(this, marking_state());
while ((object = marking_worklist()->Pop()) != nullptr) {
@@ -1466,6 +1655,10 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
DCHECK(heap()->Contains(object));
DCHECK(!(marking_state()->IsWhite(object)));
marking_state()->GreyToBlack(object);
+ if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
+ kTrackNewlyDiscoveredObjects) {
+ AddNewlyDiscovered(object);
+ }
Map* map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
@@ -1473,23 +1666,31 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
DCHECK(marking_worklist()->IsBailoutEmpty());
}
-void MarkCompactCollector::ProcessEphemeralMarking() {
- DCHECK(marking_worklist()->IsEmpty());
- bool work_to_do = true;
- while (work_to_do) {
- if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
- heap_->local_embedder_heap_tracer()->Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- }
- ProcessWeakCollections();
- work_to_do = !marking_worklist()->IsEmpty();
- ProcessMarkingWorklist();
+bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
+ if (marking_state()->IsBlackOrGrey(key)) {
+ if (marking_state()->WhiteToGrey(value)) {
+ marking_worklist()->Push(value);
+ return true;
+ }
+
+ } else if (marking_state()->IsWhite(value)) {
+ weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
}
+
+ return false;
+}
+
+void MarkCompactCollector::ProcessEphemeronMarking() {
+ DCHECK(marking_worklist()->IsEmpty());
+
+ // Incremental marking might leave ephemerons in main task's local
+ // buffer, flush it into global pool.
+ weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
+
+ ProcessEphemeronsUntilFixpoint();
+
CHECK(marking_worklist()->IsEmpty());
- CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
+ CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
@@ -1588,8 +1789,8 @@ void MarkCompactCollector::MarkLiveObjects() {
// harmony weak maps.
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
- ProcessEphemeralMarking();
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
+ ProcessEphemeronMarking();
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1618,10 +1819,10 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessMarkingWorklist();
}
- // Repeat ephemeral processing from the newly marked objects.
+ // Repeat ephemeron processing from the newly marked objects.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
- ProcessEphemeralMarking();
+ ProcessEphemeronMarking();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
@@ -1679,10 +1880,10 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearWeakCollections();
- DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
- DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
- DCHECK(weak_objects_.weak_references.IsGlobalEmpty());
- DCHECK(weak_objects_.weak_objects_in_code.IsGlobalEmpty());
+ DCHECK(weak_objects_.weak_cells.IsEmpty());
+ DCHECK(weak_objects_.transition_arrays.IsEmpty());
+ DCHECK(weak_objects_.weak_references.IsEmpty());
+ DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
@@ -1694,7 +1895,7 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
!code->marked_for_deoptimization()) {
code->SetMarkedForDeoptimization("weak objects");
- code->InvalidateEmbeddedObjects();
+ code->InvalidateEmbeddedObjects(heap_);
have_code_to_deoptimize_ = true;
}
}
@@ -1707,7 +1908,7 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
Map* parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
- TransitionsAccessor(parent, &no_gc_obviously)
+ TransitionsAccessor(isolate(), parent, &no_gc_obviously)
.HasSimpleTransitionTo(dead_target)) {
ClearPotentialSimpleMapTransition(parent, dead_target);
}
@@ -1808,15 +2009,15 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
DescriptorArray* descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
- DCHECK(descriptors == heap_->empty_descriptor_array());
+ DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimFixedArray(descriptors,
- to_trim * DescriptorArray::kEntrySize);
+ heap_->RightTrimWeakFixedArray(descriptors,
+ to_trim * DescriptorArray::kEntrySize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
TrimEnumCache(map, descriptors);
@@ -1853,67 +2054,27 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
heap_->RightTrimFixedArray(indices, to_trim);
}
-void MarkCompactCollector::ProcessWeakCollections() {
- MarkCompactMarkingVisitor visitor(this, marking_state());
- Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::kZero) {
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
- if (weak_collection->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- for (int i = 0; i < table->Capacity(); i++) {
- HeapObject* heap_object = HeapObject::cast(table->KeyAt(i));
- if (non_atomic_marking_state()->IsBlackOrGrey(heap_object)) {
- Object** key_slot =
- table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
- RecordSlot(table, key_slot, *key_slot);
- Object** value_slot =
- table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- if (V8_UNLIKELY(FLAG_track_retaining_path) &&
- (*value_slot)->IsHeapObject()) {
- heap()->AddEphemeralRetainer(heap_object,
- HeapObject::cast(*value_slot));
- }
- visitor.VisitPointer(table, value_slot);
- }
- }
- }
- weak_collection_obj = weak_collection->next();
- }
-}
-
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
- Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::kZero) {
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
- if (weak_collection->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- for (int i = 0; i < table->Capacity(); i++) {
- HeapObject* key = HeapObject::cast(table->KeyAt(i));
- if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
- table->RemoveEntry(i);
- }
+ EphemeronHashTable* table;
+
+ while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
+ for (int i = 0; i < table->Capacity(); i++) {
+ HeapObject* key = HeapObject::cast(table->KeyAt(i));
+#ifdef VERIFY_HEAP
+ Object* value = table->ValueAt(i);
+
+ if (value->IsHeapObject()) {
+ CHECK_IMPLIES(
+ non_atomic_marking_state()->IsBlackOrGrey(key),
+ non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
+ }
+#endif
+ if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
+ table->RemoveEntry(i);
}
}
- weak_collection_obj = weak_collection->next();
- weak_collection->set_next(heap()->undefined_value());
- }
- heap()->set_encountered_weak_collections(Smi::kZero);
-}
-
-void MarkCompactCollector::AbortWeakCollections() {
- Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::kZero) {
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
- weak_collection_obj = weak_collection->next();
- weak_collection->set_next(heap()->undefined_value());
}
- heap()->set_encountered_weak_collections(Smi::kZero);
}
void MarkCompactCollector::ClearWeakCells() {
@@ -1938,9 +2099,9 @@ void MarkCompactCollector::ClearWeakCells() {
// Resurrect the cell.
non_atomic_marking_state()->WhiteToBlack(value);
Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
- RecordSlot(value, slot, *slot);
+ RecordSlot(value, slot, HeapObject::cast(*slot));
slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, *slot);
+ RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
} else {
weak_cell->clear();
}
@@ -1951,7 +2112,7 @@ void MarkCompactCollector::ClearWeakCells() {
} else {
// The value of the weak cell is alive.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, *slot);
+ RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
}
@@ -1981,6 +2142,10 @@ void MarkCompactCollector::ClearWeakReferences() {
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
+ weak_objects_.ephemeron_hash_tables.Clear();
+ weak_objects_.current_ephemerons.Clear();
+ weak_objects_.next_ephemerons.Clear();
+ weak_objects_.discovered_ephemerons.Clear();
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
}
@@ -1997,7 +2162,7 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
if (rinfo->IsInConstantPool()) {
addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTarget(rmode)) {
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rmode));
@@ -2015,7 +2180,7 @@ static inline SlotCallbackResult UpdateSlot(
HeapObjectReferenceType reference_type) {
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
- DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
+ DCHECK(Heap::InFromSpace(heap_obj) ||
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
@@ -2028,7 +2193,7 @@ static inline SlotCallbackResult UpdateSlot(
} else {
base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
}
- DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
+ DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
// OLD_TO_OLD slots are always removed after updating.
@@ -2067,6 +2232,8 @@ static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
// nevers visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
+ explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointer(HeapObject* host, Object** p) override {
UpdateStrongSlotInternal(p);
}
@@ -2100,7 +2267,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateEmbeddedPointer(
- rinfo, UpdateStrongMaybeObjectSlotInternal);
+ heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
@@ -2125,6 +2292,8 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
+
+ Heap* heap_;
};
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -2142,7 +2311,8 @@ void MarkCompactCollector::EvacuatePrologue() {
// New space.
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
- for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
@@ -2721,7 +2891,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor;
+ PointersUpdatingVisitor visitor(chunk_->heap());
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
@@ -2736,7 +2906,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor;
+ PointersUpdatingVisitor visitor(chunk_->heap());
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first->IterateBodyFast(&visitor);
@@ -2777,7 +2947,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) {
return REMOVE_SLOT;
}
- if (heap_->InFromSpace(heap_object)) {
+ if (Heap::InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
HeapObjectReference::Update(
@@ -2791,10 +2961,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap_->InToSpace(heap_object)) {
+ if (Heap::InToSpace(heap_object)) {
return KEEP_SLOT;
}
- } else if (heap_->InToSpace(heap_object)) {
+ } else if (Heap::InToSpace(heap_object)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set, or
// if the slot was already updated during old->old updating.
@@ -2813,7 +2983,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
return KEEP_SLOT;
} else {
- DCHECK(!heap_->InNewSpace(heap_object));
+ DCHECK(!Heap::InNewSpace(heap_object));
}
return REMOVE_SLOT;
}
@@ -2859,7 +3029,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- slot_type, slot, [this](MaybeObject** slot) {
+ heap_, slot_type, slot, [this](MaybeObject** slot) {
return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
@@ -2870,11 +3040,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr)) {
CHECK_NE(chunk_->owner(), heap_->map_space());
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk_, [](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
return UpdateTypedSlotHelper::UpdateTypedSlot(
- slot_type, slot, UpdateStrongSlot<AccessMode::NON_ATOMIC>);
+ heap_, slot_type, slot,
+ UpdateStrongSlot<AccessMode::NON_ATOMIC>);
});
}
}
@@ -2899,19 +3070,23 @@ UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
class GlobalHandlesUpdatingItem : public UpdatingItem {
public:
- GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
- size_t end)
- : global_handles_(global_handles), start_(start), end_(end) {}
+ GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
+ size_t start, size_t end)
+ : heap_(heap),
+ global_handles_(global_handles),
+ start_(start),
+ end_(end) {}
virtual ~GlobalHandlesUpdatingItem() {}
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"GlobalHandlesUpdatingItem::Process");
- PointersUpdatingVisitor updating_visitor;
+ PointersUpdatingVisitor updating_visitor(heap_);
global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
private:
+ Heap* heap_;
GlobalHandles* global_handles_;
size_t start_;
size_t end_;
@@ -2954,7 +3129,7 @@ class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
ItemParallelJob* job) {
// Seed to space pages.
- const Address space_start = heap()->new_space()->bottom();
+ const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
int pages = 0;
for (Page* page : PageRange(space_start, space_end)) {
@@ -3036,7 +3211,7 @@ int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor;
+ PointersUpdatingVisitor updating_visitor(heap());
{
TRACE_GC(heap()->tracer(),
@@ -3160,7 +3335,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
- p->Unlink();
+ p->owner()->memory_chunk_list().Remove(p);
}
}
DCHECK_EQ(aborted_pages_verified, aborted_pages);
@@ -3204,7 +3379,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// that this adds unusable memory into the free list that is later on
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
- p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
+ p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
Heap::ShouldZapGarbage()
? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
@@ -3221,6 +3396,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
static_cast<void*>(p));
}
ArrayBufferTracker::FreeAll(p);
+ space->memory_chunk_list().Remove(p);
space->ReleasePage(p);
continue;
}
@@ -3233,7 +3409,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (FLAG_gc_verbose) {
PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
- AllocationSpaceName(space->identity()), will_be_swept);
+ space->name(), will_be_swept);
}
}
@@ -3322,7 +3498,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
DCHECK(!HasWeakHeapObjectTag(*current));
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- if (!heap_->InNewSpace(object)) return;
+ if (!Heap::InNewSpace(object)) return;
CHECK(IsMarked(object));
}
}
@@ -3333,7 +3509,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
HeapObject* object;
// Minor MC treats weak references as strong.
if ((*current)->ToStrongOrWeakHeapObject(&object)) {
- if (!heap_->InNewSpace(object)) {
+ if (!Heap::InNewSpace(object)) {
continue;
}
CHECK(IsMarked(object));
@@ -3363,7 +3539,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
}
}
}
@@ -3371,7 +3547,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
if ((*current)->ToStrongOrWeakHeapObject(&object)) {
- CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
}
}
}
@@ -3380,7 +3556,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
#endif // VERIFY_HEAP
template <class ParallelItem>
-void SeedGlobalHandles(GlobalHandles* global_handles, ItemParallelJob* job) {
+void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
+ ItemParallelJob* job) {
// Create batches of global handles.
const size_t kGlobalHandlesBufferSize = 1000;
const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
@@ -3388,13 +3565,13 @@ void SeedGlobalHandles(GlobalHandles* global_handles, ItemParallelJob* job) {
start += kGlobalHandlesBufferSize) {
size_t end = start + kGlobalHandlesBufferSize;
if (end > new_space_nodes) end = new_space_nodes;
- job->AddItem(new ParallelItem(global_handles, start, end));
+ job->AddItem(new ParallelItem(heap, global_handles, start, end));
}
}
bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
- DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
- return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
+ DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
+ return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
->non_atomic_marking_state()
->IsGrey(HeapObject::cast(*p));
}
@@ -3405,11 +3582,9 @@ class YoungGenerationMarkingVisitor final
: public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
YoungGenerationMarkingVisitor(
- Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
+ MinorMarkCompactCollector::MarkingState* marking_state,
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : heap_(heap),
- worklist_(global_worklist, task_id),
- marking_state_(marking_state) {}
+ : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final {
@@ -3428,7 +3603,7 @@ class YoungGenerationMarkingVisitor final
V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
Object* target = *slot;
DCHECK(!HasWeakHeapObjectTag(target));
- if (heap_->InNewSpace(target)) {
+ if (Heap::InNewSpace(target)) {
HeapObject* target_object = HeapObject::cast(target);
MarkObjectViaMarkingWorklist(target_object);
}
@@ -3436,7 +3611,7 @@ class YoungGenerationMarkingVisitor final
V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
MaybeObject* target = *slot;
- if (heap_->InNewSpace(target)) {
+ if (Heap::InNewSpace(target)) {
HeapObject* target_object;
// Treat weak references as strong. TODO(marja): Proper weakness handling
// for minor-mcs.
@@ -3454,7 +3629,6 @@ class YoungGenerationMarkingVisitor final
}
}
- Heap* heap_;
MinorMarkCompactCollector::MarkingWorklist::View worklist_;
MinorMarkCompactCollector::MarkingState* marking_state_;
};
@@ -3467,7 +3641,7 @@ MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
- heap, marking_state(), worklist_, kMainMarker)),
+ marking_state(), worklist_, kMainMarker)),
page_parallel_job_semaphore_(0) {
static_assert(
kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
@@ -3486,7 +3660,9 @@ int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
// amount of marking that is required.
const int kPagesPerTask = 2;
const int wanted_tasks = Max(1, pages / kPagesPerTask);
- return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
+ return Min(NumberOfAvailableCores(),
+ Min(wanted_tasks,
+ MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
}
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
@@ -3562,14 +3738,14 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor;
+ PointersUpdatingVisitor updating_visitor(heap());
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
CollectNewSpaceArrayBufferTrackerItems(&updating_job);
// Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
- &updating_job);
+ SeedGlobalHandles<GlobalHandlesUpdatingItem>(
+ heap(), isolate()->global_handles(), &updating_job);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
int remembered_set_pages = 0;
remembered_set_pages += CollectRememberedSetUpdatingItems(
@@ -3617,7 +3793,6 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update pointers from external string table.
heap()->UpdateNewSpaceReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
- heap()->IterateEncounteredWeakCollections(&updating_visitor);
}
}
@@ -3676,8 +3851,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
- for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
- heap()->new_space()->FromSpaceEnd())) {
+ for (Page* p :
+ PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
non_atomic_marking_state()->ClearLiveness(p);
if (FLAG_concurrent_marking) {
@@ -3777,7 +3952,7 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
+ *p = ReadOnlyRoots(heap_).the_hole_value();
}
}
}
@@ -3794,12 +3969,11 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit MinorMarkCompactWeakObjectRetainer(
MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
+ : marking_state_(collector->non_atomic_marking_state()) {}
virtual Object* RetainAs(Object* object) {
HeapObject* heap_object = HeapObject::cast(object);
- if (!heap_->InNewSpace(heap_object)) return object;
+ if (!Heap::InNewSpace(heap_object)) return object;
// Young generation marking only marks to grey instead of black.
DCHECK(!marking_state_->IsBlack(heap_object));
@@ -3810,7 +3984,6 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
}
private:
- Heap* heap_;
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
@@ -3839,7 +4012,8 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() {
void MinorMarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
- for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
@@ -3885,7 +4059,7 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
collector_(collector),
marking_worklist_(global_worklist, task_id),
marking_state_(collector->marking_state()),
- visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
+ visitor_(marking_state_, global_worklist, task_id) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
@@ -3913,7 +4087,7 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
};
void MarkObject(Object* object) {
- if (!collector_->heap()->InNewSpace(object)) return;
+ if (!Heap::InNewSpace(object)) return;
HeapObject* heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
const int size = visitor_.Visit(heap_object);
@@ -3958,10 +4132,9 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
class PageMarkingItem : public MarkingItem {
public:
- explicit PageMarkingItem(MemoryChunk* chunk,
- base::AtomicNumber<intptr_t>* global_slots)
+ explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
: chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
+ virtual ~PageMarkingItem() { *global_slots_ = *global_slots_ + slots_; }
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
@@ -3986,7 +4159,7 @@ class PageMarkingItem : public MarkingItem {
chunk_,
[this, task](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- slot_type, slot, [this, task](MaybeObject** slot) {
+ heap(), slot_type, slot, [this, task](MaybeObject** slot) {
return CheckAndMarkObject(task,
reinterpret_cast<Address>(slot));
});
@@ -3996,10 +4169,10 @@ class PageMarkingItem : public MarkingItem {
SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
Address slot_address) {
MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
- if (heap()->InNewSpace(object)) {
+ if (Heap::InNewSpace(object)) {
// Marking happens before flipping the young generation, so the object
// has to be in ToSpace.
- DCHECK(heap()->InToSpace(object));
+ DCHECK(Heap::InToSpace(object));
HeapObject* heap_object;
bool success = object->ToStrongOrWeakHeapObject(&heap_object);
USE(success);
@@ -4012,14 +4185,14 @@ class PageMarkingItem : public MarkingItem {
}
MemoryChunk* chunk_;
- base::AtomicNumber<intptr_t>* global_slots_;
- intptr_t slots_;
+ std::atomic<int>* global_slots_;
+ int slots_;
};
class GlobalHandlesMarkingItem : public MarkingItem {
public:
- GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
- size_t end)
+ GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
+ size_t start, size_t end)
: global_handles_(global_handles), start_(start), end_(end) {}
virtual ~GlobalHandlesMarkingItem() {}
@@ -4063,7 +4236,7 @@ class GlobalHandlesMarkingItem : public MarkingItem {
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
- base::AtomicNumber<intptr_t> slots;
+ std::atomic<int> slots;
{
ItemParallelJob job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
@@ -4073,8 +4246,8 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
// Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
- &job);
+ SeedGlobalHandles<GlobalHandlesMarkingItem>(
+ heap(), isolate()->global_handles(), &job);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&job, &slots](MemoryChunk* chunk) {
@@ -4093,10 +4266,10 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
job.Run(isolate()->async_counters());
- DCHECK(worklist()->IsGlobalEmpty());
+ DCHECK(worklist()->IsEmpty());
}
}
- old_to_new_slots_ = static_cast<int>(slots.Value());
+ old_to_new_slots_ = slots;
}
void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -4111,7 +4284,6 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
// Mark rest on the main thread.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
- heap()->IterateEncounteredWeakCollections(&root_visitor);
ProcessMarkingWorklist();
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index fa474d049d..169f2ae671 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -315,21 +315,15 @@ class MinorMarkingState final
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- reinterpret_cast<base::AtomicNumber<intptr_t>*>(
- &chunk->young_generation_live_byte_count_)
- ->Increment(by);
+ chunk->young_generation_live_byte_count_ += by;
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
- &chunk->young_generation_live_byte_count_)
- ->Value();
+ return chunk->young_generation_live_byte_count_;
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- reinterpret_cast<base::AtomicNumber<intptr_t>*>(
- &chunk->young_generation_live_byte_count_)
- ->SetValue(value);
+ chunk->young_generation_live_byte_count_ = value;
}
};
@@ -384,19 +378,15 @@ class MajorAtomicMarkingState final
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
- ->Increment(by);
+ chunk->live_byte_count_ += by;
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
- &chunk->live_byte_count_)
- ->Value();
+ return chunk->live_byte_count_;
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
- ->SetValue(value);
+ chunk->live_byte_count_ = value;
}
};
@@ -421,16 +411,52 @@ class MajorNonAtomicMarkingState final
}
};
+struct Ephemeron {
+ HeapObject* key;
+ HeapObject* value;
+};
+
+typedef Worklist<Ephemeron, 64> EphemeronWorklist;
+
// Weak objects encountered during marking.
struct WeakObjects {
Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
+
+ // Keep track of all EphemeronHashTables in the heap to process
+ // them in the atomic pause.
+ Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
+
+ // Keep track of all ephemerons for concurrent marking tasks. Only store
+ // ephemerons in these Worklists if both key and value are unreachable at the
+ // moment.
+ //
+ // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
+ // worklists.
+ //
+ // current_ephemerons is used as draining worklist in the current fixpoint
+ // iteration.
+ EphemeronWorklist current_ephemerons;
+
+ // Stores ephemerons to visit in the next fixpoint iteration.
+ EphemeronWorklist next_ephemerons;
+
+ // When draining the marking worklist new discovered ephemerons are pushed
+ // into this worklist.
+ EphemeronWorklist discovered_ephemerons;
+
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
};
+struct EphemeronMarking {
+ std::vector<HeapObject*> newly_discovered;
+ bool newly_discovered_overflowed;
+ size_t newly_discovered_limit;
+};
+
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
@@ -529,8 +555,19 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Prints the stats about the global pool of the worklist.
void PrintWorklist(const char* worklist_name,
ConcurrentMarkingWorklist* worklist);
+
+ // Worklist used for most objects.
ConcurrentMarkingWorklist shared_;
+
+ // Concurrent marking uses this worklist to bail out of concurrently
+ // marking certain object types. These objects are handled later in a STW
+ // pause after concurrent marking has finished.
ConcurrentMarkingWorklist bailout_;
+
+ // Concurrent marking uses this worklist to bail out of marking objects
+ // in new space's linear allocation area. Used to avoid black allocation
+ // for new space. This allow the compiler to remove write barriers
+ // for freshly allocatd objects.
ConcurrentMarkingWorklist on_hold_;
};
@@ -581,9 +618,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
- Object* target);
+ HeapObject* target);
V8_INLINE static void RecordSlot(HeapObject* object,
- HeapObjectReference** slot, Object* target);
+ HeapObjectReference** slot,
+ HeapObject* target);
void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
@@ -617,6 +655,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
+ void AddEphemeronHashTable(EphemeronHashTable* table) {
+ weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
+ }
+
+ void AddEphemeron(HeapObject* key, HeapObject* value) {
+ weak_objects_.discovered_ephemerons.Push(kMainThread,
+ Ephemeron{key, value});
+ }
+
void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
}
@@ -626,6 +673,22 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::make_pair(object, code));
}
+ void AddNewlyDiscovered(HeapObject* object) {
+ if (ephemeron_marking_.newly_discovered_overflowed) return;
+
+ if (ephemeron_marking_.newly_discovered.size() <
+ ephemeron_marking_.newly_discovered_limit) {
+ ephemeron_marking_.newly_discovered.push_back(object);
+ } else {
+ ephemeron_marking_.newly_discovered_overflowed = true;
+ }
+ }
+
+ void ResetNewlyDiscovered() {
+ ephemeron_marking_.newly_discovered_overflowed = false;
+ ephemeron_marking_.newly_discovered.clear();
+ }
+
Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
@@ -680,7 +743,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkStringTable(ObjectVisitor* visitor);
// Marks object reachable from harmony weak maps and wrapper tracing.
- void ProcessEphemeralMarking();
+ void ProcessEphemeronMarking();
// If the call-site of the top optimized code was not prepared for
// deoptimization, then treat embedded pointers in the code as strong as
@@ -694,9 +757,36 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// if no concurrent threads are running.
void ProcessMarkingWorklist() override;
+ enum class MarkingWorklistProcessingMode {
+ kDefault,
+ kTrackNewlyDiscoveredObjects
+ };
+
+ template <MarkingWorklistProcessingMode mode>
+ void ProcessMarkingWorklistInternal();
+
+ // Implements ephemeron semantics: Marks value if key is already reachable.
+ // Returns true if value was actually marked.
+ bool VisitEphemeron(HeapObject* key, HeapObject* value);
+
+ // Marks ephemerons and drains marking worklist iteratively
+ // until a fixpoint is reached.
+ void ProcessEphemeronsUntilFixpoint();
+
+ // Drains ephemeron and marking worklists. Single iteration of the
+ // fixpoint iteration.
+ bool ProcessEphemerons();
+
+ // Mark ephemerons and drain marking worklist with a linear algorithm.
+ // Only used if fixpoint iteration doesn't finish within a few iterations.
+ void ProcessEphemeronsLinear();
+
+ // Perform Wrapper Tracing if in use.
+ void PerformWrapperTracing();
+
// Callback function for telling whether the object *p is an unmarked
// heap object.
- static bool IsUnmarkedHeapObject(Object** p);
+ static bool IsUnmarkedHeapObject(Heap* heap, Object** p);
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
@@ -715,20 +805,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);
- // Mark all values associated with reachable keys in weak collections
- // encountered so far. This might push new object or even new weak maps onto
- // the marking stack.
- void ProcessWeakCollections();
-
// After all reachable objects have been marked those weak map entries
// with an unreachable key are removed from all encountered weak maps.
// The linked list of all encountered weak maps is destroyed.
void ClearWeakCollections();
- // We have to remove all encountered weak maps from the list of weak
- // collections when incremental marking is aborted.
- void AbortWeakCollections();
-
// Goes through the list of encountered weak cells and clears those with
// dead values. If the value is a dead map and the parent map transitions to
// the dead map via weak cell, then this function also clears the map
@@ -763,6 +844,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void ClearMarkbitsInPagedSpace(PagedSpace* space);
void ClearMarkbitsInNewSpace(NewSpace* space);
+ static const int kEphemeronChunkSize = 8 * KB;
+
+ int NumberOfParallelEphemeronVisitingTasks(size_t elements);
+
base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_;
@@ -795,6 +880,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingWorklist marking_worklist_;
WeakObjects weak_objects_;
+ EphemeronMarking ephemeron_marking_;
// Candidates for pages that should be evacuated.
std::vector<Page*> evacuation_candidates_;
@@ -808,6 +894,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ friend class EphemeronHashTableMarkingTask;
friend class FullEvacuator;
friend class Heap;
friend class RecordMigratedSlotVisitor;
@@ -832,10 +919,10 @@ class MarkingVisitor final
V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
V8_INLINE int VisitCodeDataContainer(Map* map, CodeDataContainer* object);
+ V8_INLINE int VisitEphemeronHashTable(Map* map, EphemeronHashTable* object);
V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
- V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
V8_INLINE int VisitMap(Map* map, Map* object);
V8_INLINE int VisitNativeContext(Map* map, Context* object);
V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index bfa813091e..ccf25d6549 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -208,7 +208,7 @@ class Marking : public AllStatic {
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool IsImpossible(MarkBit mark_bit)) {
+ V8_INLINE static bool IsImpossible(MarkBit mark_bit) {
if (mode == AccessMode::NON_ATOMIC) {
return !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
@@ -226,14 +226,14 @@ class Marking : public AllStatic {
// Black markbits: 11
static const char* kBlackBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool IsBlack(MarkBit mark_bit)) {
+ V8_INLINE static bool IsBlack(MarkBit mark_bit) {
return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool IsWhite(MarkBit mark_bit)) {
+ V8_INLINE static bool IsWhite(MarkBit mark_bit) {
DCHECK(!IsImpossible<mode>(mark_bit));
return !mark_bit.Get<mode>();
}
@@ -241,19 +241,19 @@ class Marking : public AllStatic {
// Grey markbits: 10
static const char* kGreyBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool IsGrey(MarkBit mark_bit)) {
+ V8_INLINE static bool IsGrey(MarkBit mark_bit) {
return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
}
// IsBlackOrGrey assumes that the first bit is set for black or grey
// objects.
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) {
+ V8_INLINE static bool IsBlackOrGrey(MarkBit mark_bit) {
return mark_bit.Get<mode>();
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static void MarkWhite(MarkBit markbit)) {
+ V8_INLINE static void MarkWhite(MarkBit markbit) {
STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
markbit.Clear<mode>();
markbit.Next().Clear<mode>();
@@ -263,23 +263,23 @@ class Marking : public AllStatic {
// If you know that nobody else will change the bits on the given location
// then you may use it.
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static void MarkBlack(MarkBit markbit)) {
+ V8_INLINE static void MarkBlack(MarkBit markbit) {
markbit.Set<mode>();
markbit.Next().Set<mode>();
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool WhiteToGrey(MarkBit markbit)) {
+ V8_INLINE static bool WhiteToGrey(MarkBit markbit) {
return markbit.Set<mode>();
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool WhiteToBlack(MarkBit markbit)) {
+ V8_INLINE static bool WhiteToBlack(MarkBit markbit) {
return markbit.Set<mode>() && markbit.Next().Set<mode>();
}
template <AccessMode mode = AccessMode::NON_ATOMIC>
- INLINE(static bool GreyToBlack(MarkBit markbit)) {
+ V8_INLINE static bool GreyToBlack(MarkBit markbit) {
return markbit.Get<mode>() && markbit.Next().Set<mode>();
}
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 0a25d774e4..4af7df87fd 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -20,6 +20,14 @@ const int MemoryReducer::kMaxNumberOfGCs = 3;
const double MemoryReducer::kCommittedMemoryFactor = 1.1;
const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
+MemoryReducer::MemoryReducer(Heap* heap)
+ : heap_(heap),
+ taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(heap->isolate()))),
+ state_(kDone, 0, 0.0, 0.0, 0),
+ js_calls_counter_(0),
+ js_calls_sample_time_ms_(0.0) {}
+
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
: CancelableTask(memory_reducer->heap()->isolate()),
memory_reducer_(memory_reducer) {}
@@ -204,10 +212,9 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
if (heap()->IsTearingDown()) return;
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
- auto timer_task = new MemoryReducer::TimerTask(this);
- V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
- isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
+ taskrunner_->PostDelayedTask(
+ base::make_unique<MemoryReducer::TimerTask>(this),
+ (delay_ms + kSlackMs) / 1000.0);
}
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 6e5034ca45..90a955150e 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -110,11 +110,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
bool can_start_incremental_gc;
};
- explicit MemoryReducer(Heap* heap)
- : heap_(heap),
- state_(kDone, 0, 0.0, 0.0, 0),
- js_calls_counter_(0),
- js_calls_sample_time_ms_(0.0) {}
+ explicit MemoryReducer(Heap* heap);
// Callbacks.
void NotifyMarkCompact(const Event& event);
void NotifyPossibleGarbage(const Event& event);
@@ -159,6 +155,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
static bool WatchdogGC(const State& state, const Event& event);
Heap* heap_;
+ std::shared_ptr<v8::TaskRunner> taskrunner_;
State state_;
unsigned int js_calls_counter_;
double js_calls_sample_time_ms_;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 51905e8ea2..6c7a26b672 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -16,6 +16,8 @@
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils.h"
@@ -149,12 +151,7 @@ V8_NOINLINE static void PrintJSONArray(size_t* array, const int len) {
V8_NOINLINE static void DumpJSONArray(std::stringstream& stream, size_t* array,
const int len) {
- stream << "[";
- for (int i = 0; i < len; i++) {
- stream << array[i];
- if (i != (len - 1)) stream << ",";
- }
- stream << "]";
+ stream << PrintCollection(Vector<size_t>(array, len));
}
void ObjectStats::PrintKeyAndId(const char* key, int gc_count) {
@@ -364,7 +361,7 @@ class ObjectStatsCollectorImpl {
void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
// Specific recursion into constant pool or embedded code objects. Records
- // FixedArrays and Tuple2 that look like ConstantElementsPair.
+ // FixedArrays and Tuple2.
void RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
HeapObject* parent, HeapObject* object,
ObjectStats::VirtualInstanceType type);
@@ -385,6 +382,8 @@ class ObjectStatsCollectorImpl {
void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
void RecordVirtualJSFunctionDetails(JSFunction* function);
+ void RecordVirtualArrayBoilerplateDescription(
+ ArrayBoilerplateDescription* description);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
@@ -409,7 +408,7 @@ bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
}
- if (obj == heap_->empty_property_array()) return false;
+ if (obj == ReadOnlyRoots(heap_).empty_property_array()) return false;
return true;
}
@@ -534,6 +533,8 @@ void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
MaybeObject* maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
+ if (maybe_obj->IsClearedWeakHeapObject())
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
Object* obj = maybe_obj->GetHeapObjectOrSmi();
switch (kind) {
case FeedbackSlotKind::kCall:
@@ -593,6 +594,8 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
calculated_size += header_size;
// Iterate over the feedback slots and log each one.
+ if (!vector->shared_function_info()->HasFeedbackMetadata()) return;
+
FeedbackMetadataIterator it(vector->metadata());
while (it.HasNext()) {
FeedbackSlot slot = it.Next();
@@ -660,6 +663,9 @@ void ObjectStatsCollectorImpl::CollectStatistics(
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsArrayBoilerplateDescription()) {
+ RecordVirtualArrayBoilerplateDescription(
+ ArrayBoilerplateDescription::cast(obj));
} else if (obj->IsFixedArrayExact()) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
@@ -708,8 +714,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
ObjectStats::SCRIPT_LIST_TYPE);
// HashTable.
- RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
- ObjectStats::STRING_TABLE_TYPE);
RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
ObjectStats::CODE_STUBS_TABLE_TYPE);
}
@@ -723,14 +727,15 @@ void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
}
bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
- return array != heap_->empty_fixed_array() &&
- array != heap_->empty_sloppy_arguments_elements() &&
- array != heap_->empty_slow_element_dictionary() &&
+ ReadOnlyRoots roots(heap_);
+ return array != roots.empty_fixed_array() &&
+ array != roots.empty_sloppy_arguments_elements() &&
+ array != roots.empty_slow_element_dictionary() &&
array != heap_->empty_property_dictionary();
}
bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
- return array->map() == heap_->fixed_cow_array_map();
+ return array->map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
@@ -743,7 +748,8 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
// TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
DescriptorArray* array = map->instance_descriptors();
- if (map->owns_descriptors() && array != heap_->empty_descriptor_array()) {
+ if (map->owns_descriptors() &&
+ array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// DescriptorArray has its own instance type.
EnumCache* enum_cache = array->GetEnumCache();
RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
@@ -756,8 +762,8 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
if (map->prototype_info()->IsPrototypeInfo()) {
PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
Object* users = info->prototype_users();
- if (users->IsFixedArrayOfWeakCells()) {
- RecordSimpleVirtualObjectStats(map, FixedArrayOfWeakCells::cast(users),
+ if (users->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
@@ -805,17 +811,13 @@ void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
}
}
-
-namespace {
-
-bool MatchesConstantElementsPair(Object* object) {
- if (!object->IsTuple2()) return false;
- Tuple2* tuple = Tuple2::cast(object);
- return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArrayExact();
+void ObjectStatsCollectorImpl::RecordVirtualArrayBoilerplateDescription(
+ ArrayBoilerplateDescription* description) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ description, description->constant_elements(),
+ ObjectStats::ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE);
}
-} // namespace
-
void ObjectStatsCollectorImpl::
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
HeapObject* parent, HeapObject* object,
@@ -829,10 +831,6 @@ void ObjectStatsCollectorImpl::
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
array, HeapObject::cast(entry), type);
}
- } else if (MatchesConstantElementsPair(object)) {
- Tuple2* tuple = Tuple2::cast(object);
- RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- tuple, HeapObject::cast(tuple->value2()), type);
}
}
@@ -846,7 +844,7 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
for (int i = 0; i < constant_pool->length(); i++) {
Object* entry = constant_pool->get(i);
- if (entry->IsFixedArrayExact() || MatchesConstantElementsPair(entry)) {
+ if (entry->IsFixedArrayExact()) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -880,6 +878,20 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
CodeKindToVirtualInstanceType(code->kind()));
RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
ObjectStats::DEOPTIMIZATION_DATA_TYPE);
+ RecordSimpleVirtualObjectStats(code, code->relocation_info(),
+ ObjectStats::RELOC_INFO_TYPE);
+ Object* source_position_table = code->source_position_table();
+ if (source_position_table->IsSourcePositionTableWithFrameCache()) {
+ RecordSimpleVirtualObjectStats(
+ code,
+ SourcePositionTableWithFrameCache::cast(source_position_table)
+ ->source_position_table(),
+ ObjectStats::SOURCE_POSITION_TABLE_TYPE);
+ } else if (source_position_table->IsHeapObject()) {
+ RecordSimpleVirtualObjectStats(code,
+ HeapObject::cast(source_position_table),
+ ObjectStats::SOURCE_POSITION_TABLE_TYPE);
+ }
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationData* input_data =
DeoptimizationData::cast(code->deoptimization_data());
@@ -894,7 +906,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArrayExact() || MatchesConstantElementsPair(target)) {
+ if (target->IsFixedArrayExact()) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 1e72c2af18..a21b7f749f 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -14,56 +14,58 @@
// tracing.
//
// Update LAST_VIRTUAL_TYPE below when changing this macro.
-#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
- CODE_KIND_LIST(V) \
- V(BOILERPLATE_ELEMENTS_TYPE) \
- V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
- V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
- V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
- V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
- V(CODE_STUBS_TABLE_TYPE) \
- V(COW_ARRAY_TYPE) \
- V(DEOPTIMIZATION_DATA_TYPE) \
- V(DEPENDENT_CODE_TYPE) \
- V(ELEMENTS_TYPE) \
- V(EMBEDDED_OBJECT_TYPE) \
- V(ENUM_CACHE_TYPE) \
- V(ENUM_INDICES_CACHE_TYPE) \
- V(FEEDBACK_VECTOR_ENTRY_TYPE) \
- V(FEEDBACK_VECTOR_HEADER_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_ENUM_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_LOAD_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
- V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
- V(GLOBAL_ELEMENTS_TYPE) \
- V(GLOBAL_PROPERTIES_TYPE) \
- V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_COLLETION_TABLE_TYPE) \
- V(JS_OBJECT_BOILERPLATE_TYPE) \
- V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
- V(NUMBER_STRING_CACHE_TYPE) \
- V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
- V(OBJECT_TO_CODE_TYPE) \
- V(OPTIMIZED_CODE_LITERALS_TYPE) \
- V(OTHER_CONTEXT_TYPE) \
- V(PROTOTYPE_USERS_TYPE) \
- V(REGEXP_MULTIPLE_CACHE_TYPE) \
- V(RETAINED_MAPS_TYPE) \
- V(SCRIPT_LIST_TYPE) \
- V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
- V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
- V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
- V(SERIALIZED_OBJECTS_TYPE) \
- V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
- V(STRING_SPLIT_CACHE_TYPE) \
- V(STRING_TABLE_TYPE) \
- V(UNCOMPILED_JS_FUNCTION_TYPE) \
- V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ CODE_KIND_LIST(V) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
+ V(CODE_STUBS_TABLE_TYPE) \
+ V(COW_ARRAY_TYPE) \
+ V(DEOPTIMIZATION_DATA_TYPE) \
+ V(DEPENDENT_CODE_TYPE) \
+ V(ELEMENTS_TYPE) \
+ V(EMBEDDED_OBJECT_TYPE) \
+ V(ENUM_CACHE_TYPE) \
+ V(ENUM_INDICES_CACHE_TYPE) \
+ V(FEEDBACK_VECTOR_ENTRY_TYPE) \
+ V(FEEDBACK_VECTOR_HEADER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_ENUM_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
+ V(GLOBAL_ELEMENTS_TYPE) \
+ V(GLOBAL_PROPERTIES_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(NUMBER_STRING_CACHE_TYPE) \
+ V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
+ V(OBJECT_TO_CODE_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_TYPE) \
+ V(OTHER_CONTEXT_TYPE) \
+ V(PROTOTYPE_USERS_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_TYPE) \
+ V(RELOC_INFO_TYPE) \
+ V(RETAINED_MAPS_TYPE) \
+ V(SCRIPT_LIST_TYPE) \
+ V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SERIALIZED_OBJECTS_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
+ V(STRING_SPLIT_CACHE_TYPE) \
+ V(SOURCE_POSITION_TABLE_TYPE) \
+ V(UNCOMPILED_JS_FUNCTION_TYPE) \
+ V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
V(WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE)
namespace v8 {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index c7c384f52a..594b837f69 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -27,7 +27,7 @@ struct WeakListVisitor;
template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
- Object* undefined = heap->undefined_value();
+ Object* undefined = ReadOnlyRoots(heap).undefined_value();
Object* head = undefined;
T* tail = nullptr;
bool record_slots = MustRecordSlots(heap);
@@ -53,7 +53,8 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
HeapObject* slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
Object** slot = HeapObject::RawField(slot_holder, slot_offset);
- MarkCompactCollector::RecordSlot(slot_holder, slot, retained);
+ MarkCompactCollector::RecordSlot(slot_holder, slot,
+ HeapObject::cast(retained));
}
}
// Retained object is new tail.
@@ -77,7 +78,7 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
template <class T>
static void ClearWeakList(Heap* heap, Object* list) {
- Object* undefined = heap->undefined_value();
+ Object* undefined = ReadOnlyRoots(heap).undefined_value();
while (list != undefined) {
T* candidate = reinterpret_cast<T*>(list);
list = WeakListVisitor<T>::WeakNext(candidate);
@@ -107,7 +108,7 @@ struct WeakListVisitor<Code> {
static void VisitPhantomObject(Heap* heap, Code* code) {
// Even though the code is dying, its code_data_container can still be
// alive. Clear the next_code_link slot to avoid a dangling pointer.
- SetWeakNext(code, heap->undefined_value());
+ SetWeakNext(code, ReadOnlyRoots(heap).undefined_value());
}
};
@@ -135,7 +136,8 @@ struct WeakListVisitor<Context> {
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
- MarkCompactCollector::RecordSlot(context, slot, *slot);
+ MarkCompactCollector::RecordSlot(context, slot,
+ HeapObject::cast(*slot));
}
// Code objects are always allocated in Code space, we do not have to
// visit them during scavenges.
@@ -157,7 +159,8 @@ struct WeakListVisitor<Context> {
// Record the updated slot if necessary.
Object** head_slot =
HeapObject::RawField(context, FixedArray::SizeFor(index));
- heap->mark_compact_collector()->RecordSlot(context, head_slot, list_head);
+ heap->mark_compact_collector()->RecordSlot(context, head_slot,
+ HeapObject::cast(list_head));
}
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 30c3ea6cd6..cdb7c917b0 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -9,6 +9,7 @@
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
#include "src/objects.h"
+#include "src/objects/hash-table.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/string.h"
#include "src/visitors.h"
@@ -18,43 +19,50 @@ namespace internal {
class BigInt;
class BytecodeArray;
+class DataHandler;
class JSArrayBuffer;
class JSRegExp;
class JSWeakCollection;
-
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(JSArrayBuffer) \
- V(JSFunction) \
- V(JSObject) \
- V(JSWeakCollection) \
- V(Map) \
- V(Oddball) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(WasmInstanceObject) \
+class UncompiledDataWithPreParsedScope;
+
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(DataHandler) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(Oddball) \
+ V(PreParsedScopeData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(WasmInstanceObject) \
V(WeakCell)
// The base class for visitors that need to dispatch on object type. The default
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index d3458f1dea..95b7b5b9d5 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -295,7 +295,7 @@ class UpdateTypedSlotHelper {
template <typename Callback>
static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
Callback callback) {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code* old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_target = old_target;
SlotCallbackResult result =
@@ -311,7 +311,7 @@ class UpdateTypedSlotHelper {
// Updates an embedded pointer slot using an untyped slot callback.
// The callback accepts Object** and returns SlotCallbackResult.
template <typename Callback>
- static SlotCallbackResult UpdateEmbeddedPointer(RelocInfo* rinfo,
+ static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
Callback callback) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* old_target = rinfo->target_object();
@@ -320,7 +320,7 @@ class UpdateTypedSlotHelper {
callback(reinterpret_cast<MaybeObject**>(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- rinfo->set_target_object(HeapObject::cast(new_target));
+ rinfo->set_target_object(heap, HeapObject::cast(new_target));
}
return result;
}
@@ -328,8 +328,8 @@ class UpdateTypedSlotHelper {
// Updates a typed slot using an untyped slot callback.
// The callback accepts MaybeObject** and returns SlotCallbackResult.
template <typename Callback>
- static SlotCallbackResult UpdateTypedSlot(SlotType slot_type, Address addr,
- Callback callback) {
+ static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
+ Address addr, Callback callback) {
switch (slot_type) {
case CODE_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, nullptr);
@@ -340,7 +340,7 @@ class UpdateTypedSlotHelper {
}
case EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, nullptr);
- return UpdateEmbeddedPointer(&rinfo, callback);
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
return callback(reinterpret_cast<MaybeObject**>(addr));
@@ -353,7 +353,7 @@ class UpdateTypedSlotHelper {
};
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
return CODE_TARGET_SLOT;
} else if (RelocInfo::IsEmbeddedObject(rmode)) {
return EMBEDDED_OBJECT_SLOT;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 4b07f16d11..e581ebe571 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -150,7 +150,7 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
*slot = actual;
// ThinStrings always refer to internalized strings, which are
// always in old space.
- DCHECK(!heap()->InNewSpace(actual));
+ DCHECK(!Heap::InNewSpace(actual));
base::AsAtomicPointer::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(actual).ToMap());
@@ -165,12 +165,12 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
ConsString* object, int object_size) {
DCHECK(IsShortcutCandidate(map->instance_type()));
if (!is_incremental_marking_ &&
- object->unchecked_second() == heap()->empty_string()) {
+ object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
HeapObject* first = HeapObject::cast(object->unchecked_first());
*slot = first;
- if (!heap()->InNewSpace(first)) {
+ if (!Heap::InNewSpace(first)) {
base::AsAtomicPointer::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(first).ToMap());
@@ -202,7 +202,7 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
HeapObject* source) {
- SLOW_DCHECK(heap_->InFromSpace(source));
+ SLOW_DCHECK(Heap::InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
// Cannot use ::cast() below because that would add checks in debug mode
@@ -227,7 +227,7 @@ void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
}
void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
- DCHECK(heap()->InFromSpace(object));
+ DCHECK(Heap::InFromSpace(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
MapWord first_word = object->synchronized_map_word();
@@ -236,7 +236,7 @@ void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
- DCHECK(heap()->InFromSpace(*p));
+ DCHECK(Heap::InFromSpace(*p));
if ((*p)->IsWeakHeapObject()) {
*p = HeapObjectReference::Weak(dest);
} else {
@@ -248,7 +248,7 @@ void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
Map* map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
- DCHECK_NE(heap()->allocation_memento_map(), map);
+ DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
// Call the slow part of scavenge object.
EvacuateObject(p, map, object);
}
@@ -257,7 +257,7 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
Address slot_address) {
MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
MaybeObject* object = *slot;
- if (heap->InFromSpace(object)) {
+ if (Heap::InFromSpace(object)) {
HeapObject* heap_object;
bool success = object->ToStrongOrWeakHeapObject(&heap_object);
USE(success);
@@ -272,10 +272,10 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
PageMemoryFence(object);
- if (heap->InToSpace(object)) {
+ if (Heap::InToSpace(object)) {
return KEEP_SLOT;
}
- } else if (heap->InToSpace(object)) {
+ } else if (Heap::InToSpace(object)) {
// Already updated slot. This can happen when processing of the work list
// is interleaved with processing roots.
return KEEP_SLOT;
@@ -289,7 +289,7 @@ void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
Object** end) {
for (Object** p = start; p < end; p++) {
Object* object = *p;
- if (!heap_->InNewSpace(object)) continue;
+ if (!Heap::InNewSpace(object)) continue;
scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
}
@@ -299,7 +299,7 @@ void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) {
for (MaybeObject** p = start; p < end; p++) {
MaybeObject* object = *p;
- if (!heap_->InNewSpace(object)) continue;
+ if (!Heap::InNewSpace(object)) continue;
// Treat the weak reference as strong.
HeapObject* heap_object;
if (object->ToStrongOrWeakHeapObject(&heap_object)) {
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 2c55b354d1..d9f920ef7e 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -52,16 +52,16 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
reinterpret_cast<HeapObjectReference**>(slot_address);
scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
- if (heap_->InFromSpace(target)) {
+ if (Heap::InFromSpace(target)) {
scavenger_->ScavengeObject(slot, target);
bool success = (*slot)->ToStrongOrWeakHeapObject(&target);
USE(success);
DCHECK(success);
scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
- if (heap_->InNewSpace(target)) {
+ if (Heap::InNewSpace(target)) {
SLOW_DCHECK(target->IsHeapObject());
- SLOW_DCHECK(heap_->InToSpace(target));
+ SLOW_DCHECK(Heap::InToSpace(target));
RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
slot_address);
}
@@ -125,7 +125,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- type, addr, [this](MaybeObject** addr) {
+ heap_, type, addr, [this](MaybeObject** addr) {
return CheckAndScavengeObject(heap(),
reinterpret_cast<Address>(addr));
});
@@ -139,7 +139,7 @@ void Scavenger::Process(OneshotBarrier* barrier) {
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
- ScavengeVisitor scavenge_visitor(heap(), this);
+ ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
bool done;
@@ -196,7 +196,7 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
- if (!heap_->InNewSpace(object)) return;
+ if (!Heap::InNewSpace(object)) return;
scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index de2f49f0e2..847a5b07fc 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -114,8 +114,7 @@ class Scavenger {
// filtering out non-HeapObjects and objects which do not reside in new space.
class RootScavengeVisitor final : public RootVisitor {
public:
- RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
- : heap_(heap), scavenger_(scavenger) {}
+ explicit RootScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
void VisitRootPointer(Root root, const char* description, Object** p) final;
void VisitRootPointers(Root root, const char* description, Object** start,
@@ -124,14 +123,12 @@ class RootScavengeVisitor final : public RootVisitor {
private:
void ScavengePointer(Object** p);
- Heap* const heap_;
Scavenger* const scavenger_;
};
class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
- ScavengeVisitor(Heap* heap, Scavenger* scavenger)
- : heap_(heap), scavenger_(scavenger) {}
+ explicit ScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
@@ -139,7 +136,6 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
MaybeObject** end) final;
private:
- Heap* const heap_;
Scavenger* const scavenger_;
};
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 3b232ba310..fdb142ab56 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -22,7 +22,9 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
#include "src/objects/map.h"
+#include "src/objects/microtask.h"
#include "src/objects/module.h"
+#include "src/objects/promise.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
@@ -46,8 +48,8 @@ bool Heap::CreateHeapObjects() {
CreateInternalAccessorInfoObjects();
CHECK_EQ(0u, gc_count_);
- set_native_contexts_list(undefined_value());
- set_allocation_sites_list(undefined_value());
+ set_native_contexts_list(ReadOnlyRoots(this).undefined_value());
+ set_allocation_sites_list(ReadOnlyRoots(this).undefined_value());
return true;
}
@@ -72,9 +74,14 @@ const Heap::StructTable Heap::struct_table[] = {
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
+#define ALLOCATION_SITE_ELEMENT(NAME, Name, Size, name) \
+ {NAME##_TYPE, Name::kSize##Size, k##Name##Size##MapRootIndex},
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_ELEMENT)
+#undef ALLOCATION_SITE_ELEMENT
+
#define DATA_HANDLER_ELEMENT(NAME, Name, Size, name) \
{NAME##_TYPE, Name::kSizeWithData##Size, k##Name##Size##MapRootIndex},
- DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT)
+ DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT)
#undef DATA_HANDLER_ELEMENT
};
@@ -95,7 +102,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
AllocateRaw(Map::kSize, is_js_object ? MAP_SPACE : RO_SPACE);
if (!allocation.To(&result)) return allocation;
- result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
+ result->set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
+ SKIP_WRITE_BARRIER);
Map* map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
@@ -104,8 +112,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
// Eagerly initialize the WeakCell cache for the map as it will not be
// writable in RO_SPACE.
HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell =
- isolate()->factory()->NewWeakCell(Handle<Map>(map), TENURED_READ_ONLY);
+ Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
+ Handle<Map>(map, isolate()), TENURED_READ_ONLY);
map->set_weak_cell_cache(*weak_cell);
}
@@ -146,20 +154,21 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
}
void Heap::FinalizePartialMap(Map* map) {
- map->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+ ReadOnlyRoots roots(this);
+ map->set_dependent_code(DependentCode::cast(roots.empty_fixed_array()));
map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
- map->set_instance_descriptors(empty_descriptor_array());
+ map->set_instance_descriptors(roots.empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
- map->set_prototype(null_value());
- map->set_constructor_or_backpointer(null_value());
+ map->set_prototype(roots.null_value());
+ map->set_constructor_or_backpointer(roots.null_value());
// Eagerly initialize the WeakCell cache for the map as it will not be
// writable in RO_SPACE.
HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell =
- isolate()->factory()->NewWeakCell(Handle<Map>(map), TENURED_READ_ONLY);
+ Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
+ Handle<Map>(map, isolate()), TENURED_READ_ONLY);
map->set_weak_cell_cache(*weak_cell);
}
@@ -209,6 +218,7 @@ bool Heap::CreateInitialMaps() {
set_meta_map(new_meta_map);
new_meta_map->set_map_after_allocation(new_meta_map);
+ ReadOnlyRoots roots(this);
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
@@ -224,7 +234,7 @@ bool Heap::CreateInitialMaps() {
weak_array_list);
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel,
fixed_cow_array)
- DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
+ DCHECK_NE(roots.fixed_array_map(), roots.fixed_cow_array_map());
ALLOCATE_PARTIAL_MAP(DESCRIPTOR_ARRAY_TYPE, kVariableSizeSentinel,
descriptor_array)
@@ -241,7 +251,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), RO_SPACE);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(obj)->set_length(0);
}
set_empty_fixed_array(FixedArray::cast(obj));
@@ -249,7 +259,8 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult alloc = AllocateRaw(WeakFixedArray::SizeFor(0), RO_SPACE);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.weak_fixed_array_map(),
+ SKIP_WRITE_BARRIER);
WeakFixedArray::cast(obj)->set_length(0);
}
set_empty_weak_fixed_array(WeakFixedArray::cast(obj));
@@ -258,35 +269,36 @@ bool Heap::CreateInitialMaps() {
AllocationResult allocation =
AllocateRaw(WeakArrayList::SizeForCapacity(0), RO_SPACE);
if (!allocation.To(&obj)) return false;
- obj->set_map_after_allocation(weak_array_list_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.weak_array_list_map(),
+ SKIP_WRITE_BARRIER);
WeakArrayList::cast(obj)->set_capacity(0);
WeakArrayList::cast(obj)->set_length(0);
}
set_empty_weak_array_list(WeakArrayList::cast(obj));
{
- AllocationResult allocation = Allocate(null_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.null_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
{
- AllocationResult allocation = Allocate(undefined_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.undefined_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- DCHECK(!InNewSpace(undefined_value()));
+ DCHECK(!InNewSpace(roots.undefined_value()));
{
- AllocationResult allocation = Allocate(the_hole_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.the_hole_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kTheHole);
// Set preliminary exception sentinel value before actually initializing it.
- set_exception(null_value());
+ set_exception(roots.null_value());
// Setup the struct maps first (needed for the EnumCache).
for (unsigned i = 0; i < arraysize(struct_table); i++) {
@@ -298,41 +310,42 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty enum cache.
{
- AllocationResult allocation = Allocate(tuple2_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.tuple2_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_empty_enum_cache(EnumCache::cast(obj));
- EnumCache::cast(obj)->set_keys(empty_fixed_array());
- EnumCache::cast(obj)->set_indices(empty_fixed_array());
+ EnumCache::cast(obj)->set_keys(roots.empty_fixed_array());
+ EnumCache::cast(obj)->set_indices(roots.empty_fixed_array());
// Allocate the empty descriptor array.
{
STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
int length = DescriptorArray::kFirstIndex;
- int size = FixedArray::SizeFor(length);
+ int size = WeakFixedArray::SizeFor(length);
if (!AllocateRaw(size, RO_SPACE).To(&obj)) return false;
- obj->set_map_after_allocation(descriptor_array_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.descriptor_array_map(),
+ SKIP_WRITE_BARRIER);
DescriptorArray::cast(obj)->set_length(length);
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
- DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
- Smi::kZero);
- DescriptorArray::cast(obj)->set(DescriptorArray::kEnumCacheIndex,
- empty_enum_cache());
+ DescriptorArray::cast(obj)->SetNumberOfDescriptors(0);
+ WeakFixedArray::cast(obj)->Set(
+ DescriptorArray::kEnumCacheIndex,
+ MaybeObject::FromObject(roots.empty_enum_cache()));
// Fix the instance_descriptors for the existing maps.
- FinalizePartialMap(meta_map());
- FinalizePartialMap(weak_cell_map());
- FinalizePartialMap(fixed_array_map());
- FinalizePartialMap(weak_fixed_array_map());
- FinalizePartialMap(weak_array_list_map());
- FinalizePartialMap(fixed_cow_array_map());
- FinalizePartialMap(descriptor_array_map());
- FinalizePartialMap(undefined_map());
- undefined_map()->set_is_undetectable(true);
- FinalizePartialMap(null_map());
- null_map()->set_is_undetectable(true);
- FinalizePartialMap(the_hole_map());
+ FinalizePartialMap(roots.meta_map());
+ FinalizePartialMap(roots.weak_cell_map());
+ FinalizePartialMap(roots.fixed_array_map());
+ FinalizePartialMap(roots.weak_fixed_array_map());
+ FinalizePartialMap(roots.weak_array_list_map());
+ FinalizePartialMap(roots.fixed_cow_array_map());
+ FinalizePartialMap(roots.descriptor_array_map());
+ FinalizePartialMap(roots.undefined_map());
+ roots.undefined_map()->set_is_undetectable(true);
+ FinalizePartialMap(roots.null_map());
+ roots.null_map()->set_is_undetectable(true);
+ FinalizePartialMap(roots.the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
FinalizePartialMap(Map::cast(roots_[entry.index]));
@@ -351,18 +364,18 @@ bool Heap::CreateInitialMaps() {
#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
constructor_function_index) \
- { \
- ALLOCATE_MAP((instance_type), (size), field_name); \
- field_name##_map()->SetConstructorFunctionIndex( \
- (constructor_function_index)); \
- }
+ { \
+ ALLOCATE_MAP((instance_type), (size), field_name); \
+ roots.field_name##_map()->SetConstructorFunctionIndex( \
+ (constructor_function_index)); \
+ }
ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
- ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+ ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, MutableHeapNumber::kSize,
mutable_heap_number)
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
@@ -404,7 +417,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ roots.fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(FEEDBACK_METADATA_TYPE, feedback_metadata)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
@@ -429,7 +442,7 @@ bool Heap::CreateInitialMaps() {
Smi* value = Smi::FromInt(Map::kPrototypeChainInvalid);
AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
Cell::cast(obj)->set_value(value);
set_invalid_prototype_validity_cell(Cell::cast(obj));
}
@@ -441,21 +454,24 @@ bool Heap::CreateInitialMaps() {
// The "no closures" and "one closure" FeedbackCell maps need
// to be marked unstable because their objects can change maps.
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
- no_closures_cell_map()->mark_unstable();
+ roots.no_closures_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
- one_closure_cell_map()->mark_unstable();
+ roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_map)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, ordered_hash_set)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, name_dictionary)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, global_dictionary)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
+ ALLOCATE_VARSIZE_MAP(ORDERED_HASH_MAP_TYPE, ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(NAME_DICTIONARY_TYPE, name_dictionary)
+ ALLOCATE_VARSIZE_MAP(GLOBAL_DICTIONARY_TYPE, global_dictionary)
+ ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
+ ALLOCATE_VARSIZE_MAP(SIMPLE_NUMBER_DICTIONARY_TYPE,
+ simple_number_dictionary)
+ ALLOCATE_VARSIZE_MAP(STRING_TABLE_TYPE, string_table)
+
+ ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
@@ -467,12 +483,13 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TYPE, script_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
+ ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TABLE_TYPE, script_context_table)
- ALLOCATE_VARSIZE_MAP(BOILERPLATE_DESCRIPTION_TYPE, boilerplate_description)
+ ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ object_boilerplate_description)
ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
- native_context_map()->set_visitor_id(kVisitNativeContext);
+ roots.native_context_map()->set_visitor_id(kVisitNativeContext);
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
@@ -481,6 +498,13 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
next_call_side_effect_free_call_handler_info)
+ ALLOCATE_VARSIZE_MAP(PRE_PARSED_SCOPE_DATA_TYPE, pre_parsed_scope_data)
+ ALLOCATE_MAP(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
+ UncompiledDataWithoutPreParsedScope::kSize,
+ uncompiled_data_without_pre_parsed_scope)
+ ALLOCATE_MAP(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
+ UncompiledDataWithPreParsedScope::kSize,
+ uncompiled_data_with_pre_parsed_scope)
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
@@ -498,29 +522,34 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), RO_SPACE);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(obj)->set_length(0);
}
set_empty_scope_info(ScopeInfo::cast(obj));
{
- AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), RO_SPACE);
+ // Empty boilerplate needs a field for literal_flags
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(1), RO_SPACE);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(boilerplate_description_map(),
+ obj->set_map_after_allocation(roots.object_boilerplate_description_map(),
SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(0);
+
+ FixedArray::cast(obj)->set_length(1);
+ FixedArray::cast(obj)->set(ObjectBoilerplateDescription::kLiteralTypeOffset,
+ Smi::kZero);
}
- set_empty_boilerplate_description(BoilerplateDescription::cast(obj));
+ set_empty_object_boilerplate_description(
+ ObjectBoilerplateDescription::cast(obj));
{
- AllocationResult allocation = Allocate(boolean_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kTrue);
{
- AllocationResult allocation = Allocate(boolean_map(), RO_SPACE);
+ AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
@@ -529,7 +558,7 @@ bool Heap::CreateInitialMaps() {
// Empty arrays.
{
if (!AllocateRaw(ByteArray::SizeFor(0), RO_SPACE).To(&obj)) return false;
- obj->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
ByteArray::cast(obj)->set_length(0);
set_empty_byte_array(ByteArray::cast(obj));
}
@@ -538,7 +567,8 @@ bool Heap::CreateInitialMaps() {
if (!AllocateRaw(FixedArray::SizeFor(0), RO_SPACE).To(&obj)) {
return false;
}
- obj->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
+ obj->set_map_after_allocation(roots.property_array_map(),
+ SKIP_WRITE_BARRIER);
PropertyArray::cast(obj)->initialize_length(0);
set_empty_property_array(PropertyArray::cast(obj));
}
@@ -554,9 +584,10 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- DCHECK(!InNewSpace(empty_fixed_array()));
+ DCHECK(!InNewSpace(roots.empty_fixed_array()));
- bigint_map()->SetConstructorFunctionIndex(Context::BIGINT_FUNCTION_INDEX);
+ roots.bigint_map()->SetConstructorFunctionIndex(
+ Context::BIGINT_FUNCTION_INDEX);
return true;
}
@@ -576,20 +607,22 @@ void Heap::CreateApiObjects() {
void Heap::CreateInitialObjects() {
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
+ ReadOnlyRoots roots(this);
// The -0 value must be set before NewNumber works.
- set_minus_zero_value(
- *factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED_READ_ONLY));
- DCHECK(std::signbit(minus_zero_value()->Number()));
+ set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED_READ_ONLY));
+ DCHECK(std::signbit(roots.minus_zero_value()->Number()));
set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED_READ_ONLY));
- set_hole_nan_value(*factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE,
- TENURED_READ_ONLY));
- set_infinity_value(
- *factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED_READ_ONLY));
+ std::numeric_limits<double>::quiet_NaN(), TENURED_READ_ONLY));
+ set_hole_nan_value(
+ *factory->NewHeapNumberFromBits(kHoleNanInt64, TENURED_READ_ONLY));
+ set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED_READ_ONLY));
set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED_READ_ONLY));
+ *factory->NewHeapNumber(-V8_INFINITY, TENURED_READ_ONLY));
+
+ set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED));
+ InitializeHashSeed();
// Allocate cache for single character one byte strings.
set_single_character_string_cache(
@@ -727,13 +760,13 @@ void Heap::CreateInitialObjects() {
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
- set_microtask_queue(empty_fixed_array());
+ set_microtask_queue(roots.empty_fixed_array());
{
Handle<FixedArray> empty_sloppy_arguments_elements =
factory->NewFixedArray(2, TENURED_READ_ONLY);
empty_sloppy_arguments_elements->set_map_after_allocation(
- sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
+ roots.sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
}
@@ -744,11 +777,11 @@ void Heap::CreateInitialObjects() {
cell->clear();
}
- set_detached_contexts(empty_fixed_array());
- set_retained_maps(empty_weak_array_list());
- set_retaining_path_targets(undefined_value());
+ set_detached_contexts(roots.empty_fixed_array());
+ set_retained_maps(roots.empty_weak_array_list());
+ set_retaining_path_targets(roots.undefined_value());
- set_feedback_vectors_for_profiling_tools(undefined_value());
+ set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_script_list(Smi::kZero);
@@ -762,7 +795,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_last_debugging_id(Smi::FromInt(SharedFunctionInfo::kNoDebuggingId));
+ set_last_debugging_id(Smi::FromInt(DebugInfo::kNoDebuggingId));
set_next_template_serial_number(Smi::kZero);
// Allocate the empty OrderedHashMap.
@@ -804,7 +837,7 @@ void Heap::CreateInitialObjects() {
set_no_elements_protector(*cell);
cell = factory->NewPropertyCell(factory->empty_string(), TENURED_READ_ONLY);
- cell->set_value(the_hole_value());
+ cell->set_value(roots.the_hole_value());
set_empty_property_cell(*cell);
cell = factory->NewPropertyCell(factory->empty_string());
@@ -847,8 +880,8 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_then_protector(*cell);
- set_serialized_objects(empty_fixed_array());
- set_serialized_global_proxy_sizes(empty_fixed_array());
+ set_serialized_objects(roots.empty_fixed_array());
+ set_serialized_global_proxy_sizes(roots.empty_fixed_array());
set_weak_stack_trace_list(Smi::kZero);
@@ -864,7 +897,7 @@ void Heap::CreateInitialObjects() {
isolate()->factory()->one_string()->Hash();
// Initialize builtins constants table.
- set_builtins_constants_table(empty_fixed_array());
+ set_builtins_constants_table(roots.empty_fixed_array());
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index bdd793d5b7..2ddcf6cf36 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -45,7 +45,7 @@ HeapObject* SemiSpaceIterator::Next() {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
- DCHECK(!page->is_anchor());
+ DCHECK(page);
current_ = page->area_start();
if (current_ == limit_) return nullptr;
}
@@ -468,10 +468,6 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-size_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->memory_allocator()->Available());
-}
-
LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
return LocalAllocationBuffer(
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index f23323c135..59ce145474 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -14,6 +14,7 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
@@ -34,7 +35,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
- page_range_(space->anchor()->next_page(), space->anchor()),
+ page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {}
HeapObjectIterator::HeapObjectIterator(Page* page)
@@ -93,13 +94,15 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
// -----------------------------------------------------------------------------
// CodeRange
-CodeRange::CodeRange(Isolate* isolate)
+static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
+ LAZY_INSTANCE_INITIALIZER;
+
+CodeRange::CodeRange(Isolate* isolate, size_t requested)
: isolate_(isolate),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0) {}
-
-bool CodeRange::SetUp(size_t requested) {
+ current_allocation_block_index_(0),
+ requested_code_range_size_(0) {
DCHECK(!virtual_memory_.IsReserved());
if (requested == 0) {
@@ -109,7 +112,7 @@ bool CodeRange::SetUp(size_t requested) {
if (kRequiresCodeRange) {
requested = kMaximalCodeRangeSize;
} else {
- return true;
+ return;
}
}
@@ -124,11 +127,15 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+ requested_code_range_size_ = requested;
+
VirtualMemory reservation;
+ void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
if (!AlignedAllocVirtualMemory(
- requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
- GetRandomMmapAddr(), &reservation)) {
- return false;
+ requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
+ &reservation)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "CodeRange setup: allocate virtual memory");
}
// We are sure that we have mapped a block of requested addresses.
@@ -140,7 +147,7 @@ bool CodeRange::SetUp(size_t requested) {
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
PageAllocator::kReadWrite))
- return false;
+ V8::FatalProcessOutOfMemory(isolate, "CodeRange setup: set permissions");
base += reserved_area;
}
@@ -153,7 +160,15 @@ bool CodeRange::SetUp(size_t requested) {
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
requested));
virtual_memory_.TakeControl(&reservation);
- return true;
+}
+
+CodeRange::~CodeRange() {
+ if (virtual_memory_.IsReserved()) {
+ Address addr = start();
+ virtual_memory_.Free();
+ code_range_address_hint.Pointer()->NotifyFreedCodeRange(
+ reinterpret_cast<void*>(addr), requested_code_range_size_);
+ }
}
bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
@@ -262,31 +277,38 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
free_list_.push_back(*block);
}
+void* CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return GetRandomMmapAddr();
+ }
+ void* result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(void* code_range_start,
+ size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-MemoryAllocator::MemoryAllocator(Isolate* isolate)
+MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
+ size_t code_range_size)
: isolate_(isolate),
code_range_(nullptr),
- capacity_(0),
+ capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
size_executable_(0),
lowest_ever_allocated_(static_cast<Address>(-1ll)),
highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {}
-
-bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
- capacity_ = ::RoundUp(capacity, Page::kPageSize);
-
- size_ = 0;
- size_executable_ = 0;
-
- code_range_ = new CodeRange(isolate_);
- if (!code_range_->SetUp(code_range_size)) return false;
-
- return true;
+ unmapper_(isolate->heap(), this) {
+ code_range_ = new CodeRange(isolate_, code_range_size);
}
@@ -294,7 +316,7 @@ void MemoryAllocator::TearDown() {
unmapper()->TearDown();
// Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_.Value(), 0u);
+ DCHECK_EQ(size_, 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK_EQ(0, size_executable_);
capacity_ = 0;
@@ -319,7 +341,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_.Decrement(1);
+ unmapper_->active_unmapping_tasks_--;
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(),
@@ -350,9 +372,9 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
task->id());
}
DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_.Value(), 0);
- active_unmapping_tasks_.Increment(1);
+ DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_, 0);
+ active_unmapping_tasks_++;
task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
@@ -368,7 +390,7 @@ void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
}
}
pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_.SetValue(0);
+ active_unmapping_tasks_ = 0;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -391,7 +413,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
- if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
// All previous unmapping tasks have been run to completion.
// Finalize those tasks to make room for new ones.
CancelAndWaitForPendingTasks();
@@ -449,6 +471,21 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
return static_cast<int>(result);
}
+size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ size_t sum = 0;
+ // kPooled chunks are already uncommited. We only have to account for
+ // kRegular and kNonRegular chunks.
+ for (auto& chunk : chunks_[kRegular]) {
+ sum += chunk->size();
+ }
+ for (auto& chunk : chunks_[kNonRegular]) {
+ sum += chunk->size();
+ }
+ return sum;
+}
+
bool MemoryAllocator::CommitMemory(Address base, size_t size) {
if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
return false;
@@ -491,7 +528,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
}
Address result = reservation.address();
- size_.Increment(reservation.size());
+ size_ += reservation.size();
controller->TakeControl(&reservation);
return result;
}
@@ -523,7 +560,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
// Failed to commit the body. Free the mapping and any partially committed
// regions inside it.
reservation.Free();
- size_.Decrement(reserve_size);
+ size_ -= reserve_size;
return kNullAddress;
}
@@ -531,14 +568,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
-void Page::InitializeAsAnchor(Space* space) {
- set_owner(space);
- set_next_chunk(this);
- set_prev_chunk(this);
- SetFlags(0, static_cast<uintptr_t>(~0));
- SetFlag(ANCHOR);
-}
-
Heap* MemoryChunk::synchronized_heap() {
return reinterpret_cast<Heap*>(
base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
@@ -624,18 +653,21 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
- chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
- chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
+ chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
+ chunk->set_concurrent_sweeping_state(kSweepingDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
- chunk->set_next_chunk(nullptr);
- chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
+ chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
+ 0;
+ chunk->external_backing_store_bytes_
+ [ExternalBackingStoreType::kExternalString] = 0;
+
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
chunk->categories_[i] = nullptr;
}
@@ -678,9 +710,10 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
- heap()->incremental_marking()->SetOldSpacePageFlags(page);
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
+ page->list_node().Initialize();
page->InitializationMemoryFence();
return page;
}
@@ -693,8 +726,9 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
- heap()->incremental_marking()->SetNewSpacePageFlags(page);
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
+ page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
@@ -714,19 +748,11 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
- // Initialize the owner field for each contained page (except the first, which
- // is initialized by MemoryChunk::Initialize).
- for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
- addr < chunk->area_end(); addr += Page::kPageSize) {
- // Clear out kPageHeaderTag.
- Memory::Address_at(addr) = 0;
- }
LargePage* page = static_cast<LargePage*>(chunk);
- page->InitializationMemoryFence();
+ page->list_node().Initialize();
return page;
}
@@ -753,7 +779,7 @@ void Page::ReleaseFreeListCategories() {
}
Page* Page::ConvertNewToOld(Page* old_page) {
- DCHECK(!old_page->is_anchor());
+ DCHECK(old_page);
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
@@ -766,32 +792,13 @@ Page* Page::ConvertNewToOld(Page* old_page) {
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
- return high_water_mark_.Value();
+ return high_water_mark_;
}
bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
- MemoryChunk* other_next = other->next_chunk();
-
- set_next_chunk(other_next);
- set_prev_chunk(other);
- other_next->set_prev_chunk(this);
- other->set_next_chunk(this);
-}
-
-
-void MemoryChunk::Unlink() {
- MemoryChunk* next_element = next_chunk();
- MemoryChunk* prev_element = prev_chunk();
- next_element->set_prev_chunk(prev_element);
- prev_element->set_next_chunk(next_element);
- set_prev_chunk(nullptr);
- set_next_chunk(nullptr);
-}
-
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -857,21 +864,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
DCHECK(IsAligned(base, MemoryChunk::kAlignment));
if (base == kNullAddress) return nullptr;
- size_.Increment(chunk_size);
+ size_ += chunk_size;
// Update executable memory size.
- size_executable_.Increment(chunk_size);
+ size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
address_hint, &reservation);
if (base == kNullAddress) return nullptr;
// Update executable memory size.
- size_executable_.Increment(reservation.size());
+ size_executable_ += reservation.size();
}
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+ ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
}
area_start = base + CodePageAreaStartOffset();
@@ -889,7 +896,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+ ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
}
area_start = base + Page::kObjectStartOffset;
@@ -911,9 +918,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
CHECK(!last_chunk_.IsReserved());
last_chunk_.TakeControl(&reservation);
UncommitBlock(last_chunk_.address(), last_chunk_.size());
- size_.Decrement(chunk_size);
+ size_ -= chunk_size;
if (executable == EXECUTABLE) {
- size_executable_.Decrement(chunk_size);
+ size_executable_ -= chunk_size;
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -928,8 +935,36 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
return chunk;
}
+void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
+void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
+void Page::AllocateLocalTracker() {
+ DCHECK_NULL(local_tracker_);
+ local_tracker_ = new LocalArrayBufferTracker(this);
+}
+
+bool Page::contains_array_buffers() {
+ return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
+}
+
void Page::ResetFreeListStatistics() {
wasted_memory_ = 0;
}
@@ -1042,8 +1077,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
// partially starting at |start_free| will also release the potentially
// unused part behind the current page.
const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_.Value(), released_bytes);
- size_.Decrement(released_bytes);
+ DCHECK_GE(size_, released_bytes);
+ size_ -= released_bytes;
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(released_bytes));
}
@@ -1058,12 +1093,12 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_.Value(), static_cast<size_t>(size));
- size_.Decrement(size);
+ DCHECK_GE(size_, static_cast<size_t>(size));
+ size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_.Value(), size);
- size_executable_.Decrement(size);
+ DCHECK_GE(size_executable_, size);
+ size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -1172,7 +1207,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
- size_.Increment(size);
+ size_ += size;
return chunk;
}
@@ -1180,7 +1215,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {
if (!CommitMemory(start, size)) return false;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size);
+ ZapBlock(start, size, kZapValue);
}
isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
@@ -1194,10 +1229,12 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
return true;
}
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
+void MemoryAllocator::ZapBlock(Address start, size_t size,
+ uintptr_t zap_value) {
+ DCHECK_EQ(start % kPointerSize, 0);
+ DCHECK_EQ(size % kPointerSize, 0);
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = static_cast<Address>(kZapValue);
+ Memory::Address_at(start + s) = static_cast<Address>(zap_value);
}
}
@@ -1272,10 +1309,6 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-bool MemoryChunk::contains_array_buffers() {
- return local_tracker() != nullptr && !local_tracker()->IsEmpty();
-}
-
void MemoryChunk::ReleaseAllocatedMemory() {
if (skip_list_ != nullptr) {
delete skip_list_;
@@ -1393,11 +1426,6 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
-void MemoryChunk::AllocateLocalTracker() {
- DCHECK_NULL(local_tracker_);
- local_tracker_ = new LocalArrayBufferTracker(owner());
-}
-
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1415,6 +1443,19 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
+void MemoryChunk::IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ external_backing_store_bytes_[type] += amount;
+ owner()->IncrementExternalBackingStoreBytes(type, amount);
+}
+
+void MemoryChunk::DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ DCHECK_GE(external_backing_store_bytes_[type], amount);
+ external_backing_store_bytes_[type] -= amount;
+ owner()->DecrementExternalBackingStoreBytes(type, amount);
+}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1464,25 +1505,17 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : SpaceWithLinearArea(heap, space), executable_(executable), anchor_(this) {
+ : SpaceWithLinearArea(heap, space), executable_(executable) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
-
-bool PagedSpace::SetUp() { return true; }
-
-
-bool PagedSpace::HasBeenSetUp() { return true; }
-
-
void PagedSpace::TearDown() {
- for (auto it = begin(); it != end();) {
- Page* page = *(it++); // Will be erased.
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
}
- anchor_.set_next_page(&anchor_);
- anchor_.set_prev_page(&anchor_);
accounting_stats_.Clear();
}
@@ -1526,8 +1559,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
- // anchor_
-
other->FreeLinearAllocationArea();
// The linear allocation area of {other} should be destroyed now.
@@ -1610,20 +1641,28 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
- page->InsertAfter(anchor()->prev_page());
+ memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
IncreaseCapacity(page->area_size());
IncreaseAllocatedBytes(page->allocated_bytes(), page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
return RelinkFreeListCategories(page);
}
void PagedSpace::RemovePage(Page* page) {
CHECK(page->SweepingDone());
- page->Unlink();
+ memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
DecreaseAllocatedBytes(page->allocated_bytes(), page);
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
@@ -1668,7 +1707,6 @@ bool PagedSpace::Expand() {
AddPage(page);
Free(page->area_start(), page->area_size(),
SpaceAccountingMode::kSpaceAccounted);
- DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
return true;
}
@@ -1815,11 +1853,6 @@ void PagedSpace::ReleasePage(Page* page) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
- // If page is still in a list, unlink it from that list.
- if (page->next_chunk() != nullptr) {
- DCHECK_NOT_NULL(page->prev_chunk());
- page->Unlink();
- }
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
@@ -1904,11 +1937,23 @@ void PagedSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
-void PagedSpace::Verify(ObjectVisitor* visitor) {
+void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
+ size_t external_space_bytes[kNumTypes];
+ size_t external_page_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
for (Page* page : *this) {
CHECK(page->owner() == this);
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
@@ -1916,6 +1961,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
+
for (HeapObject* object = it.Next(); object != nullptr;
object = it.Next()) {
CHECK(end_of_previous_object <= object->address());
@@ -1931,7 +1977,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
VerifyObject(object);
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
@@ -1942,8 +1988,25 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->IterateBody(map, size, visitor);
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
+
+ if (object->IsJSArrayBuffer()) {
+ JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = NumberToSize(array_buffer->byte_length());
+ external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
+ external_space_bytes[t] += external_page_bytes[t];
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
VerifyCountersAfterSweeping();
@@ -2021,24 +2084,25 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
// -----------------------------------------------------------------------------
// NewSpace implementation
-bool NewSpace::SetUp(size_t initial_semispace_capacity,
- size_t maximum_semispace_capacity) {
- DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo(
- static_cast<uint32_t>(maximum_semispace_capacity)));
-
- to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
- from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
+ size_t max_semispace_capacity)
+ : SpaceWithLinearArea(heap, NEW_SPACE),
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace),
+ reservation_() {
+ DCHECK(initial_semispace_capacity <= max_semispace_capacity);
+ DCHECK(
+ base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
+
+ to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
if (!to_space_.Commit()) {
- return false;
+ V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
ResetLinearAllocationArea();
-
- return true;
}
-
void NewSpace::TearDown() {
allocation_info_.Reset(kNullAddress, kNullAddress);
@@ -2101,23 +2165,29 @@ bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
const int expected_pages =
static_cast<int>(current_capacity_ / Page::kPageSize);
+ MemoryChunk* current_page = first_page();
int actual_pages = 0;
- Page* current_page = anchor()->next_page();
- while (current_page != anchor()) {
+
+ // First iterate through the pages list until expected pages if so many
+ // pages exist.
+ while (current_page != nullptr && actual_pages < expected_pages) {
actual_pages++;
- current_page = current_page->next_page();
- if (actual_pages > expected_pages) {
- Page* to_remove = current_page->prev_page();
- // Make sure we don't overtake the actual top pointer.
- CHECK_NE(to_remove, current_page_);
- to_remove->Unlink();
- // Clear new space flags to avoid this page being treated as a new
- // space page that is potentially being swept.
- to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- to_remove);
- }
+ current_page = current_page->list_node().next();
+ }
+
+ // Free all overallocated pages which are behind current_page.
+ while (current_page) {
+ MemoryChunk* next_current = current_page->list_node().next();
+ memory_chunk_list_.Remove(current_page);
+ // Clear new space flags to avoid this page being treated as a new
+ // space page that is potentially being swept.
+ current_page->SetFlags(0, Page::kIsInNewSpaceMask);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+ current_page);
+ current_page = next_current;
}
+
+ // Add more pages if we have less than expected_pages.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
@@ -2127,9 +2197,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
- current_page->InsertAfter(anchor());
+ memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+ current_page->SetFlags(first_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
@@ -2191,8 +2261,8 @@ void NewSpace::UpdateLinearAllocationArea() {
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
- original_top_.SetValue(top());
- original_limit_.SetValue(limit());
+ original_top_ = top();
+ original_limit_ = limit();
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -2285,6 +2355,10 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
+size_t LargeObjectSpace::Available() {
+ return ObjectSizeFor(heap()->memory_allocator()->Available());
+}
+
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -2358,7 +2432,7 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify() {
+void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -2367,6 +2441,11 @@ void NewSpace::Verify() {
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
@@ -2387,23 +2466,34 @@ void NewSpace::Verify() {
CHECK(!object->IsAbstractCode());
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
+ VerifyPointersVisitor visitor(heap());
int size = object->Size();
object->IterateBody(map, size, &visitor);
+ if (object->IsJSArrayBuffer()) {
+ JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = NumberToSize(array_buffer->byte_length());
+ external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+
current += size;
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
- // Next page should be valid.
- CHECK(!page->is_anchor());
current = page->area_start();
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
@@ -2435,18 +2525,16 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- Page* current = anchor();
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
- RewindPages(current, pages_added);
+ if (pages_added) RewindPages(pages_added);
return false;
}
- new_page->InsertAfter(current);
- current = new_page;
+ memory_chunk_list_.PushBack(new_page);
}
Reset();
AccountCommitted(current_capacity_);
@@ -2460,12 +2548,12 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- for (auto it = begin(); it != end();) {
- Page* p = *(it++);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
}
- anchor()->set_next_page(anchor());
- anchor()->set_prev_page(anchor());
+ current_page_ = nullptr;
AccountUncommitted(current_capacity_);
committed_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
@@ -2492,8 +2580,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
- Page* last_page = anchor()->prev_page();
- DCHECK_NE(last_page, anchor());
+ DCHECK(last_page());
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -2501,29 +2588,26 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
Page::kAllocatableMemory, this, NOT_EXECUTABLE);
if (new_page == nullptr) {
- RewindPages(last_page, pages_added);
+ if (pages_added) RewindPages(pages_added);
return false;
}
- new_page->InsertAfter(last_page);
+ memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
// Duplicate the flags that was set on the old page.
- new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
- last_page = new_page;
+ new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
AccountCommitted(delta);
current_capacity_ = new_capacity;
return true;
}
-void SemiSpace::RewindPages(Page* start, int num_pages) {
- Page* new_last_page = nullptr;
- Page* last_page = start;
+void SemiSpace::RewindPages(int num_pages) {
+ DCHECK_GT(num_pages, 0);
+ DCHECK(last_page());
while (num_pages > 0) {
- DCHECK_NE(last_page, anchor());
- new_last_page = last_page->prev_page();
- last_page->prev_page()->set_next_page(last_page->next_page());
- last_page->next_page()->set_prev_page(last_page->prev_page());
- last_page = new_last_page;
+ MemoryChunk* last = last_page();
+ memory_chunk_list_.Remove(last);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
num_pages--;
}
}
@@ -2534,19 +2618,9 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, AllocatePageSize()));
+ DCHECK(IsAligned(delta, Page::kPageSize));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
- Page* new_last_page;
- Page* last_page;
- while (delta_pages > 0) {
- last_page = anchor()->prev_page();
- new_last_page = last_page->prev_page();
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- last_page);
- delta_pages--;
- }
+ RewindPages(delta_pages);
AccountUncommitted(delta);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
@@ -2555,10 +2629,6 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
}
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
- anchor_.set_owner(this);
- anchor_.prev_page()->set_next_page(&anchor_);
- anchor_.next_page()->set_prev_page(&anchor_);
-
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
@@ -2579,30 +2649,41 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
void SemiSpace::Reset() {
- DCHECK_NE(anchor_.next_page(), &anchor_);
- current_page_ = anchor_.next_page();
+ DCHECK(first_page());
+ DCHECK(last_page());
+ current_page_ = first_page();
pages_used_ = 0;
}
void SemiSpace::RemovePage(Page* page) {
if (current_page_ == page) {
- current_page_ = page->prev_page();
+ if (page->prev_page()) {
+ current_page_ = page->prev_page();
+ }
+ }
+ memory_chunk_list_.Remove(page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
- page->Unlink();
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
page->set_owner(this);
- page->InsertAfter(anchor());
+ memory_chunk_list_.PushFront(page);
pages_used_++;
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
- DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
- DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
+ DCHECK(from->first_page());
+ DCHECK(to->first_page());
intptr_t saved_to_space_flags = to->current_page()->GetFlags();
@@ -2612,8 +2693,10 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->minimum_capacity_, to->minimum_capacity_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
- std::swap(from->anchor_, to->anchor_);
+ std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
std::swap(from->current_page_, to->current_page_);
+ std::swap(from->external_backing_store_bytes_,
+ to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(0, 0);
@@ -2640,9 +2723,13 @@ void SemiSpace::Print() {}
#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
- Page* page = anchor_.next_page();
- CHECK(anchor_.owner() == this);
- while (page != &anchor_) {
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
@@ -2660,8 +2747,17 @@ void SemiSpace::Verify() {
!page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
}
}
- CHECK_EQ(page->prev_page()->next_page(), page);
- page = page->next_page();
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
+ }
+
+ CHECK_IMPLIES(page->list_node().prev(),
+ page->list_node().prev()->list_node().next() == page);
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
@@ -2681,8 +2777,8 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
} else {
while (page != end_page) {
page = page->next_page();
- DCHECK_NE(page, space->anchor());
}
+ DCHECK(page);
}
}
#endif
@@ -2692,7 +2788,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top());
+ Initialize(space->first_allocatable_address(), space->top());
}
@@ -2786,9 +2882,9 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
while (n != nullptr) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == nullptr) {
- *map_location = heap->free_space_map();
+ *map_location = ReadOnlyRoots(heap).free_space_map();
} else {
- DCHECK(*map_location == heap->free_space_map());
+ DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
}
n = n->next();
}
@@ -2823,7 +2919,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
// Blocks have to be a minimum size to hold free list items.
if (size_in_bytes < kMinBlockSize) {
page->add_wasted_memory(size_in_bytes);
- wasted_bytes_.Increment(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -2995,7 +3091,7 @@ size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace* cur = top();
while (cur != nullptr) {
- DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
+ DCHECK(cur->map() == page()->heap()->root(Heap::kFreeSpaceMapRootIndex));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -3184,9 +3280,8 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif
-ReadOnlySpace::ReadOnlySpace(Heap* heap, AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, id, executable),
+ReadOnlySpace::ReadOnlySpace(Heap* heap)
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
@@ -3269,7 +3364,7 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
// LargeObjectIterator
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_page_;
+ current_ = space->first_page();
}
@@ -3285,33 +3380,27 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
+LargeObjectSpace::LargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, LO_SPACE) {}
+
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id), // Managed on a per-allocation basis
- first_page_(nullptr),
+ : Space(heap, id),
size_(0),
page_count_(0),
objects_size_(0),
chunk_map_(1024) {}
-LargeObjectSpace::~LargeObjectSpace() {}
-
-bool LargeObjectSpace::SetUp() {
- return true;
-}
-
void LargeObjectSpace::TearDown() {
- while (first_page_ != nullptr) {
- LargePage* page = first_page_;
- first_page_ = first_page_->next_page();
+ while (!memory_chunk_list_.Empty()) {
+ LargePage* page = first_page();
LOG(heap()->isolate(),
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
+ memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
- SetUp();
}
-
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
@@ -3321,17 +3410,35 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
+ LargePage* page = AllocateLargePage(object_size, executable);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ HeapObject* object = page->GetObject();
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ if (heap()->incremental_marking()->black_allocation()) {
+ heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ }
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
+ page->InitializationMemoryFence();
+ return object;
+}
+
+LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
+ Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
- page->set_next_page(first_page_);
- first_page_ = page;
+ memory_chunk_list_.PushBack(page);
InsertChunkMapEntries(page);
@@ -3341,23 +3448,13 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
// Make the object consistent so the heap can be verified in OldSpaceStep.
// We only need to do this in debug builds or if verify_heap is on.
reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
+ ReadOnlyRoots(heap()).fixed_array_map();
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
-
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
- if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
- }
AllocationStep(object_size, object->address(), object_size);
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
- return object;
+ return page;
}
@@ -3437,12 +3534,12 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = nullptr;
- LargePage* current = first_page_;
+ LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
objects_size_ = 0;
- while (current != nullptr) {
+ while (current) {
+ LargePage* next_current = current->next_page();
HeapObject* object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
@@ -3462,26 +3559,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
- previous = current;
- current = current->next_page();
} else {
- LargePage* page = current;
- // Cut the chunk out from the chunk list.
- current = current->next_page();
- if (previous == nullptr) {
- first_page_ = current;
- } else {
- previous->set_next_page(current);
- }
+ memory_chunk_list_.Remove(current);
// Free the chunk.
- size_ -= static_cast<int>(page->size());
- AccountUncommitted(page->size());
+ size_ -= static_cast<int>(current->size());
+ AccountUncommitted(current->size());
page_count_--;
- RemoveChunkMapEntries(page);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ RemoveChunkMapEntries(current);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+ current);
}
+ current = next_current;
}
}
@@ -3504,8 +3594,14 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_; chunk != nullptr;
+void LargeObjectSpace::Verify(Isolate* isolate) {
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
@@ -3527,10 +3623,10 @@ void LargeObjectSpace::Verify() {
object->IsWeakFixedArray() || object->IsWeakArrayList() ||
object->IsPropertyArray() || object->IsByteArray() ||
object->IsFeedbackVector() || object->IsBigInt() ||
- object->IsFreeSpace());
+ object->IsFreeSpace() || object->IsFeedbackMetadata());
// The object itself should look OK.
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
@@ -3538,7 +3634,7 @@ void LargeObjectSpace::Verify() {
// Byte arrays and strings don't have interior pointers.
if (object->IsAbstractCode()) {
- VerifyPointersVisitor code_visitor;
+ VerifyPointersVisitor code_visitor(heap());
object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
FixedArray* array = FixedArray::cast(object);
@@ -3561,13 +3657,21 @@ void LargeObjectSpace::Verify() {
}
}
}
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void LargeObjectSpace::Print() {
- OFStream os(stdout);
+ StdoutStream os;
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
obj->Print(os);
@@ -3577,7 +3681,7 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
- AllocationSpaceName(this->owner()->identity()));
+ this->owner()->name());
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
@@ -3598,5 +3702,23 @@ void Page::Print() {
}
#endif // DEBUG
+
+NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, NEW_LO_SPACE) {}
+
+AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ // TODO(hpayer): Add heap growing strategy here.
+ LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetFlag(MemoryChunk::IN_TO_SPACE);
+ page->InitializationMemoryFence();
+ return page->GetObject();
+}
+
+size_t NewLargeObjectSpace::Available() {
+ // TODO(hpayer): Update as soon as we have a growing strategy.
+ return 0;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 71aab937ef..dbd0d82008 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -15,6 +15,7 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/iterator.h"
+#include "src/base/list.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/flags.h"
@@ -141,6 +142,12 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+enum ExternalBackingStoreType {
+ kArrayBuffer,
+ kExternalString,
+ kNumTypes
+};
+
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
@@ -256,7 +263,7 @@ class MemoryChunk {
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
- // A page in new space has one of the next to flags set.
+ // A page in new space has one of the next two flags set.
IN_FROM_SPACE = 1u << 3,
IN_TO_SPACE = 1u << 4,
NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
@@ -303,12 +310,9 @@ class MemoryChunk {
// triggering on the same page.
COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
- // |ANCHOR|: Flag is set if page is an anchor.
- ANCHOR = 1u << 17,
-
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
- SWEEP_TO_ITERATE = 1u << 18
+ SWEEP_TO_ITERATE = 1u << 17
};
using Flags = uintptr_t;
@@ -359,24 +363,26 @@ class MemoryChunk {
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
+ + kIntptrSize // std::atomic<intptr_t> live_byte_count_
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kPointerSize // InvalidatedSlots* invalidated_slots_
+ kPointerSize // SkipList* skip_list_
- + kPointerSize // AtomicValue high_water_mark_
+ + kPointerSize // std::atomic<intptr_t> high_water_mark_
+ kPointerSize // base::Mutex* mutex_
- + kPointerSize // base::AtomicWord concurrent_sweeping_
+ +
+ kPointerSize // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
+ kPointerSize // base::Mutex* page_protection_change_mutex_
+ kPointerSize // unitptr_t write_unprotect_counter_
- + kSizetSize // size_t allocated_bytes_
- + kSizetSize // size_t wasted_memory_
- + kPointerSize // AtomicValue next_chunk_
- + kPointerSize // AtomicValue prev_chunk_
+ + kSizetSize * kNumTypes
+ // std::atomic<size_t> external_backing_store_bytes_
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kPointerSize * 2 // base::ListNode
+ kPointerSize * kNumberOfCategories
// FreeListCategory categories_[kNumberOfCategories]
- + kPointerSize // LocalArrayBufferTracker* local_tracker_
- + kIntptrSize // intptr_t young_generation_live_byte_count_
+ + kPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kPointerSize; // Bitmap* young_generation_bitmap_
// We add some more space to the computed header size to amount for missing
@@ -410,11 +416,14 @@ class MemoryChunk {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(HeapObject* o) {
+ static MemoryChunk* FromHeapObject(const HeapObject* o) {
return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
~kAlignmentMask);
}
+ void SetOldGenerationPageFlags(bool is_marking);
+ void SetYoungGenerationPageFlags(bool is_marking);
+
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
static inline void UpdateHighWaterMark(Address mark) {
@@ -426,9 +435,10 @@ class MemoryChunk {
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = 0;
do {
- old_mark = chunk->high_water_mark_.Value();
- } while ((new_mark > old_mark) &&
- !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+ old_mark = chunk->high_water_mark_;
+ } while (
+ (new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
}
Address address() const {
@@ -447,14 +457,16 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
- base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
- return concurrent_sweeping_;
+ void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
+ concurrent_sweeping_ = state;
}
- bool SweepingDone() {
- return concurrent_sweeping_state().Value() == kSweepingDone;
+ ConcurrentSweepingState concurrent_sweeping_state() {
+ return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
+ bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
+
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -502,10 +514,7 @@ class MemoryChunk {
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
- void AllocateLocalTracker();
void ReleaseLocalTracker();
- inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
- bool contains_array_buffers();
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
@@ -517,7 +526,7 @@ class MemoryChunk {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory();
- Address HighWaterMark() { return address() + high_water_mark_.Value(); }
+ Address HighWaterMark() { return address() + high_water_mark_; }
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
@@ -535,6 +544,14 @@ class MemoryChunk {
}
}
+ void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+ void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
+ return external_backing_store_bytes_[type];
+ }
+
inline uint32_t AddressToMarkbitIndex(Address addr) const {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
@@ -606,23 +623,12 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
- MemoryChunk* next_chunk() { return next_chunk_.Value(); }
-
- MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
+ Space* owner() const { return owner_; }
- void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
-
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
-
- Space* owner() const { return owner_.Value(); }
-
- void set_owner(Space* space) { owner_.SetValue(space); }
+ void set_owner(Space* space) { owner_ = space; }
bool IsPagedSpace() const;
- void InsertAfter(MemoryChunk* other);
- void Unlink();
-
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
@@ -630,6 +636,8 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
+ base::ListNode<MemoryChunk>& list_node() { return list_node_; }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -652,7 +660,7 @@ class MemoryChunk {
VirtualMemory reservation_;
// The space owning this memory chunk.
- base::AtomicValue<Space*> owner_;
+ std::atomic<Space*> owner_;
Heap* heap_;
@@ -661,7 +669,7 @@ class MemoryChunk {
intptr_t progress_bar_;
// Count of bytes marked black on page.
- intptr_t live_byte_count_;
+ std::atomic<intptr_t> live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
@@ -674,11 +682,11 @@ class MemoryChunk {
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
- base::AtomicValue<intptr_t> high_water_mark_;
+ std::atomic<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
+ std::atomic<intptr_t> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
@@ -699,19 +707,20 @@ class MemoryChunk {
// Byte allocated on the page, which includes all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
+
+ // Tracks off-heap memory used by this memory chunk.
+ std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
+
// Freed memory that was not added to the free list.
size_t wasted_memory_;
- // next_chunk_ holds a pointer of type MemoryChunk
- base::AtomicValue<MemoryChunk*> next_chunk_;
- // prev_chunk_ holds a pointer of type MemoryChunk
- base::AtomicValue<MemoryChunk*> prev_chunk_;
+ base::ListNode<MemoryChunk> list_node_;
FreeListCategory* categories_[kNumberOfCategories];
LocalArrayBufferTracker* local_tracker_;
- intptr_t young_generation_live_byte_count_;
+ std::atomic<intptr_t> young_generation_live_byte_count_;
Bitmap* young_generation_bitmap_;
private:
@@ -729,6 +738,9 @@ class MemoryChunk {
friend class PagedSpace;
};
+static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
+ "sizeof(std::atomic<intptr_t>) == kPointerSize");
+
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
"kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
@@ -779,18 +791,12 @@ class Page : public MemoryChunk {
static Page* ConvertNewToOld(Page* old_page);
- // Create a Page object that is only used as anchor for the doubly-linked
- // list of real pages.
- explicit Page(Space* owner) { InitializeAsAnchor(owner); }
-
inline void MarkNeverAllocateForTesting();
inline void MarkEvacuationCandidate();
inline void ClearEvacuationCandidate();
- Page* next_page() { return static_cast<Page*>(next_chunk()); }
- Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
- void set_next_page(Page* page) { set_next_chunk(page); }
- void set_prev_page(Page* page) { set_prev_chunk(page); }
+ Page* next_page() { return static_cast<Page*>(list_node_.next()); }
+ Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
@@ -817,6 +823,10 @@ class Page : public MemoryChunk {
DCHECK(SweepingDone());
}
+ void AllocateLocalTracker();
+ inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
+ bool contains_array_buffers();
+
void ResetFreeListStatistics();
size_t AvailableInFreeList();
@@ -830,8 +840,6 @@ class Page : public MemoryChunk {
return categories_[type];
}
- bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
-
size_t wasted_memory() { return wasted_memory_; }
void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
size_t allocated_bytes() { return allocated_bytes_; }
@@ -863,8 +871,6 @@ class Page : public MemoryChunk {
private:
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
- void InitializeAsAnchor(Space* owner);
-
friend class MemoryAllocator;
};
@@ -880,11 +886,9 @@ class LargePage : public MemoryChunk {
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
inline LargePage* next_page() {
- return static_cast<LargePage*>(next_chunk());
+ return static_cast<LargePage*>(list_node_.next());
}
- inline void set_next_page(LargePage* page) { set_next_chunk(page); }
-
// Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned.
Address GetAddressToShrink(Address object_address, size_t object_size);
@@ -914,16 +918,26 @@ class Space : public Malloced {
heap_(heap),
id_(id),
committed_(0),
- max_committed_(0),
- external_backing_store_bytes_(0) {}
+ max_committed_(0) {
+ external_backing_store_bytes_ =
+ new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
+ external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
+ 0;
+ }
- virtual ~Space() {}
+ virtual ~Space() {
+ delete[] external_backing_store_bytes_;
+ external_backing_store_bytes_ = nullptr;
+ }
Heap* heap() const { return heap_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ const char* name() { return AllocationSpaceName(id_); }
+
V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
AllocationObserver* observer);
@@ -952,8 +966,9 @@ class Space : public Malloced {
virtual size_t SizeOfObjects() { return Size(); }
// Returns amount of off-heap memory in-use by objects in this Space.
- virtual size_t ExternalBackingStoreBytes() const {
- return external_backing_store_bytes_;
+ virtual size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const {
+ return external_backing_store_bytes_[type];
}
// Approximate amount of physical memory committed for this space.
@@ -985,15 +1000,23 @@ class Space : public Malloced {
committed_ -= bytes;
}
- void IncrementExternalBackingStoreBytes(size_t amount) {
- external_backing_store_bytes_ += amount;
+ void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ external_backing_store_bytes_[type] += amount;
}
- void DecrementExternalBackingStoreBytes(size_t amount) {
- external_backing_store_bytes_ -= amount;
+ void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ DCHECK_GE(external_backing_store_bytes_[type], amount);
+ external_backing_store_bytes_[type] -= amount;
}
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+ MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+
+ base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -1006,6 +1029,12 @@ class Space : public Malloced {
std::vector<AllocationObserver*> allocation_observers_;
+ // The List manages the pages that belong to the given space.
+ base::List<MemoryChunk> memory_chunk_list_;
+
+ // Tracks off-heap memory used by this space.
+ std::atomic<size_t>* external_backing_store_bytes_;
+
private:
bool allocation_observers_paused_;
Heap* heap_;
@@ -1015,9 +1044,6 @@ class Space : public Malloced {
size_t committed_;
size_t max_committed_;
- // Tracks off-heap memory used by this space.
- std::atomic<size_t> external_backing_store_bytes_;
-
DISALLOW_COPY_AND_ASSIGN(Space);
};
@@ -1042,15 +1068,8 @@ class MemoryChunkValidator {
// manages a range of virtual memory.
class CodeRange {
public:
- explicit CodeRange(Isolate* isolate);
- ~CodeRange() {
- if (virtual_memory_.IsReserved()) virtual_memory_.Free();
- }
-
- // Reserves a range of virtual memory, but does not commit any of it.
- // Can only be called once, at heap initialization time.
- // Returns false on failure.
- bool SetUp(size_t requested_size);
+ CodeRange(Isolate* isolate, size_t requested_size);
+ ~CodeRange();
bool valid() { return virtual_memory_.IsReserved(); }
Address start() {
@@ -1123,10 +1142,31 @@ class CodeRange {
// The block at current_allocation_block_index_ is the current block.
std::vector<FreeBlock> allocation_list_;
size_t current_allocation_block_index_;
+ size_t requested_code_range_size_;
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
+// The process-wide singleton that keeps track of code range regions with the
+// intention to reuse free code range regions as a workaround for CFG memory
+// leaks (see crbug.com/870054).
+class CodeRangeAddressHint {
+ public:
+ // Returns the most recently freed code range start address for the given
+ // size. If there is no such entry, then a random address is returned.
+ V8_EXPORT_PRIVATE void* GetAddressHint(size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(void* code_range_start,
+ size_t code_range_size);
+
+ private:
+ base::Mutex mutex_;
+ // A map from code range size to an array of recently freed code range
+ // addresses. There should be O(1) different code range sizes.
+ // The length of each array is limited by the peak number of code ranges,
+ // which should be also O(1).
+ std::map<size_t, std::vector<void*>> recently_freed_;
+};
class SkipList {
public:
@@ -1234,6 +1274,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void EnsureUnmappingCompleted();
void TearDown();
int NumberOfChunks();
+ size_t CommittedBufferedMemory();
private:
static const int kReservedQueueingSlots = 64;
@@ -1281,7 +1322,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t pending_unmapping_tasks_;
- base::AtomicNumber<intptr_t> active_unmapping_tasks_;
+ std::atomic<intptr_t> active_unmapping_tasks_;
friend class MemoryAllocator;
};
@@ -1318,11 +1359,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
static intptr_t GetCommitPageSize();
- explicit MemoryAllocator(Isolate* isolate);
-
- // Initializes its internal bookkeeping structures.
- // Max capacity of the total space and executable memory limit.
- bool SetUp(size_t max_capacity, size_t code_range_size);
+ MemoryAllocator(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size);
void TearDown();
@@ -1340,10 +1378,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
- size_t Size() { return size_.Value(); }
+ size_t Size() { return size_; }
// Returns allocated executable spaces in bytes.
- size_t SizeExecutable() { return size_executable_.Value(); }
+ size_t SizeExecutable() { return size_executable_; }
// Returns the maximum available bytes of heaps.
size_t Available() {
@@ -1359,8 +1397,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
- return address < lowest_ever_allocated_.Value() ||
- address >= highest_ever_allocated_.Value();
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
}
// Returns a MemoryChunk in which the memory region from commit_area_size to
@@ -1405,9 +1443,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// and false otherwise.
bool UncommitBlock(Address start, size_t size);
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-nullptr bit pattern.
- void ZapBlock(Address start, size_t size);
+ // Zaps a contiguous block of memory [start..(start+size)[ with
+ // a given zap value.
+ void ZapBlock(Address start, size_t size, uintptr_t zap_value);
V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
Address start,
@@ -1443,11 +1481,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// values only if they did not change in between.
Address ptr = kNullAddress;
do {
- ptr = lowest_ever_allocated_.Value();
- } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+ ptr = lowest_ever_allocated_;
+ } while ((low < ptr) &&
+ !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
do {
- ptr = highest_ever_allocated_.Value();
- } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
+ ptr = highest_ever_allocated_;
+ } while ((high > ptr) &&
+ !highest_ever_allocated_.compare_exchange_weak(ptr, high));
}
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
@@ -1469,17 +1509,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
size_t capacity_;
// Allocated space size in bytes.
- base::AtomicNumber<size_t> size_;
+ std::atomic<size_t> size_;
// Allocated executable space size in bytes.
- base::AtomicNumber<size_t> size_executable_;
+ std::atomic<size_t> size_executable_;
// We keep the lowest and highest addresses allocated as a quick way
// of determining that pointers are outside the heap. The estimate is
// conservative, i.e. not all addresses in 'allocated' space are allocated
// to our heap. The range is [lowest, highest[, inclusive on the low end
// and exclusive on the high end.
- base::AtomicValue<Address> lowest_ever_allocated_;
- base::AtomicValue<Address> highest_ever_allocated_;
+ std::atomic<Address> lowest_ever_allocated_;
+ std::atomic<Address> highest_ever_allocated_;
VirtualMemory last_chunk_;
Unmapper unmapper_;
@@ -1606,25 +1646,21 @@ class LinearAllocationArea {
set_limit(limit);
}
- INLINE(void set_top(Address top)) {
+ V8_INLINE void set_top(Address top) {
SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
top_ = top;
}
- INLINE(Address top()) const {
+ V8_INLINE Address top() const {
SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
return top_;
}
Address* top_address() { return &top_; }
- INLINE(void set_limit(Address limit)) {
- limit_ = limit;
- }
+ V8_INLINE void set_limit(Address limit) { limit_ = limit; }
- INLINE(Address limit()) const {
- return limit_;
- }
+ V8_INLINE Address limit() const { return limit_; }
Address* limit_address() { return &limit_; }
@@ -1669,7 +1705,7 @@ class AllocationStats BASE_EMBEDDED {
}
// Accessors for the allocation statistics.
- size_t Capacity() { return capacity_.Value(); }
+ size_t Capacity() { return capacity_; }
size_t MaxCapacity() { return max_capacity_; }
size_t Size() { return size_; }
#ifdef DEBUG
@@ -1694,19 +1730,16 @@ class AllocationStats BASE_EMBEDDED {
}
void DecreaseCapacity(size_t bytes) {
- size_t capacity = capacity_.Value();
- DCHECK_GE(capacity, bytes);
- DCHECK_GE(capacity - bytes, size_);
- USE(capacity);
- capacity_.Decrement(bytes);
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(capacity_ - bytes, size_);
+ capacity_ -= bytes;
}
void IncreaseCapacity(size_t bytes) {
- size_t capacity = capacity_.Value();
- DCHECK_GE(capacity + bytes, capacity);
- capacity_.Increment(bytes);
- if (capacity > max_capacity_) {
- max_capacity_ = capacity;
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
}
}
@@ -1715,7 +1748,7 @@ class AllocationStats BASE_EMBEDDED {
// bookkeeping structures) currently in the space.
// During evacuation capacity of the main spaces is accessed from multiple
// threads to check the old generation hard limit.
- base::AtomicNumber<size_t> capacity_;
+ std::atomic<size_t> capacity_;
// |max_capacity_|: The maximum capacity ever observed.
size_t max_capacity_;
@@ -1806,7 +1839,7 @@ class V8_EXPORT_PRIVATE FreeList {
void Reset();
void ResetStats() {
- wasted_bytes_.SetValue(0);
+ wasted_bytes_ = 0;
ForAllFreeListCategories(
[](FreeListCategory* category) { category->ResetStats(); });
}
@@ -1834,7 +1867,7 @@ class V8_EXPORT_PRIVATE FreeList {
size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
- size_t wasted_bytes() { return wasted_bytes_.Value(); }
+ size_t wasted_bytes() { return wasted_bytes_; }
template <typename Callback>
void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1930,7 +1963,7 @@ class V8_EXPORT_PRIVATE FreeList {
return categories_[type];
}
- base::AtomicNumber<size_t> wasted_bytes_;
+ std::atomic<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
@@ -2060,16 +2093,6 @@ class V8_EXPORT_PRIVATE PagedSpace
~PagedSpace() override { TearDown(); }
- // Set up the space using the given address range of virtual memory (from
- // the memory allocator's initial chunk) if possible. If the block of
- // addresses is not big enough to contain a single page-aligned page, a
- // fresh chunk will be allocated.
- bool SetUp();
-
- // Returns true if the space has been successfully set up and not
- // subsequently torn down.
- bool HasBeenSetUp();
-
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
inline bool Contains(Object* o);
@@ -2196,11 +2219,10 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- // The dummy page that anchors the linked list of pages.
- Page* anchor() { return &anchor_; }
-
Page* InitializePage(MemoryChunk* chunk, Executability executable);
+
void ReleasePage(Page* page);
+
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
@@ -2214,7 +2236,7 @@ class V8_EXPORT_PRIVATE PagedSpace
#ifdef VERIFY_HEAP
// Verify integrity of this space.
- virtual void Verify(ObjectVisitor* visitor);
+ virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
void VerifyLiveBytes();
@@ -2234,9 +2256,6 @@ class V8_EXPORT_PRIVATE PagedSpace
static void ResetCodeStatistics(Isolate* isolate);
#endif
- Page* FirstPage() { return anchor_.next_page(); }
- Page* LastPage() { return anchor_.prev_page(); }
-
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
@@ -2262,8 +2281,10 @@ class V8_EXPORT_PRIVATE PagedSpace
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
- iterator begin() { return iterator(anchor_.next_page()); }
- iterator end() { return iterator(&anchor_); }
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
@@ -2294,7 +2315,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// smaller, initial pages.
virtual bool snapshotable() { return true; }
- bool HasPages() { return anchor_.next_page() != &anchor_; }
+ bool HasPages() { return first_page() != nullptr; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
@@ -2346,9 +2367,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The dummy page that anchors the double linked list of pages.
- Page anchor_;
-
// The space's free list.
FreeList free_list_;
@@ -2384,7 +2402,6 @@ class SemiSpace : public Space {
age_mark_(kNullAddress),
committed_(false),
id_(semispace),
- anchor_(this),
current_page_(nullptr),
pages_used_(0) {}
@@ -2394,7 +2411,6 @@ class SemiSpace : public Space {
void SetUp(size_t initial_capacity, size_t maximum_capacity);
void TearDown();
- bool HasBeenSetUp() { return maximum_capacity_ != 0; }
bool Commit();
bool Uncommit();
@@ -2411,19 +2427,17 @@ class SemiSpace : public Space {
bool EnsureCurrentCapacity();
+ Address space_end() { return memory_chunk_list_.back()->area_end(); }
+
// Returns the start address of the first page of the space.
Address space_start() {
- DCHECK_NE(anchor_.next_page(), anchor());
- return anchor_.next_page()->area_start();
+ DCHECK_NE(memory_chunk_list_.front(), nullptr);
+ return memory_chunk_list_.front()->area_start();
}
- Page* first_page() { return anchor_.next_page(); }
Page* current_page() { return current_page_; }
int pages_used() { return pages_used_; }
- // Returns one past the end address of the space.
- Address space_end() { return anchor_.prev_page()->area_end(); }
-
// Returns the start address of the current page of the space.
Address page_low() { return current_page_->area_start(); }
@@ -2436,7 +2450,7 @@ class SemiSpace : public Space {
// that we need to account for the next page already for this check as we
// could potentially fill the whole page after advancing.
const bool reached_max_pages = (pages_used_ + 1) == max_pages();
- if (next_page == anchor() || reached_max_pages) {
+ if (next_page == nullptr || reached_max_pages) {
return false;
}
current_page_ = next_page;
@@ -2449,6 +2463,7 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
+
Page* InitializePage(MemoryChunk* chunk, Executability executable);
// Age mark accessors.
@@ -2482,8 +2497,11 @@ class SemiSpace : public Space {
UNREACHABLE();
}
- iterator begin() { return iterator(anchor_.next_page()); }
- iterator end() { return iterator(anchor()); }
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+ Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
@@ -2503,9 +2521,8 @@ class SemiSpace : public Space {
#endif
private:
- void RewindPages(Page* start, int num_pages);
+ void RewindPages(int num_pages);
- inline Page* anchor() { return &anchor_; }
inline int max_pages() {
return static_cast<int>(current_capacity_ / Page::kPageSize);
}
@@ -2529,8 +2546,8 @@ class SemiSpace : public Space {
bool committed_;
SemiSpaceId id_;
- Page anchor_;
Page* current_page_;
+
int pages_used_;
friend class NewSpace;
@@ -2569,27 +2586,19 @@ class NewSpace : public SpaceWithLinearArea {
public:
typedef PageIterator iterator;
- explicit NewSpace(Heap* heap)
- : SpaceWithLinearArea(heap, NEW_SPACE),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_() {}
+ NewSpace(Heap* heap, size_t initial_semispace_capacity,
+ size_t max_semispace_capacity);
+
+ ~NewSpace() override { TearDown(); }
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
inline bool Contains(Object* o);
- bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
-
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
- // True if the space has been set up but not torn down.
- bool HasBeenSetUp() {
- return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
- }
-
// Flip the pair of spaces.
void Flip();
@@ -2643,9 +2652,10 @@ class NewSpace : public SpaceWithLinearArea {
return Capacity() - Size();
}
- size_t ExternalBackingStoreBytes() const override {
- DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes());
- return to_space_.ExternalBackingStoreBytes();
+ size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const override {
+ DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
+ return to_space_.ExternalBackingStoreBytes(type);
}
size_t AllocatedSinceLastGC() {
@@ -2701,28 +2711,29 @@ class NewSpace : public SpaceWithLinearArea {
void ResetOriginalTop() {
DCHECK_GE(top(), original_top());
DCHECK_LE(top(), original_limit());
- original_top_.SetValue(top());
+ original_top_ = top();
}
- Address original_top() { return original_top_.Value(); }
- Address original_limit() { return original_limit_.Value(); }
+ Address original_top() { return original_top_; }
+ Address original_limit() { return original_limit_; }
- // Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.space_start(); }
+ // Return the address of the first allocatable address in the active
+ // semispace. This may be the address where the first object resides.
+ Address first_allocatable_address() { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRawAligned(
- int size_in_bytes, AllocationAlignment alignment));
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
- V8_WARN_UNUSED_RESULT INLINE(
- AllocationResult AllocateRawUnaligned(int size_in_bytes));
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawUnaligned(int size_in_bytes);
- V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRaw(
- int size_in_bytes, AllocationAlignment alignment));
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment);
@@ -2737,18 +2748,6 @@ class NewSpace : public SpaceWithLinearArea {
// it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
- // Get the extent of the inactive semispace (for use as a marking stack,
- // or to zap it). Notice: space-addresses are not necessarily on the
- // same page, so FromSpaceStart() might be above FromSpaceEnd().
- Address FromSpacePageLow() { return from_space_.page_low(); }
- Address FromSpacePageHigh() { return from_space_.page_high(); }
- Address FromSpaceStart() { return from_space_.space_start(); }
- Address FromSpaceEnd() { return from_space_.space_end(); }
-
- // Get the extent of the active semispace's pages' memory.
- Address ToSpaceStart() { return to_space_.space_start(); }
- Address ToSpaceEnd() { return to_space_.space_end(); }
-
inline bool ToSpaceContainsSlow(Address a);
inline bool FromSpaceContainsSlow(Address a);
inline bool ToSpaceContains(Object* o);
@@ -2763,7 +2762,7 @@ class NewSpace : public SpaceWithLinearArea {
#ifdef VERIFY_HEAP
// Verify the active semispace.
- virtual void Verify();
+ virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
@@ -2786,6 +2785,9 @@ class NewSpace : public SpaceWithLinearArea {
SemiSpace* active_space() { return &to_space_; }
+ Page* first_page() { return to_space_.first_page(); }
+ Page* last_page() { return to_space_.last_page(); }
+
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -2802,8 +2804,8 @@ class NewSpace : public SpaceWithLinearArea {
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
- base::AtomicValue<Address> original_top_;
- base::AtomicValue<Address> original_limit_;
+ std::atomic<Address> original_top_;
+ std::atomic<Address> original_limit_;
// The semispaces.
SemiSpace to_space_;
@@ -2907,8 +2909,7 @@ class CodeSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
- MapSpace(Heap* heap, AllocationSpace id)
- : PagedSpace(heap, id, NOT_EXECUTABLE) {}
+ explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2940,7 +2941,9 @@ class ReadOnlySpace : public PagedSpace {
ReadOnlySpace* space_;
};
- ReadOnlySpace(Heap* heap, AllocationSpace id, Executability executable);
+ explicit ReadOnlySpace(Heap* heap);
+
+ bool writable() const { return !is_marked_read_only_; }
void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
@@ -2968,11 +2971,10 @@ class LargeObjectSpace : public Space {
public:
typedef LargePageIterator iterator;
+ explicit LargeObjectSpace(Heap* heap);
LargeObjectSpace(Heap* heap, AllocationSpace id);
- virtual ~LargeObjectSpace();
- // Initializes internal data structures.
- bool SetUp();
+ ~LargeObjectSpace() override { TearDown(); }
// Releases internal resources, frees objects in this space.
void TearDown();
@@ -2982,13 +2984,11 @@ class LargeObjectSpace : public Space {
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
// Available bytes for objects in this space.
- inline size_t Available() override;
+ size_t Available() override;
size_t Size() override { return size_; }
size_t SizeOfObjects() override { return objects_size_; }
@@ -3026,14 +3026,16 @@ class LargeObjectSpace : public Space {
bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
// Checks whether the space is empty.
- bool IsEmpty() { return first_page_ == nullptr; }
+ bool IsEmpty() { return first_page() == nullptr; }
- LargePage* first_page() { return first_page_; }
+ LargePage* first_page() {
+ return reinterpret_cast<LargePage*>(Space::first_page());
+ }
// Collect code statistics.
void CollectCodeStatistics();
- iterator begin() { return iterator(first_page_); }
+ iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
@@ -3041,16 +3043,17 @@ class LargeObjectSpace : public Space {
base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
#ifdef VERIFY_HEAP
- virtual void Verify();
+ virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
void Print() override;
#endif
+ protected:
+ LargePage* AllocateLargePage(int object_size, Executability executable);
+
private:
- // The head of the linked list of large object chunks.
- LargePage* first_page_;
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
@@ -3065,6 +3068,15 @@ class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
};
+class NewLargeObjectSpace : public LargeObjectSpace {
+ public:
+ explicit NewLargeObjectSpace(Heap* heap);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
+
+ // Available bytes for objects in this space.
+ size_t Available() override;
+};
class LargeObjectIterator : public ObjectIterator {
public:
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index efd5d30486..8b62213cb6 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -17,7 +17,7 @@ namespace internal {
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
- sweeper_->stop_sweeper_tasks_.SetValue(true);
+ sweeper_->stop_sweeper_tasks_ = true;
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->AbortAndWaitForTasks();
@@ -34,7 +34,7 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
}
Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
- sweeper_->stop_sweeper_tasks_.SetValue(false);
+ sweeper_->stop_sweeper_tasks_ = false;
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
@@ -133,7 +133,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
};
void Sweeper::StartSweeping() {
- CHECK(!stop_sweeper_tasks_.Value());
+ CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
MajorNonAtomicMarkingState* marking_state =
@@ -366,14 +366,14 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
- p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+ p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
- while (!stop_sweeper_tasks_.Value() &&
+ while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
ParallelSweepPage(page, identity);
}
@@ -419,9 +419,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
- DCHECK_EQ(Page::kSweepingPending,
- page->concurrent_sweeping_state().Value());
- page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
+ DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
+ page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
@@ -467,17 +466,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
- DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
+ DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
- DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
page->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
@@ -569,10 +568,10 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
- DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
- page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ page->set_concurrent_sweeping_state(Page::kSweepingPending);
}
void Sweeper::MakeIterable(Page* page) {
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index af2e5629c6..90a429b3ea 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -188,7 +188,7 @@ class Sweeper {
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
- base::AtomicValue<bool> stop_sweeper_tasks_;
+ std::atomic<bool> stop_sweeper_tasks_;
// Pages that are only made iterable but have their free lists ignored.
IterabilityList iterability_list_;
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index bb3eae2228..db6e572df7 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -43,7 +43,7 @@ class Worklist {
// Returns true if the worklist is empty. Can only be used from the main
// thread without concurrent access.
- bool IsGlobalEmpty() { return worklist_->IsGlobalEmpty(); }
+ bool IsEmpty() { return worklist_->IsEmpty(); }
bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
@@ -69,7 +69,7 @@ class Worklist {
}
~Worklist() {
- CHECK(IsGlobalEmpty());
+ CHECK(IsEmpty());
for (int i = 0; i < num_tasks_; i++) {
DCHECK_NOT_NULL(private_push_segment(i));
DCHECK_NOT_NULL(private_pop_segment(i));
@@ -78,6 +78,15 @@ class Worklist {
}
}
+  // Swaps content with the given worklist. Local buffers need to
+  // be empty. Not thread safe.
+ void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
+ CHECK(AreLocalsEmpty());
+ CHECK(other.AreLocalsEmpty());
+
+ global_pool_.Swap(other.global_pool_);
+ }
+
bool Push(int task_id, EntryType entry) {
DCHECK_LT(task_id, num_tasks_);
DCHECK_NOT_NULL(private_push_segment(task_id));
@@ -119,11 +128,16 @@ class Worklist {
bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
- bool IsGlobalEmpty() {
+ bool IsEmpty() {
+ if (!AreLocalsEmpty()) return false;
+ return global_pool_.IsEmpty();
+ }
+
+ bool AreLocalsEmpty() {
for (int i = 0; i < num_tasks_; i++) {
if (!IsLocalEmpty(i)) return false;
}
- return global_pool_.IsEmpty();
+ return true;
}
size_t LocalSize(int task_id) {
@@ -159,6 +173,20 @@ class Worklist {
global_pool_.Update(callback);
}
+ // Calls the specified callback on each element of the deques.
+ // The signature of the callback is:
+ // void Callback(EntryType entry).
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Iterate(callback);
+ private_push_segment(i)->Iterate(callback);
+ }
+ global_pool_.Iterate(callback);
+ }
+
template <typename Callback>
void IterateGlobalPool(Callback callback) {
global_pool_.Iterate(callback);
@@ -246,6 +274,13 @@ class Worklist {
public:
GlobalPool() : top_(nullptr) {}
+ // Swaps contents, not thread safe.
+ void Swap(GlobalPool& other) {
+ Segment* temp = top_;
+ set_top(other.top_);
+ other.set_top(temp);
+ }
+
V8_INLINE void Push(Segment* segment) {
base::LockGuard<base::Mutex> guard(&lock_);
segment->set_next(top_);
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 658f8f49d5..429953c0e6 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -72,8 +72,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ IsWasmStubCall(rmode_) || IsEmbeddedObject(rmode_) ||
+ IsExternalReference(rmode_) || IsOffHeapTarget(rmode_));
return pc_;
}
@@ -97,7 +97,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
return Handle<HeapObject>::cast(Memory::Object_Handle_at(pc_));
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -106,9 +106,8 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
+ heap->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -138,15 +137,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Memory::Address_at(pc_) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
- }
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return static_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
@@ -185,7 +175,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 3d319edae9..a3a76b8b64 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -52,6 +52,7 @@
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/conversions-inl.h"
+#include "src/deoptimizer.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -187,8 +188,10 @@ void Displacement::init(Label* L, Type type) {
// Implementation of RelocInfo
const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::JS_TO_WASM_CALL;
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -203,24 +206,9 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
-
-uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- Memory::Address_at(pc_) = address;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
- }
-}
-
-void RelocInfo::set_embedded_size(uint32_t size,
- ICacheFlushMode icache_flush_mode) {
- Memory::uint32_at(pc_) = size;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(uint32_t));
- }
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
@@ -235,6 +223,11 @@ Address RelocInfo::js_to_wasm_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return Memory::uint32_at(pc_);
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -310,8 +303,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@@ -330,8 +323,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
#define EMIT(x) \
*pc_++ = (x)
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size) {
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
@@ -1641,23 +1635,31 @@ void Assembler::call(CodeStub* stub) {
emit(Immediate::EmbeddedCode(stub));
}
-void Assembler::jmp(Label* L, Label::Distance distance) {
+void Assembler::jmp_rel(int offset) {
EnsureSpace ensure_space(this);
+ const int short_size = 2;
+ const int long_size = 5;
+ if (is_int8(offset - short_size)) {
+ // 1110 1011 #8-bit disp.
+ EMIT(0xEB);
+ EMIT((offset - short_size) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp.
+ EMIT(0xE9);
+ emit(offset - long_size);
+ }
+}
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- DCHECK_LE(offs, 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
+ int offset = L->pos() - pc_offset();
+ DCHECK_LE(offset, 0); // backward jump.
+ jmp_rel(offset);
+ return;
+ }
+
+ EnsureSpace ensure_space(this);
+ if (distance == Label::kNear) {
EMIT(0xEB);
emit_near_disp(L);
} else {
@@ -2605,16 +2607,6 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
-void Assembler::ptest(XMMRegister dst, Operand src) {
- DCHECK(IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x17);
- emit_sse_operand(dst, src);
-}
-
void Assembler::psllw(XMMRegister reg, int8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2706,6 +2698,15 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x70);
+ emit_sse_operand(dst, src);
+ EMIT(shuffle);
+}
+
void Assembler::pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2735,6 +2736,17 @@ void Assembler::pblendw(XMMRegister dst, Operand src, uint8_t mask) {
EMIT(mask);
}
+void Assembler::palignr(XMMRegister dst, Operand src, uint8_t mask) {
+ DCHECK(IsEnabled(SSSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x0F);
+ emit_sse_operand(dst, src);
+ EMIT(mask);
+}
+
void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2959,6 +2971,11 @@ void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
EMIT(imm8);
}
+void Assembler::vpshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
+ vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
+ EMIT(shuffle);
+}
+
void Assembler::vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
EMIT(shuffle);
@@ -2975,6 +2992,12 @@ void Assembler::vpblendw(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(mask);
}
+void Assembler::vpalignr(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t mask) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(mask);
+}
+
void Assembler::vpextrb(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
@@ -3198,6 +3221,12 @@ void Assembler::GrowBuffer() {
*p += pc_delta;
}
+ // Relocate js-to-wasm calls (which are encoded pc-relative).
+ for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ !it.done(); it.next()) {
+ it.rinfo()->apply(pc_delta);
+ }
+
DCHECK(!buffer_overflow());
}
@@ -3298,9 +3327,10 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
DCHECK(!RelocInfo::IsNone(rmode));
+ if (options().disable_reloc_info_for_patching) return;
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 6d72e00113..40a981f53f 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -40,6 +40,7 @@
#include <deque>
#include "src/assembler.h"
+#include "src/ia32/constants-ia32.h"
#include "src/ia32/sse-instr.h"
#include "src/isolate.h"
#include "src/utils.h"
@@ -323,22 +324,22 @@ enum ScaleFactor {
class Operand {
public:
// reg
- INLINE(explicit Operand(Register reg)) { set_modrm(3, reg); }
+ V8_INLINE explicit Operand(Register reg) { set_modrm(3, reg); }
// XMM reg
- INLINE(explicit Operand(XMMRegister xmm_reg)) {
+ V8_INLINE explicit Operand(XMMRegister xmm_reg) {
Register reg = Register::from_code(xmm_reg.code());
set_modrm(3, reg);
}
// [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode)) {
+ V8_INLINE explicit Operand(int32_t disp, RelocInfo::Mode rmode) {
set_modrm(0, ebp);
set_dispr(disp, rmode);
}
// [disp/r]
- INLINE(explicit Operand(Immediate imm)) {
+ V8_INLINE explicit Operand(Immediate imm) {
set_modrm(0, ebp);
set_dispr(imm.immediate(), imm.rmode_);
}
@@ -500,9 +501,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -866,6 +865,9 @@ class Assembler : public AssemblerBase {
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(Operand adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+  // unconditional jump relative to the current address. Low-level routine,
+ // use with caution!
+ void jmp_rel(int offset);
// Conditional jumps
void j(Condition cc,
@@ -1111,9 +1113,6 @@ class Assembler : public AssemblerBase {
void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
- void ptest(XMMRegister dst, XMMRegister src) { ptest(dst, Operand(src)); }
- void ptest(XMMRegister dst, Operand src);
-
void psllw(XMMRegister reg, int8_t shift);
void pslld(XMMRegister reg, int8_t shift);
void psrlw(XMMRegister reg, int8_t shift);
@@ -1125,6 +1124,10 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
+ void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ pshufhw(dst, Operand(src), shuffle);
+ }
+ void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
pshuflw(dst, Operand(src), shuffle);
}
@@ -1139,6 +1142,11 @@ class Assembler : public AssemblerBase {
}
void pblendw(XMMRegister dst, Operand src, uint8_t mask);
+ void palignr(XMMRegister dst, XMMRegister src, uint8_t mask) {
+ palignr(dst, Operand(src), mask);
+ }
+ void palignr(XMMRegister dst, Operand src, uint8_t mask);
+
void pextrb(Register dst, XMMRegister src, int8_t offset) {
pextrb(Operand(dst), src, offset);
}
@@ -1431,10 +1439,6 @@ class Assembler : public AssemblerBase {
}
void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
- void vptest(XMMRegister dst, XMMRegister src) { vptest(dst, Operand(src)); }
- void vptest(XMMRegister dst, Operand src) {
- vinstr(0x17, dst, xmm0, src, k66, k0F38, kWIG);
- }
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1442,6 +1446,10 @@ class Assembler : public AssemblerBase {
void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8);
+ void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ vpshufhw(dst, Operand(src), shuffle);
+ }
+ void vpshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshuflw(dst, Operand(src), shuffle);
}
@@ -1457,6 +1465,12 @@ class Assembler : public AssemblerBase {
}
void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
+ void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ uint8_t mask) {
+ vpalignr(dst, src1, Operand(src2), mask);
+ }
+ void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
+
void vpextrb(Register dst, XMMRegister src, int8_t offset) {
vpextrb(Operand(dst), src, offset);
}
@@ -1684,6 +1698,7 @@ class Assembler : public AssemblerBase {
}
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+ SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
#undef DECLARE_SSE4_INSTRUCTION
#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
@@ -1699,6 +1714,18 @@ class Assembler : public AssemblerBase {
SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
#undef DECLARE_SSE34_AVX_INSTRUCTION
+#define DECLARE_SSE4_AVX_RM_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void v##instruction(XMMRegister dst, XMMRegister src) { \
+ v##instruction(dst, Operand(src)); \
+ } \
+ void v##instruction(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
+ }
+
+ SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
+#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
+
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
@@ -1841,6 +1868,8 @@ class Assembler : public AssemblerBase {
bool is_optimizable_farjmp(int idx);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
friend class EnsureSpace;
// Internal reference positions, required for (potential) patching in
@@ -1851,19 +1880,6 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- void RequestHeapObject(HeapObjectRequest request);
- void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
-
- std::forward_list<HeapObjectRequest> heap_object_requests_;
-
// Variables for this instance of assembler
int farjmp_num_ = 0;
std::deque<int> farjmp_positions_;
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 3334b4e7d2..b7a704f359 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -25,22 +25,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ pop(ecx);
- __ mov(MemOperand(esp, eax, times_4, 0), edi);
- __ push(edi);
- __ push(ebx);
- __ push(ecx);
- __ add(eax, Immediate(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- // It is important that the store buffer overflow stubs are generated first.
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
@@ -173,303 +157,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(),
- GetInitialFastElementsKind(),
- mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
- // eax - number of arguments
- // edi - constructor?
- // esp[0] - return address
- // esp[4] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ test_b(edx, Immediate(1));
- __ j(not_zero, &normal_sequence);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry.
- __ inc(edx);
-
- if (FLAG_debug_code) {
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ Assert(equal, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store r3
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ add(
- FieldOperand(ebx, AllocationSite::kTransitionInfoOrBoilerplateOffset),
- Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
-
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
- // -- ebx : AllocationSite or undefined
- // -- edi : constructor
- // -- edx : Original constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in ebx or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(ebx);
- }
-
- Label subclassing;
-
- // Enter the context of the Array function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- __ cmp(edx, edi);
- __ j(not_equal, &subclassing);
-
- Label no_info;
- // If the feedback vector is the undefined value call an array constructor
- // that doesn't use AllocationSites.
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(equal, &no_info);
-
- // Only look at the lower 16 bits of the transition info.
- __ mov(edx,
- FieldOperand(ebx, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(edx);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing.
- __ bind(&subclassing);
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- __ add(eax, Immediate(3));
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(ebx);
- __ PushReturnAddressFrom(ecx);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
-
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ mov(ecx, Operand(esp, kPointerSize));
- __ test(ecx, ecx);
- __ j(zero, &normal_sequence);
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
- }
-
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(ecx);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmp(ecx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &done);
- __ cmp(ecx, Immediate(HOLEY_ELEMENTS));
- __ Assert(
- equal,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ cmp(ecx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
diff --git a/deps/v8/src/ia32/constants-ia32.h b/deps/v8/src/ia32/constants-ia32.h
new file mode 100644
index 0000000000..38ad1280f1
--- /dev/null
+++ b/deps/v8/src/ia32/constants-ia32.h
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IA32_CONSTANTS_IA32_H_
+#define V8_IA32_CONSTANTS_IA32_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// For x86, this value is provided for uniformity with other platforms, although
+// currently no root register is present.
+constexpr int kRootRegisterBias = 0;
+
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IA32_CONSTANTS_IA32_H_
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 0c2cd1fa70..259ad6a508 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -72,7 +72,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(static_cast<int>(deopt_kind())));
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 17a27b8290..42f699bf82 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -735,10 +735,6 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0x17:
- AppendToBuffer("vptest %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
@@ -816,6 +812,16 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
SSSE3_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
SSE4_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
#undef DECLARE_SSE_AVX_DIS_CASE
+#define DECLARE_SSE_AVX_RM_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
+ opcode) \
+ case 0x##opcode: { \
+ AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+
+ SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
+#undef DECLARE_SSE_AVX_RM_DIS_CASE
default:
UnimplementedInstruction();
}
@@ -830,6 +836,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
current++;
break;
+ case 0x0F:
+ AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ current++;
+ break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightOperand(current);
@@ -975,6 +988,12 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
break;
+ case 0x70:
+ AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
case 0x7f:
AppendToBuffer("vmovdqu ");
current += PrintRightOperand(current);
@@ -1934,10 +1953,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
switch (op) {
- case 0x17:
- AppendToBuffer("ptest %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- break;
#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
case 0x##opcode: { \
AppendToBuffer(#instruction " %s,", NameOfXMMRegister(regop)); \
@@ -1947,6 +1962,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_RM_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
default:
UnimplementedInstruction();
@@ -1979,6 +1995,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
data++;
+ } else if (*data == 0x0F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ data++;
} else if (*data == 0x14) {
data++;
int mod, regop, rm;
@@ -2397,6 +2421,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x70) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pshufhw %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
@@ -2487,6 +2519,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*(data+1) == 0xAB) {
data += 2;
AppendToBuffer("rep_stos");
+ } else if (*(data + 1) == 0x90) {
+ data += 2;
+ AppendToBuffer("pause");
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index f0c1dc835f..5683fdd9e3 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -5,6 +5,9 @@
#ifndef V8_IA32_FRAME_CONSTANTS_IA32_H_
#define V8_IA32_FRAME_CONSTANTS_IA32_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -35,6 +38,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 5;
+ static constexpr int kNumberOfSavedFpParamRegs = 6;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kSimd128Size;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 754f619c93..c230087618 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -58,13 +58,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
-const Register MathPowTaggedDescriptor::exponent() { return eax; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
@@ -75,13 +68,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -178,24 +171,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // edi : the target to call
- Register registers[] = {edi, edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -204,57 +180,19 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {edi, edx, eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -309,7 +247,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (argc)
@@ -319,6 +259,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 5a03586fe2..72fc778966 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -27,9 +27,10 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -41,16 +42,7 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
@@ -68,7 +60,6 @@ void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
roots_array_start));
}
-
void MacroAssembler::CompareRoot(Register with,
Register scratch,
Heap::RootListIndex index) {
@@ -510,17 +501,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
- Push(object);
- CmpObjectType(object, FIXED_ARRAY_TYPE, object);
- Pop(object);
- Check(equal, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -623,14 +603,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
- if (type == StackFrame::INTERNAL) {
- push(Immediate(CodeObject()));
- // Check at runtime that this code object was patched correctly.
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
- }
- }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -642,6 +614,30 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
+#ifdef V8_OS_WIN
+void TurboAssembler::AllocateStackFrame(Register bytes_scratch) {
+ // In windows, we cannot increment the stack size by more than one page
+ // (minimum page size is 4KB) without accessing at least one byte on the
+ // page. Check this:
+ // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+ constexpr int kPageSize = 4 * 1024;
+ Label check_offset;
+ Label touch_next_page;
+ jmp(&check_offset);
+ bind(&touch_next_page);
+ sub(esp, Immediate(kPageSize));
+ // Just to touch the page, before we increment further.
+ mov(Operand(esp, 0), Immediate(0));
+ sub(bytes_scratch, Immediate(kPageSize));
+
+ bind(&check_offset);
+ cmp(bytes_scratch, kPageSize);
+ j(greater, &touch_next_page);
+
+ sub(esp, bytes_scratch);
+}
+#endif
+
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
Register argc) {
Push(ebp);
@@ -850,8 +846,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
Call(code, RelocInfo::CODE_TARGET);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -859,9 +855,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
Move(eax, Immediate(f->nargs));
mov(ebx, Immediate(ExternalReference::Create(f)));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, eax, ebx));
+ add(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
@@ -1134,7 +1130,8 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(fun == edi);
mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movzx_w(ebx,
+ FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(ebx);
InvokeFunctionCode(edi, new_target, expected, actual, flag);
@@ -1276,6 +1273,15 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
+void TurboAssembler::Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshufhw(dst, src, shuffle);
+ } else {
+ pshufhw(dst, src, shuffle);
+ }
+}
+
void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1294,6 +1300,24 @@ void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
}
}
+void TurboAssembler::Psraw(XMMRegister dst, int8_t shift) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsraw(dst, dst, shift);
+ } else {
+ psraw(dst, shift);
+ }
+}
+
+void TurboAssembler::Psrlw(XMMRegister dst, int8_t shift) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlw(dst, dst, shift);
+ } else {
+ psrlw(dst, shift);
+ }
+}
+
void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1336,29 +1360,43 @@ void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
UNREACHABLE();
}
-void TurboAssembler::Ptest(XMMRegister dst, Operand src) {
+void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vptest(dst, src);
+ vpshufb(dst, dst, src);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSSE3)) {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpblendw(dst, dst, src, imm8);
return;
}
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
- ptest(dst, src);
+ pblendw(dst, src, imm8);
return;
}
UNREACHABLE();
}
-void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
+void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpshufb(dst, dst, src);
+ vpalignr(dst, dst, src, imm8);
return;
}
if (CpuFeatures::IsSupported(SSSE3)) {
CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, src);
+ palignr(dst, src, imm8);
return;
}
UNREACHABLE();
@@ -1543,16 +1581,15 @@ void TurboAssembler::CheckStackAlignment() {
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
int3();
return;
}
-#endif
Move(edx, Smi::FromInt(static_cast<int>(reason)));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 84eaa215ba..5d2b9bb0af 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -9,6 +9,7 @@
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/ia32/assembler-ia32.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -25,9 +26,13 @@ constexpr Register kInterpreterAccumulatorRegister = eax;
constexpr Register kInterpreterBytecodeOffsetRegister = edx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
+
constexpr Register kJavaScriptCallArgCountRegister = eax;
constexpr Register kJavaScriptCallCodeStartRegister = ecx;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = edx;
+constexpr Register kJavaScriptCallExtraArg1Register = ebx;
+
constexpr Register kOffHeapTrampolineRegister = ecx;
constexpr Register kRuntimeCallFunctionRegister = ebx;
constexpr Register kRuntimeCallArgCountRegister = eax;
@@ -49,20 +54,13 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg8 = no_reg);
#endif
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -76,6 +74,19 @@ class TurboAssembler : public Assembler {
}
void LeaveFrame(StackFrame::Type type);
+// Allocate a stack frame of given size (i.e. decrement {esp} by the value
+// stored in the given register).
+#ifdef V8_OS_WIN
+ // On win32, take special care if the number of bytes is greater than 4096:
+ // Ensure that each page within the new stack frame is touched once in
+ // decreasing order. See
+ // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+ // Use {bytes_scratch} as scratch register for this procedure.
+ void AllocateStackFrame(Register bytes_scratch);
+#else
+ void AllocateStackFrame(Register bytes) { sub(esp, bytes); }
+#endif
+
// Print a message to stdout and abort execution.
void Abort(AbortReason reason);
@@ -123,16 +134,18 @@ class TurboAssembler : public Assembler {
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
call(target, rmode);
}
inline bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
@@ -147,6 +160,16 @@ class TurboAssembler : public Assembler {
j(zero, smi_label, distance);
}
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ cmp(a, Immediate(b));
+ j(equal, dest);
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ cmp(a, Immediate(b));
+ j(less, dest);
+ }
+
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
// Removes current frame and its arguments from the stack preserving the
@@ -201,10 +224,28 @@ class TurboAssembler : public Assembler {
void Ret();
+ void LoadRoot(Register destination, Heap::RootListIndex index) override;
+
+ // TODO(jgruber,v8:6666): Implement embedded builtins.
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override {
+ UNREACHABLE();
+ }
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override {
+ UNREACHABLE();
+ }
+ void LoadRootRelative(Register destination, int32_t offset) override {
+ UNREACHABLE();
+ }
+
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
+ void Pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ Pshufhw(dst, Operand(src), shuffle);
+ }
+ void Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
Pshuflw(dst, Operand(src), shuffle);
}
@@ -213,6 +254,8 @@ class TurboAssembler : public Assembler {
Pshufd(dst, Operand(src), shuffle);
}
void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
+ void Psraw(XMMRegister dst, int8_t shift);
+ void Psrlw(XMMRegister dst, int8_t shift);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -252,12 +295,16 @@ class TurboAssembler : public Assembler {
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+ AVX_OP3_XO(Packsswb, packsswb)
+ AVX_OP3_XO(Packuswb, packuswb)
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
+ AVX_OP3_XO(Punpcklbw, punpcklbw)
+ AVX_OP3_XO(Punpckhbw, punpckhbw)
AVX_OP3_XO(Pxor, pxor)
AVX_OP3_XO(Andps, andps)
AVX_OP3_XO(Andpd, andpd)
@@ -269,12 +316,41 @@ class TurboAssembler : public Assembler {
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
- // Non-SSE2 instructions.
- void Ptest(XMMRegister dst, XMMRegister src) { Ptest(dst, Operand(src)); }
- void Ptest(XMMRegister dst, Operand src);
+// Non-SSE2 instructions.
+#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
+ sse_scope) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, src); \
+ return; \
+ } \
+ if (CpuFeatures::IsSupported(sse_scope)) { \
+ CpuFeatureScope scope(this, sse_scope); \
+ name(dst, src); \
+ return; \
+ } \
+ UNREACHABLE(); \
+ }
+#define AVX_OP2_XO_SSE4(macro_name, name) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
+
+ AVX_OP2_XO_SSE4(Ptest, ptest)
+ AVX_OP2_XO_SSE4(Pmovsxbw, pmovsxbw)
+ AVX_OP2_XO_SSE4(Pmovsxwd, pmovsxwd)
+ AVX_OP2_XO_SSE4(Pmovzxbw, pmovzxbw)
+ AVX_OP2_XO_SSE4(Pmovzxwd, pmovzxwd)
+
+#undef AVX_OP2_WITH_TYPE_SCOPE
+#undef AVX_OP2_XO_SSE4
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
void Pshufb(XMMRegister dst, Operand src);
+ void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+ Pblendw(dst, Operand(src), imm8);
+ }
+ void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
void Psignb(XMMRegister dst, Operand src);
@@ -283,6 +359,11 @@ class TurboAssembler : public Assembler {
void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
void Psignd(XMMRegister dst, Operand src);
+ void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+ Palignr(dst, Operand(src), imm8);
+ }
+ void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
+
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
@@ -358,25 +439,17 @@ class TurboAssembler : public Assembler {
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
-
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
- protected:
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
- private:
- bool has_frame_ = false;
- bool root_array_available_ = false;
- Isolate* const isolate_;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int32_t x) {
@@ -389,7 +462,6 @@ class MacroAssembler : public TurboAssembler {
void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
// Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
@@ -555,9 +627,6 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
diff --git a/deps/v8/src/ia32/sse-instr.h b/deps/v8/src/ia32/sse-instr.h
index 98783e7c78..f9d4c59e07 100644
--- a/deps/v8/src/ia32/sse-instr.h
+++ b/deps/v8/src/ia32/sse-instr.h
@@ -42,8 +42,14 @@
V(psubsw, 66, 0F, E9) \
V(psubusb, 66, 0F, D8) \
V(psubusw, 66, 0F, D9) \
- V(punpckhdq, 66, 0F, 6A) \
+ V(punpcklbw, 66, 0F, 60) \
+ V(punpcklwd, 66, 0F, 61) \
V(punpckldq, 66, 0F, 62) \
+ V(punpcklqdq, 66, 0F, 6C) \
+ V(punpckhbw, 66, 0F, 68) \
+ V(punpckhwd, 66, 0F, 69) \
+ V(punpckhdq, 66, 0F, 6A) \
+ V(punpckhqdq, 66, 0F, 6D) \
V(pxor, 66, 0F, EF)
#define SSSE3_INSTRUCTION_LIST(V) \
@@ -66,4 +72,11 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
+#define SSE4_RM_INSTRUCTION_LIST(V) \
+ V(pmovsxbw, 66, 0F, 38, 20) \
+ V(pmovsxwd, 66, 0F, 38, 23) \
+ V(pmovzxbw, 66, 0F, 38, 30) \
+ V(pmovzxwd, 66, 0F, 38, 33) \
+ V(ptest, 66, 0F, 38, 17)
+
#endif // V8_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index a40ec297a6..f471381dd8 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -11,6 +11,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -25,7 +26,7 @@ using SloppyTNode = compiler::SloppyTNode<T>;
//////////////////// Private helpers.
// Loads dataX field from the DataHandler object.
-TNode<Object> AccessorAssembler::LoadHandlerDataField(
+TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
SloppyTNode<DataHandler> handler, int data_index) {
#ifdef DEBUG
TNode<Map> handler_map = LoadMap(handler);
@@ -57,14 +58,12 @@ TNode<Object> AccessorAssembler::LoadHandlerDataField(
CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
LoadMapInstanceSizeInWords(handler_map),
IntPtrConstant(minimum_size / kPointerSize)));
- return LoadObjectField(handler, offset);
+ return LoadMaybeWeakObjectField(handler, offset);
}
-Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
- Node* receiver_map,
- Label* if_handler,
- TVariable<MaybeObject>* var_handler,
- Label* if_miss) {
+TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
+ Node* slot, Node* vector, Node* receiver_map, Label* if_handler,
+ TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -76,15 +75,13 @@ Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
// into ElementOffsetFromIndex() allows it to be folded into a single
// [base, index, offset] indirect memory access on x64.
Node* offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
- TNode<Object> feedback =
- CAST(Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(header_size))));
+ TNode<MaybeObject> feedback = ReinterpretCast<MaybeObject>(
+ Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size))));
// Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(CAST(feedback))),
- if_miss);
+ // if we have a weak reference in feedback.
+ GotoIf(IsNotWeakReferenceTo(feedback, CAST(receiver_map)), if_miss);
TNode<MaybeObject> handler = UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), vector,
@@ -128,10 +125,11 @@ void AccessorAssembler::HandlePolymorphicCase(
}
Label next_entry(this);
- TNode<MaybeObject> element = LoadWeakFixedArrayElement(feedback, map_index);
- CSA_ASSERT(this, IsStrongHeapObject(element));
- Node* cached_map = LoadWeakCellValue(CAST(ToStrongHeapObject(element)));
- GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+ TNode<MaybeObject> maybe_cached_map =
+ LoadWeakFixedArrayElement(feedback, map_index);
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_cached_map));
+ GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
+ &next_entry);
// Found, now call handler.
TNode<MaybeObject> handler =
@@ -150,12 +148,12 @@ void AccessorAssembler::HandlePolymorphicCase(
BuildFastLoop(
start_index, end_index,
[this, receiver_map, feedback, if_handler, var_handler](Node* index) {
- TNode<MaybeObject> element = LoadWeakFixedArrayElement(feedback, index);
- CSA_ASSERT(this, IsStrongHeapObject(element));
- Node* cached_map = LoadWeakCellValue(CAST(ToStrongHeapObject(element)));
-
Label next_entry(this);
- GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+ TNode<MaybeObject> maybe_cached_map =
+ LoadWeakFixedArrayElement(feedback, index);
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_cached_map));
+ GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
+ &next_entry);
// Found, now call handler.
TNode<MaybeObject> handler =
@@ -175,7 +173,6 @@ void AccessorAssembler::HandleLoadICHandlerCase(
ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
ElementSupport support_elements) {
Comment("have_handler");
- CSA_ASSERT(this, IsObject(handler));
VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
VARIABLE(var_smi_handler, MachineRepresentation::kTagged, handler);
@@ -205,8 +202,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&call_handler);
{
- typedef LoadWithVectorDescriptor Descriptor;
- exit_point->ReturnCallStub(Descriptor(isolate()), handler, p->context,
+ exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, handler, p->context,
p->receiver, p->name, p->slot, p->vector);
}
}
@@ -254,16 +250,22 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
}
}
-Node* AccessorAssembler::LoadDescriptorValue(Node* map, Node* descriptor) {
- Node* descriptors = LoadMapDescriptors(map);
+TNode<Object> AccessorAssembler::LoadDescriptorValue(Node* map,
+ Node* descriptor) {
+ return CAST(LoadDescriptorValueOrFieldType(map, descriptor));
+}
+
+TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
+ Node* map, Node* descriptor) {
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
Node* scaled_descriptor =
IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
Node* value_index = IntPtrAdd(
scaled_descriptor, IntPtrConstant(DescriptorArray::kFirstIndex +
DescriptorArray::kEntryValueIndex));
- CSA_ASSERT(this, UintPtrLessThan(descriptor, LoadAndUntagFixedArrayBaseLength(
+ CSA_ASSERT(this, UintPtrLessThan(descriptor, LoadAndUntagWeakFixedArrayLength(
descriptors)));
- return LoadFixedArrayElement(descriptors, value_index);
+ return LoadWeakFixedArrayElement(descriptors, value_index);
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
@@ -479,8 +481,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
// the access check is enabled for this handler or not.
TNode<Object> context_cell = Select<Object>(
IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
- [=] { return LoadHandlerDataField(handler, 3); },
- [=] { return LoadHandlerDataField(handler, 2); });
+ [=] { return CAST(LoadHandlerDataField(handler, 3)); },
+ [=] { return CAST(LoadHandlerDataField(handler, 2)); });
Node* context = LoadWeakCellValueUnchecked(CAST(context_cell));
Node* foreign =
@@ -518,13 +520,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
to_name_failed(this, Label::kDeferred);
if (support_elements == kSupportElements) {
+ DCHECK_NE(on_nonexistent, OnNonExistent::kThrowReferenceError);
+
TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, var_unique.value(), p->receiver);
+ p->context, holder, var_unique.value(), p->receiver,
+ SmiConstant(on_nonexistent));
BIND(&if_index);
// TODO(mslekova): introduce TryToName that doesn't try to compute
@@ -533,12 +538,13 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&to_name_failed);
exit_point->ReturnCallRuntime(Runtime::kGetPropertyWithReceiver,
- p->context, holder, p->name, p->receiver);
-
+ p->context, holder, p->name, p->receiver,
+ SmiConstant(on_nonexistent));
} else {
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, p->name, p->receiver);
+ p->context, holder, p->name, p->receiver,
+ SmiConstant(on_nonexistent));
}
}
@@ -664,7 +670,7 @@ Node* AccessorAssembler::HandleProtoHandler(
BIND(&if_do_access_check);
{
- Node* data2 = LoadHandlerDataField(handler, 2);
+ TNode<WeakCell> data2 = CAST(LoadHandlerDataField(handler, 2));
Node* expected_native_context = LoadWeakCellValue(data2, miss);
EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
miss);
@@ -725,7 +731,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
},
miss, ic_mode);
- Node* maybe_holder_cell = LoadHandlerDataField(handler, 1);
+ TNode<Object> maybe_holder_cell = CAST(LoadHandlerDataField(handler, 1));
Label load_from_cached_holder(this), done(this);
@@ -736,7 +742,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
// For regular holders, having passed the receiver map check and the
// validity cell check implies that |holder| is alive. However, for
// global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+ Node* holder = LoadWeakCellValue(CAST(maybe_holder_cell), miss);
var_holder->Bind(holder);
Goto(&done);
@@ -803,8 +809,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_smi_handler);
{
Node* holder = p->receiver;
- CSA_ASSERT(this, IsObject(handler));
- Node* handler_word = SmiUntag(CAST(ToObject(handler)));
+ Node* handler_word = SmiUntag(CAST(handler));
Label if_fast_smi(this), if_proxy(this);
@@ -870,8 +875,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_nonsmi_handler);
{
GotoIf(IsWeakOrClearedHeapObject(handler), &store_transition_or_global);
- CSA_ASSERT(this, IsStrongHeapObject(handler));
- TNode<HeapObject> strong_handler = ToStrongHeapObject(handler);
+ TNode<HeapObject> strong_handler = CAST(handler);
TNode<Map> handler_map = LoadMap(strong_handler);
Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
@@ -884,9 +888,9 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
{
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, strong_handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, CAST(strong_handler),
+ CAST(p->context), p->receiver, p->name, p->value, p->slot,
+ p->vector);
}
}
@@ -929,21 +933,21 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
// Load last descriptor details.
Node* nof = DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
CSA_ASSERT(this, WordNotEqual(nof, IntPtrConstant(0)));
- Node* descriptors = LoadMapDescriptors(transition_map);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(transition_map);
Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
- Node* last_key_index = IntPtrAdd(
- IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor));
+ TNode<IntPtrT> last_key_index = UncheckedCast<IntPtrT>(IntPtrAdd(
+ IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
if (validate_transition_handler) {
- Node* key = LoadFixedArrayElement(descriptors, last_key_index);
+ Node* key = LoadWeakFixedArrayElement(descriptors, last_key_index);
GotoIf(WordNotEqual(key, p->name), miss);
} else {
- CSA_ASSERT(
- this,
- WordEqual(LoadFixedArrayElement(descriptors, last_key_index), p->name));
+ CSA_ASSERT(this,
+ WordEqual(BitcastMaybeObjectToWord(LoadWeakFixedArrayElement(
+ descriptors, last_key_index)),
+ p->name));
}
- Node* details =
- LoadDetailsByKeyIndex<DescriptorArray>(descriptors, last_key_index);
+ Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
if (validate_transition_handler) {
// Follow transitions only in the following cases:
// 1) name is a non-private symbol and attributes equal to NONE,
@@ -969,9 +973,9 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
true);
}
-void AccessorAssembler::CheckFieldType(Node* descriptors, Node* name_index,
- Node* representation, Node* value,
- Label* bailout) {
+void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
+ Node* name_index, Node* representation,
+ Node* value, Label* bailout) {
Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
// Ignore FLAG_track_fields etc. and always emit code for all checks,
// because this builtin is part of the snapshot and therefore should
@@ -1007,20 +1011,25 @@ void AccessorAssembler::CheckFieldType(Node* descriptors, Node* name_index,
BIND(&r_heapobject);
{
GotoIf(TaggedIsSmi(value), bailout);
- Node* field_type =
- LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
+ TNode<MaybeObject> field_type = LoadFieldTypeByKeyIndex(
+ descriptors, UncheckedCast<IntPtrT>(name_index));
intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
+ DCHECK_NE(kNoneType, kClearedWeakHeapObject);
+ DCHECK_NE(kAnyType, kClearedWeakHeapObject);
// FieldType::None can't hold any value.
- GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
+ GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type),
+ IntPtrConstant(kNoneType)),
+ bailout);
// FieldType::Any can hold any value.
- GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
- CSA_ASSERT(this, IsWeakCell(field_type));
- // Cleared WeakCells count as FieldType::None, which can't hold any value.
- field_type = LoadWeakCellValue(field_type, bailout);
+ GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type),
+ IntPtrConstant(kAnyType)),
+ &all_fine);
+ // Cleared weak references count as FieldType::None, which can't hold any
+ // value.
+ TNode<Map> field_type_map = CAST(ToWeakHeapObject(field_type, bailout));
// FieldType::Class(...) performs a map check.
- CSA_ASSERT(this, IsMap(field_type));
- Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
+ Branch(WordEqual(LoadMap(value), field_type_map), &all_fine, bailout);
}
BIND(&all_fine);
@@ -1045,16 +1054,17 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
if (FLAG_track_constant_fields && !do_transitioning_store) {
// TODO(ishell): Taking the slow path is not necessary if new and old
// values are identical.
- GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
- Int32Constant(kConst)),
+ GotoIf(Word32Equal(
+ DecodeWord32<PropertyDetails::ConstnessField>(details),
+ Int32Constant(static_cast<int32_t>(VariableMode::kConst))),
slow);
}
Node* representation =
DecodeWord32<PropertyDetails::RepresentationField>(details);
- CheckFieldType(descriptors, descriptor_name_index, representation, value,
- slow);
+ CheckFieldType(CAST(descriptors), descriptor_name_index, representation,
+ value, slow);
Node* field_index =
DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
@@ -1085,7 +1095,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
} else {
if (do_transitioning_store) {
Node* mutable_heap_number =
- AllocateHeapNumberWithValue(double_value, MUTABLE);
+ AllocateMutableHeapNumberWithValue(double_value);
StoreMap(object, object_map);
StoreObjectField(object, field_offset, mutable_heap_number);
} else {
@@ -1124,7 +1134,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
Node* double_value = ChangeNumberToFloat64(value);
Node* mutable_heap_number =
- AllocateHeapNumberWithValue(double_value, MUTABLE);
+ AllocateMutableHeapNumberWithValue(double_value);
var_value.Bind(mutable_heap_number);
Goto(&cont);
}
@@ -1164,8 +1174,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&if_descriptor);
{
// Check that constant matches value.
- Node* constant = LoadValueByKeyIndex<DescriptorArray>(
- descriptors, descriptor_name_index);
+ Node* constant = LoadValueByKeyIndex(
+ CAST(descriptors), UncheckedCast<IntPtrT>(descriptor_name_index));
GotoIf(WordNotEqual(value, constant), slow);
if (do_transitioning_store) {
@@ -1218,22 +1228,22 @@ void AccessorAssembler::HandleStoreICProtoHandler(
&if_transitioning_element_store);
BIND(&if_element_store);
{
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context,
+ p->receiver, p->name, p->value, p->slot, p->vector);
}
BIND(&if_transitioning_element_store);
{
- Node* transition_map_cell = LoadHandlerDataField(handler, 1);
- Node* transition_map = LoadWeakCellValue(transition_map_cell, miss);
- CSA_ASSERT(this, IsMap(transition_map));
+ TNode<MaybeObject> maybe_transition_map =
+ LoadHandlerDataField(handler, 1);
+ TNode<Map> transition_map =
+ CAST(ToWeakHeapObject(maybe_transition_map, miss));
GotoIf(IsDeprecatedMap(transition_map), miss);
- StoreTransitionDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- transition_map, p->value, p->slot, p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context,
+ p->receiver, p->name, transition_map, p->value, p->slot,
+ p->vector);
}
};
}
@@ -1267,7 +1277,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
&if_add_normal);
- Node* holder_cell = LoadHandlerDataField(handler, 1);
+ TNode<WeakCell> holder_cell = CAST(LoadHandlerDataField(handler, 1));
Node* holder = LoadWeakCellValue(holder_cell, miss);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
@@ -1326,8 +1336,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// the access check is enabled for this handler or not.
TNode<Object> context_cell = Select<Object>(
IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
- [=] { return LoadHandlerDataField(handler, 3); },
- [=] { return LoadHandlerDataField(handler, 2); });
+ [=] { return CAST(LoadHandlerDataField(handler, 3)); },
+ [=] { return CAST(LoadHandlerDataField(handler, 2)); });
Node* context = LoadWeakCellValueUnchecked(CAST(context_cell));
@@ -1520,12 +1530,13 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
&done);
}
Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
- Node* maybe_field_type = LoadDescriptorValue(LoadMap(holder), descriptor);
+ TNode<MaybeObject> maybe_field_type =
+ LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor);
GotoIf(TaggedIsSmi(maybe_field_type), &done);
// Check that value type matches the field type.
{
- Node* field_type = LoadWeakCellValue(maybe_field_type, bailout);
+ Node* field_type = ToWeakHeapObject(maybe_field_type, bailout);
Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
}
BIND(&done);
@@ -1791,19 +1802,10 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("dictionary elements");
GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
- TVARIABLE(IntPtrT, var_entry);
- Label if_found(this);
- NumberDictionaryLookup(CAST(elements), intptr_index, &if_found, &var_entry,
- if_hole);
- BIND(&if_found);
- // Check that the value is a data property.
- TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value());
- Node* details = LoadDetailsByKeyIndex<NumberDictionary>(elements, index);
- Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
- // TODO(jkummerow): Support accessors without missing?
- GotoIfNot(Word32Equal(kind, Int32Constant(kData)), miss);
- // Finally, load the value.
- exit_point->Return(LoadValueByKeyIndex<NumberDictionary>(elements, index));
+
+ TNode<Object> value = BasicLoadNumberDictionaryElement(
+ CAST(elements), intptr_index, miss, if_hole);
+ exit_point->Return(value);
}
BIND(&if_typed_array);
@@ -1814,8 +1816,7 @@ void AccessorAssembler::EmitElementLoad(
GotoIf(IsDetachedBuffer(buffer), miss);
// Bounds check.
- Node* length =
- SmiUntag(CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)));
+ Node* length = SmiUntag(LoadTypedArrayLength(CAST(object)));
GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
@@ -2074,7 +2075,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// Try looking up the property on the receiver; if unsuccessful, look
// for a handler in the stub cache.
- Node* descriptors = LoadMapDescriptors(receiver_map);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
Label if_descriptor_found(this), stub_cache(this);
TVARIABLE(IntPtrT, var_name_index);
@@ -2094,7 +2095,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
if (use_stub_cache == kUseStubCache) {
BIND(&stub_cache);
Comment("stub cache probe for fast property load");
- VARIABLE(var_handler, MachineRepresentation::kTagged);
+ TVARIABLE(MaybeObject, var_handler);
Label found_handler(this, &var_handler), stub_cache_miss(this);
TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name,
&found_handler, &var_handler, &stub_cache_miss);
@@ -2199,7 +2200,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
p->context, receiver /*holder is the same as receiver*/, p->name,
- receiver);
+ receiver, SmiConstant(OnNonExistent::kReturnUndefined));
}
}
@@ -2243,12 +2244,10 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
}
-void AccessorAssembler::TryProbeStubCacheTable(StubCache* stub_cache,
- StubCacheTable table_id,
- Node* entry_offset, Node* name,
- Node* map, Label* if_handler,
- Variable* var_handler,
- Label* if_miss) {
+void AccessorAssembler::TryProbeStubCacheTable(
+ StubCache* stub_cache, StubCacheTable table_id, Node* entry_offset,
+ Node* name, Node* map, Label* if_handler,
+ TVariable<MaybeObject>* var_handler, Label* if_miss) {
StubCache::Table table = static_cast<StubCache::Table>(table_id);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
@@ -2280,17 +2279,18 @@ void AccessorAssembler::TryProbeStubCacheTable(StubCache* stub_cache,
DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
stub_cache->key_reference(table).address());
- Node* handler = Load(MachineType::TaggedPointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+ TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
+ Load(MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
// We found the handler.
- var_handler->Bind(handler);
+ *var_handler = handler;
Goto(if_handler);
}
void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
Node* name, Label* if_handler,
- Variable* var_handler,
+ TVariable<MaybeObject>* var_handler,
Label* if_miss) {
Label try_secondary(this), miss(this);
@@ -2347,20 +2347,19 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
TVARIABLE(MaybeObject, var_handler);
Label try_polymorphic(this), if_handler(this, &var_handler);
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, recv_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
- CSA_ASSERT(this, IsObject(var_handler.value()));
- HandleLoadICHandlerCase(p, ToObject(var_handler.value()), &miss,
- exit_point);
+ HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, exit_point);
BIND(&try_polymorphic);
{
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &stub_call);
- HandlePolymorphicCase(recv_map, CAST(feedback), &if_handler, &var_handler,
- &miss, 2);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &stub_call);
+ HandlePolymorphicCase(recv_map, CAST(strong_feedback), &if_handler,
+ &var_handler, &miss, 2);
}
}
@@ -2398,26 +2397,27 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
- CSA_ASSERT(this, IsObject(var_handler.value()));
- HandleLoadICHandlerCase(p, ToObject(var_handler.value()), &miss,
- &direct_exit);
+ HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
{
// Check polymorphic case.
Comment("LoadIC_try_polymorphic");
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &non_inlined);
- HandlePolymorphicCase(receiver_map, CAST(feedback), &if_handler,
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &non_inlined);
+ HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
}
BIND(&non_inlined);
- LoadIC_Noninlined(p, receiver_map, feedback, &var_handler, &if_handler, &miss,
- &direct_exit);
+ {
+ LoadIC_Noninlined(p, receiver_map, strong_feedback, &var_handler,
+ &if_handler, &miss, &direct_exit);
+ }
BIND(&miss);
direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver,
@@ -2425,8 +2425,9 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
}
void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
- Node* receiver_map, Node* feedback,
- Variable* var_handler,
+ Node* receiver_map,
+ TNode<HeapObject> feedback,
+ TVariable<MaybeObject>* var_handler,
Label* if_handler, Label* miss,
ExitPoint* exit_point) {
Label try_uninitialized(this, Label::kDeferred);
@@ -2434,8 +2435,7 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
// Neither deprecated map nor monomorphic. These cases are handled in the
// bytecode handler.
CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
- CSA_ASSERT(this,
- WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)));
+ CSA_ASSERT(this, WordNotEqual(receiver_map, feedback));
CSA_ASSERT(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -2539,19 +2539,16 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
Comment("LoadGlobalIC_TryPropertyCellCase");
Label if_lexical_var(this), if_property_cell(this);
- TNode<MaybeObject> feedback_element =
+ TNode<MaybeObject> maybe_weak_ref =
LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<Object> maybe_weak_cell = ToObject(feedback_element);
- Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+ Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_property_cell);
BIND(&if_property_cell);
{
- TNode<WeakCell> weak_cell = CAST(maybe_weak_cell);
-
- // Load value or try handler case if the {weak_cell} is cleared.
+ // Load value or try handler case if the weak reference is cleared.
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_weak_ref));
TNode<PropertyCell> property_cell =
- CAST(LoadWeakCellValue(weak_cell, try_handler));
+ CAST(ToWeakHeapObject(maybe_weak_ref, try_handler));
TNode<Object> value =
LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), miss);
@@ -2561,7 +2558,7 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_lexical_var);
{
Comment("Load lexical variable");
- TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
+ TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_ref));
TNode<IntPtrT> context_index =
Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
@@ -2584,8 +2581,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, kPointerSize, slot_mode);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<Object> handler = ToObject(feedback_element);
+ TNode<Object> handler = CAST(feedback_element);
GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
miss);
@@ -2619,23 +2615,23 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
- CSA_ASSERT(this, IsObject(var_handler.value()));
- HandleLoadICHandlerCase(p, ToObject(var_handler.value()), &miss,
- &direct_exit, ICMode::kNonGlobalIC,
+ HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit,
+ ICMode::kNonGlobalIC,
OnNonExistent::kReturnUndefined, kSupportElements);
}
BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
{
// Check polymorphic case.
Comment("KeyedLoadIC_try_polymorphic");
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(feedback), &if_handler,
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
}
@@ -2643,7 +2639,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
{
// Check megamorphic case.
Comment("KeyedLoadIC_try_megamorphic");
- GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, p->context, p->receiver,
@@ -2661,7 +2658,7 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
if_notinternalized(this, Label::kDeferred);
// Fast-case: The recorded {feedback} matches the {name}.
- GotoIf(WordEqual(feedback, name), &if_polymorphic_name);
+ GotoIf(WordEqual(strong_feedback, name), &if_polymorphic_name);
// Try to internalize the {name} if it isn't already.
TryToName(name, &miss, &var_index, &if_internalized, &var_name, &miss,
@@ -2670,7 +2667,7 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
BIND(&if_internalized);
{
// The {var_name} now contains a unique name.
- Branch(WordEqual(feedback, var_name.value()), &if_polymorphic_name,
+ Branch(WordEqual(strong_feedback, var_name.value()), &if_polymorphic_name,
&miss);
}
@@ -2679,8 +2676,11 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
// Try to internalize the {name}.
Node* function = ExternalConstant(
ExternalReference::try_internalize_string_function());
- var_name.Bind(CallCFunction1(MachineType::AnyTagged(),
- MachineType::AnyTagged(), function, name));
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ var_name.Bind(CallCFunction2(
+ MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), function, isolate_ptr, name));
Goto(&if_internalized);
}
@@ -2783,25 +2783,21 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
// LoadIC handler logic below.
CSA_ASSERT(this, IsName(name));
CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map)));
- CSA_ASSERT(this, IsStrongHeapObject(LoadFeedbackVectorSlot(vector, slot, 0,
- SMI_PARAMETERS)));
- CSA_ASSERT(this, WordEqual(name, ToStrongHeapObject(LoadFeedbackVectorSlot(
+ CSA_ASSERT(this, WordEqual(name, CAST(LoadFeedbackVectorSlot(
vector, slot, 0, SMI_PARAMETERS))));
// Check if we have a matching handler for the {receiver_map}.
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, kPointerSize, SMI_PARAMETERS);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<WeakFixedArray> array = CAST(ToObject(feedback_element));
+ TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
1);
BIND(&if_handler);
{
ExitPoint direct_exit(this);
- CSA_ASSERT(this, IsObject(var_handler.value()));
- HandleLoadICHandlerCase(p, ToObject(var_handler.value()), &miss,
- &direct_exit, ICMode::kNonGlobalIC,
+ HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit,
+ ICMode::kNonGlobalIC,
OnNonExistent::kReturnUndefined, kOnlyProperties);
}
@@ -2827,7 +2823,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -2838,49 +2834,31 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
{
// Check polymorphic case.
Comment("StoreIC_try_polymorphic");
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(feedback), &if_handler,
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
}
BIND(&try_megamorphic);
{
// Check megamorphic case.
- GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_uninitialized);
TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
- &if_handler_from_stub_cache, &var_handler, &miss);
- }
- BIND(&if_handler_from_stub_cache);
- {
- // If the stub cache contains a WeakCell pointing to a Map, convert it to an
- // in-place weak reference. TODO(marja): This well get simplified once more
- // WeakCells are converted into in-place weak references.
- Comment("StoreIC_if_handler_from_stub_cache");
- GotoIf(TaggedIsSmi(var_handler.value()), &if_handler);
-
- CSA_ASSERT(this, IsStrongHeapObject(var_handler.value()));
- TNode<HeapObject> handler = ToStrongHeapObject(var_handler.value());
- GotoIfNot(IsWeakCell(handler), &if_handler);
-
- TNode<HeapObject> value = CAST(LoadWeakCellValue(CAST(handler), &miss));
- TNode<Map> value_map = LoadMap(value);
- GotoIfNot(Word32Or(IsMetaMap(value_map), IsPropertyCellMap(value_map)),
- &if_handler);
-
- TNode<MaybeObject> weak_handler = MakeWeak(value);
- HandleStoreICHandlerCase(p, weak_handler, &miss, ICMode::kNonGlobalIC);
+ &if_handler, &var_handler, &miss);
}
BIND(&try_uninitialized);
{
// Check uninitialized case.
- GotoIfNot(
- WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- &miss);
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ &miss);
TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
p->name, p->value, p->slot, p->vector);
}
@@ -2893,17 +2871,16 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Label if_lexical_var(this), if_property_cell(this);
- TNode<MaybeObject> feedback_element =
+ TNode<MaybeObject> maybe_weak_ref =
LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<Object> maybe_weak_cell = ToObject(feedback_element);
- Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+ Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_property_cell);
BIND(&if_property_cell);
{
Label try_handler(this), miss(this, Label::kDeferred);
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_weak_ref));
TNode<PropertyCell> property_cell =
- CAST(LoadWeakCellValue(CAST(maybe_weak_cell), &try_handler));
+ CAST(ToWeakHeapObject(maybe_weak_ref, &try_handler));
ExitPoint direct_exit(this);
StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
@@ -2937,7 +2914,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
BIND(&if_lexical_var);
{
Comment("Store lexical variable");
- TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
+ TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_ref));
TNode<IntPtrT> context_index =
Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
@@ -3027,7 +3004,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -3038,11 +3015,13 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
}
BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
{
// CheckPolymorphic case.
Comment("KeyedStoreIC_try_polymorphic");
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(feedback), &if_handler,
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
}
@@ -3050,9 +3029,9 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
{
// Check megamorphic case.
Comment("KeyedStoreIC_try_megamorphic");
- GotoIfNot(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
p->receiver, p->name, p->value, p->slot, p->vector);
}
@@ -3061,13 +3040,12 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
{
// We might have a name in feedback, and a fixed array in the next slot.
Comment("KeyedStoreIC_try_polymorphic_name");
- GotoIfNot(WordEqual(feedback, p->name), &miss);
+ GotoIfNot(WordEqual(strong_feedback, p->name), &miss);
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<WeakFixedArray> array = CAST(ToObject(feedback_element));
+ TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
&miss, 1);
}
@@ -3091,7 +3069,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
Node* array_map = LoadReceiverMap(p->receiver);
GotoIf(IsDeprecatedMap(array_map), &miss);
- Node* feedback =
+ TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, array_map, &if_handler,
&var_handler, &try_polymorphic);
@@ -3100,33 +3078,33 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
Comment("StoreInArrayLiteralIC_if_handler");
// This is a stripped-down version of HandleStoreICHandlerCase.
- CSA_ASSERT(this, IsStrongHeapObject(var_handler.value()));
- TNode<HeapObject> handler = ToStrongHeapObject(var_handler.value());
+ TNode<HeapObject> handler = CAST(var_handler.value());
Label if_transitioning_element_store(this);
GotoIfNot(IsCode(handler), &if_transitioning_element_store);
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), CAST(p->context),
+ p->receiver, p->name, p->value, p->slot, p->vector);
BIND(&if_transitioning_element_store);
{
- Node* transition_map_cell = LoadHandlerDataField(CAST(handler), 1);
- Node* transition_map = LoadWeakCellValue(transition_map_cell, &miss);
- CSA_ASSERT(this, IsMap(transition_map));
+ TNode<MaybeObject> maybe_transition_map =
+ LoadHandlerDataField(CAST(handler), 1);
+ TNode<Map> transition_map =
+ CAST(ToWeakHeapObject(maybe_transition_map, &miss));
GotoIf(IsDeprecatedMap(transition_map), &miss);
Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
CSA_ASSERT(this, IsCode(code));
- StoreTransitionDescriptor descriptor(isolate());
- TailCallStub(descriptor, code, p->context, p->receiver, p->name,
- transition_map, p->value, p->slot, p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code, p->context, p->receiver,
+ p->name, transition_map, p->value, p->slot, p->vector);
}
}
BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
{
Comment("StoreInArrayLiteralIC_try_polymorphic");
- GotoIfNot(IsWeakFixedArrayMap(LoadMap(feedback)), &try_megamorphic);
- HandlePolymorphicCase(array_map, CAST(feedback), &if_handler,
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
+ &try_megamorphic);
+ HandlePolymorphicCase(array_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
}
@@ -3135,16 +3113,13 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
Comment("StoreInArrayLiteralIC_try_megamorphic");
CSA_ASSERT(
this,
- Word32Or(
- Word32Or(
- IsWeakCellMap(LoadMap(feedback)),
- WordEqual(feedback,
- LoadRoot(Heap::kuninitialized_symbolRootIndex))),
- WordEqual(feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
- GotoIfNot(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
+ Word32Or(WordEqual(strong_feedback,
+ LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context,
p->value, p->receiver, p->name);
}
@@ -3184,14 +3159,13 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
Node* context = Parameter(Descriptor::kContext);
ExitPoint direct_exit(this);
- VARIABLE(var_handler, MachineRepresentation::kTagged);
+ TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
Node* receiver_map = LoadReceiverMap(receiver);
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS);
- CSA_ASSERT(this, IsObject(feedback_element));
- TNode<Object> feedback = ToObject(feedback_element);
+ TNode<HeapObject> feedback = CAST(feedback_element);
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC_Noninlined(&p, receiver_map, feedback, &var_handler, &if_handler,
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 24a47376a5..0aa9f0ab41 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -49,7 +49,7 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateStoreInArrayLiteralIC();
void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
- Label* if_handler, Variable* var_handler,
+ Label* if_handler, TVariable<MaybeObject>* var_handler,
Label* if_miss);
Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
@@ -88,8 +88,8 @@ class AccessorAssembler : public CodeStubAssembler {
void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
// Loads dataX field from the DataHandler object.
- TNode<Object> LoadHandlerDataField(SloppyTNode<DataHandler> handler,
- int data_index);
+ TNode<MaybeObject> LoadHandlerDataField(SloppyTNode<DataHandler> handler,
+ int data_index);
protected:
struct StoreICParameters : public LoadICParameters {
@@ -123,8 +123,8 @@ class AccessorAssembler : public CodeStubAssembler {
Label* slow,
bool do_transitioning_store);
- void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
- Node* value, Label* bailout);
+ void CheckFieldType(TNode<DescriptorArray> descriptors, Node* name_index,
+ Node* representation, Node* value, Label* bailout);
private:
// Stub generation entry points.
@@ -133,10 +133,13 @@ class AccessorAssembler : public CodeStubAssembler {
// logic not inlined into Ignition bytecode handlers.
void LoadIC(const LoadICParameters* p);
void LoadIC_Noninlined(const LoadICParameters* p, Node* receiver_map,
- Node* feedback, Variable* var_handler,
- Label* if_handler, Label* miss, ExitPoint* exit_point);
+ TNode<HeapObject> feedback,
+ TVariable<MaybeObject>* var_handler, Label* if_handler,
+ Label* miss, ExitPoint* exit_point);
- Node* LoadDescriptorValue(Node* map, Node* descriptor);
+ TNode<Object> LoadDescriptorValue(Node* map, Node* descriptor);
+ TNode<MaybeObject> LoadDescriptorValueOrFieldType(Node* map,
+ Node* descriptor);
void LoadIC_Uninitialized(const LoadICParameters* p);
@@ -153,16 +156,16 @@ class AccessorAssembler : public CodeStubAssembler {
// IC dispatcher behavior.
// Checks monomorphic case. Returns {feedback} entry of the vector.
- Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
- Label* if_handler,
- TVariable<MaybeObject>* var_handler, Label* if_miss);
+ TNode<MaybeObject> TryMonomorphicCase(Node* slot, Node* vector,
+ Node* receiver_map, Label* if_handler,
+ TVariable<MaybeObject>* var_handler,
+ Label* if_miss);
void HandlePolymorphicCase(Node* receiver_map, TNode<WeakFixedArray> feedback,
Label* if_handler,
TVariable<MaybeObject>* var_handler,
Label* if_miss, int min_feedback_capacity);
// LoadIC implementation.
- enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
void HandleLoadICHandlerCase(
const LoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
@@ -285,7 +288,8 @@ class AccessorAssembler : public CodeStubAssembler {
void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
Node* entry_offset, Node* name, Node* map,
- Label* if_handler, Variable* var_handler,
+ Label* if_handler,
+ TVariable<MaybeObject>* var_handler,
Label* if_miss);
};
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index c8705bc6c9..41928f1c08 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -8,15 +8,15 @@
namespace v8 {
namespace internal {
-CallOptimization::CallOptimization(Handle<Object> function) {
+CallOptimization::CallOptimization(Isolate* isolate, Handle<Object> function) {
constant_function_ = Handle<JSFunction>::null();
is_simple_api_call_ = false;
expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
api_call_info_ = Handle<CallHandlerInfo>::null();
if (function->IsJSFunction()) {
- Initialize(Handle<JSFunction>::cast(function));
+ Initialize(isolate, Handle<JSFunction>::cast(function));
} else if (function->IsFunctionTemplateInfo()) {
- Initialize(Handle<FunctionTemplateInfo>::cast(function));
+ Initialize(isolate, Handle<FunctionTemplateInfo>::cast(function));
}
}
@@ -48,8 +48,9 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
if (object_map->has_hidden_prototype()) {
- Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- object_map = handle(prototype->map());
+ JSObject* raw_prototype = JSObject::cast(object_map->prototype());
+ Handle<JSObject> prototype(raw_prototype, raw_prototype->GetIsolate());
+ object_map = handle(prototype->map(), prototype->GetIsolate());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
return prototype;
@@ -64,7 +65,7 @@ bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const {
DCHECK(is_simple_api_call());
if (!receiver->IsHeapObject()) return false;
- Handle<Map> map(HeapObject::cast(*receiver)->map());
+ Handle<Map> map(HeapObject::cast(*receiver)->map(), holder->GetIsolate());
return IsCompatibleReceiverMap(map, holder);
}
@@ -96,30 +97,30 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
}
void CallOptimization::Initialize(
- Handle<FunctionTemplateInfo> function_template_info) {
- Isolate* isolate = function_template_info->GetIsolate();
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
if (function_template_info->call_code()->IsUndefined(isolate)) return;
- api_call_info_ =
- handle(CallHandlerInfo::cast(function_template_info->call_code()));
+ api_call_info_ = handle(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate);
if (!function_template_info->signature()->IsUndefined(isolate)) {
expected_receiver_type_ =
- handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+ handle(FunctionTemplateInfo::cast(function_template_info->signature()),
+ isolate);
}
is_simple_api_call_ = true;
}
-void CallOptimization::Initialize(Handle<JSFunction> function) {
+void CallOptimization::Initialize(Isolate* isolate,
+ Handle<JSFunction> function) {
if (function.is_null() || !function->is_compiled()) return;
constant_function_ = function;
- AnalyzePossibleApiFunction(function);
+ AnalyzePossibleApiFunction(isolate, function);
}
-
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
+ Handle<JSFunction> function) {
if (!function->shared()->IsApiFunction()) return;
- Isolate* isolate = function->GetIsolate();
Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data(),
isolate);
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index ee421355e6..d87ec4fdb1 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -14,7 +14,7 @@ namespace internal {
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
- explicit CallOptimization(Handle<Object> function);
+ CallOptimization(Isolate* isolate, Handle<Object> function);
Context* GetAccessorContext(Map* holder_map) const;
bool IsCrossContextLazyAccessorPair(Context* native_context,
@@ -52,12 +52,14 @@ class CallOptimization BASE_EMBEDDED {
Handle<JSObject> holder) const;
private:
- void Initialize(Handle<JSFunction> function);
- void Initialize(Handle<FunctionTemplateInfo> function_template_info);
+ void Initialize(Isolate* isolate, Handle<JSFunction> function);
+ void Initialize(Isolate* isolate,
+ Handle<FunctionTemplateInfo> function_template_info);
// Determines whether the given function can be called using the
// fast api call builtin.
- void AnalyzePossibleApiFunction(Handle<JSFunction> function);
+ void AnalyzePossibleApiFunction(Isolate* isolate,
+ Handle<JSFunction> function);
Handle<JSFunction> constant_function_;
bool is_simple_api_call_;
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index e4d3d33c40..74ee29edaf 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -163,8 +163,9 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
PropertyConstness constness,
Representation representation) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
- Kind kind = constness == kMutable ? kField : kConstField;
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ constness == PropertyConstness::kMutable);
+ Kind kind = constness == PropertyConstness::kMutable ? kField : kConstField;
return StoreField(isolate, kind, descriptor, field_index, representation);
}
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index bd4ba68c34..c95e036e1c 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -59,7 +59,7 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
}
}
if (fill_handler) {
- handler->set_data1(*data1);
+ handler->set_data1(MaybeObject::FromObject(*data1));
}
Handle<Object> data2;
if (maybe_data2.ToHandle(&data2)) {
@@ -187,11 +187,10 @@ Handle<Object> StoreHandler::StoreElementTransition(
.GetCode();
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(1);
handler->set_smi_handler(*stub);
handler->set_validity_cell(*validity_cell);
- handler->set_data1(*cell);
+ handler->set_data1(HeapObjectReference::Weak(*transition));
return handler;
}
@@ -201,7 +200,8 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
#ifdef DEBUG
if (!is_dictionary_map) {
int descriptor = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(),
+ isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
if (descriptors->GetKey(descriptor)->IsPrivate()) {
DCHECK_EQ(DONT_ENUM, details.attributes());
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 835b39efda..7c95f2fcf0 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -39,17 +39,13 @@ Address IC::raw_constant_pool() const {
}
}
-bool IC::IsHandler(MaybeObject* object, bool from_stub_cache) {
+bool IC::IsHandler(MaybeObject* object) {
HeapObject* heap_object;
return (object->IsSmi() && (object != nullptr)) ||
(object->ToWeakHeapObject(&heap_object) &&
(heap_object->IsMap() || heap_object->IsPropertyCell())) ||
(object->ToStrongHeapObject(&heap_object) &&
(heap_object->IsDataHandler() ||
- (from_stub_cache && heap_object->IsWeakCell() &&
- (WeakCell::cast(heap_object)->cleared() ||
- WeakCell::cast(heap_object)->value()->IsMap() ||
- WeakCell::cast(heap_object)->value()->IsPropertyCell())) ||
heap_object->IsCode()));
}
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index b11bc9de98..0937d792c2 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -22,6 +22,7 @@
#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/module-inl.h"
#include "src/prototype.h"
#include "src/runtime-profiler.h"
#include "src/runtime/runtime-utils.h"
@@ -206,7 +207,7 @@ JSFunction* IC::GetHostFunction() const {
return frame->function();
}
-static void LookupForRead(LookupIterator* it) {
+static void LookupForRead(Isolate* isolate, LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
@@ -217,8 +218,7 @@ static void LookupForRead(LookupIterator* it) {
case LookupIterator::INTERCEPTOR: {
// If there is a getter, return; otherwise loop to perform the lookup.
Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (!holder->GetNamedInterceptor()->getter()->IsUndefined(
- it->isolate())) {
+ if (!holder->GetNamedInterceptor()->getter()->IsUndefined(isolate)) {
return;
}
break;
@@ -255,7 +255,7 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
if (!receiver_map()->IsJSObjectMap()) return false;
Map* first_map = FirstTargetMap();
if (first_map == nullptr) return false;
- Handle<Map> old_map(first_map);
+ Handle<Map> old_map(first_map, isolate());
if (old_map->is_deprecated()) return true;
return IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
receiver_map()->elements_kind());
@@ -333,7 +333,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
if (FLAG_trace_feedback_updates) {
int slot_count = vector->metadata()->slot_count();
- OFStream os(stdout);
+ StdoutStream os;
if (slot.IsInvalid()) {
os << "[Feedback slots in ";
} else {
@@ -429,7 +429,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
TraceIC("LoadIC", name);
}
- if (*name == isolate()->heap()->iterator_symbol()) {
+ if (*name == ReadOnlyRoots(isolate()).iterator_symbol()) {
return Runtime::ThrowIteratorError(isolate(), object);
}
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
@@ -442,8 +442,8 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
update_receiver_map(object);
}
// Named lookup in the object.
- LookupIterator it(object, name);
- LookupForRead(&it);
+ LookupIterator it(isolate(), object, name);
+ LookupForRead(isolate(), &it);
if (name->IsPrivate()) {
if (name->IsPrivateField() && !it.IsFound()) {
@@ -484,14 +484,15 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
+ global->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
- Handle<Object> result =
- FixedArray::get(*ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index),
- lookup_result.slot_index, isolate());
+ if (ScriptContextTable::Lookup(isolate(), script_contexts, str_name,
+ &lookup_result)) {
+ Handle<Object> result = FixedArray::get(
+ *ScriptContextTable::GetContext(isolate(), script_contexts,
+ lookup_result.context_index),
+ lookup_result.slot_index, isolate());
if (result->IsTheHole(isolate())) {
// Do not install stubs and stay pre-monomorphic for
// uninitialized accesses.
@@ -625,8 +626,9 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
Map* transitioned_map = nullptr;
if (more_general_transition) {
MapHandles map_list;
- map_list.push_back(handle(target_map));
- transitioned_map = source_map->FindElementsKindTransitionedMap(map_list);
+ map_list.push_back(handle(target_map, isolate_));
+ transitioned_map =
+ source_map->FindElementsKindTransitionedMap(isolate(), map_list);
}
return transitioned_map == target_map;
}
@@ -720,21 +722,7 @@ StubCache* IC::stub_cache() {
void IC::UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
const MaybeObjectHandle& handler) {
- HeapObject* heap_object;
- if (handler->ToWeakHeapObject(&heap_object)) {
- // TODO(marja): remove this conversion once megamorphic stub cache supports
- // weak handlers.
- Handle<Object> weak_cell;
- if (heap_object->IsMap()) {
- weak_cell = Map::WeakCellForMap(handle(Map::cast(heap_object)));
- } else {
- weak_cell = isolate_->factory()->NewWeakCell(
- handle(PropertyCell::cast(heap_object)));
- }
- stub_cache()->Set(*name, *map, *weak_cell);
- } else {
- stub_cache()->Set(*name, *map, handler->ToObject());
- }
+ stub_cache()->Set(*name, *map, *handler);
}
void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
@@ -750,21 +738,19 @@ void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
- if (receiver->IsString() &&
- *lookup->name() == isolate()->heap()->length_string()) {
+ ReadOnlyRoots roots(isolate());
+ if (receiver->IsString() && *lookup->name() == roots.length_string()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
return BUILTIN_CODE(isolate(), LoadIC_StringLength);
}
- if (receiver->IsStringWrapper() &&
- *lookup->name() == isolate()->heap()->length_string()) {
+ if (receiver->IsStringWrapper() && *lookup->name() == roots.length_string()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_StringWrapperLength);
return BUILTIN_CODE(isolate(), LoadIC_StringWrapperLength);
}
// Use specialized code for getting prototype of functions.
- if (receiver->IsJSFunction() &&
- *lookup->name() == isolate()->heap()->prototype_string() &&
+ if (receiver->IsJSFunction() && *lookup->name() == roots.prototype_string() &&
JSFunction::cast(*receiver)->has_prototype_slot() &&
!JSFunction::cast(*receiver)->map()->has_non_instance_prototype()) {
Handle<Code> stub;
@@ -810,7 +796,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// The method will only return true for absolute truths based on the
// receiver maps.
FieldIndex index;
- if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(), &index)) {
+ if (Accessors::IsJSObjectFieldAccessor(isolate(), map, lookup->name(),
+ &index)) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
return LoadHandler::LoadField(isolate(), index);
}
@@ -818,7 +805,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<ObjectHashTable> exports(
Handle<JSModuleNamespace>::cast(holder)->module()->exports(),
isolate());
- int entry = exports->FindEntry(isolate(), lookup->name(),
+ int entry = exports->FindEntry(roots, lookup->name(),
Smi::ToInt(lookup->name()->GetHash()));
// We found the accessor, so the entry must exist.
DCHECK_NE(entry, ObjectHashTable::kNotFound);
@@ -849,7 +836,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Smi> smi_handler;
- CallOptimization call_optimization(getter);
+ CallOptimization call_optimization(isolate(), getter);
if (call_optimization.is_simple_api_call()) {
if (!call_optimization.IsCompatibleReceiverMap(map, holder) ||
!holder->HasFastProperties()) {
@@ -864,7 +851,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate(), holder_lookup == CallOptimization::kHolderIsReceiver);
Handle<Context> context(
- call_optimization.GetAccessorContext(holder->map()));
+ call_optimization.GetAccessorContext(holder->map()), isolate());
Handle<WeakCell> context_cell =
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
@@ -1145,9 +1132,10 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
// among receiver_maps as unstable because the optimizing compilers may
// generate an elements kind transition for this kind of receivers.
if (receiver_map->is_stable()) {
- Map* tmap = receiver_map->FindElementsKindTransitionedMap(*receiver_maps);
+ Map* tmap = receiver_map->FindElementsKindTransitionedMap(isolate(),
+ *receiver_maps);
if (tmap != nullptr) {
- receiver_map->NotifyLeafMapLayoutChange();
+ receiver_map->NotifyLeafMapLayoutChange(isolate());
}
}
handlers->push_back(
@@ -1195,7 +1183,8 @@ bool IsOutOfBoundsAccess(Handle<Object> receiver, uint32_t index) {
return index >= length;
}
-KeyedAccessLoadMode GetLoadMode(Handle<Object> receiver, uint32_t index) {
+KeyedAccessLoadMode GetLoadMode(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index) {
if (IsOutOfBoundsAccess(receiver, index)) {
if (receiver->IsJSTypedArray()) {
// For JSTypedArray we never lookup elements in the prototype chain.
@@ -1203,7 +1192,6 @@ KeyedAccessLoadMode GetLoadMode(Handle<Object> receiver, uint32_t index) {
}
// For other {receiver}s we need to check the "no elements" protector.
- Isolate* isolate = Handle<HeapObject>::cast(receiver)->GetIsolate();
if (isolate->IsNoElementsProtectorIntact()) {
if (receiver->IsString()) {
// ToObject(receiver) will have the initial String.prototype.
@@ -1254,7 +1242,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
LoadIC::Load(object, Handle<Name>::cast(key)),
Object);
} else if (ConvertKeyToIndex(object, key, &index)) {
- KeyedAccessLoadMode load_mode = GetLoadMode(object, index);
+ KeyedAccessLoadMode load_mode = GetLoadMode(isolate(), object, index);
UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
TraceIC("LoadIC", key);
@@ -1298,9 +1286,9 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
InterceptorInfo* info = holder->GetNamedInterceptor();
if (it->HolderIsReceiverOrHiddenPrototype()) {
return !info->non_masking() && receiver.is_identical_to(holder) &&
- !info->setter()->IsUndefined(it->isolate());
- } else if (!info->getter()->IsUndefined(it->isolate()) ||
- !info->query()->IsUndefined(it->isolate())) {
+ !info->setter()->IsUndefined(isolate());
+ } else if (!info->getter()->IsUndefined(isolate()) ||
+ !info->query()->IsUndefined(isolate())) {
return false;
}
break;
@@ -1325,7 +1313,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
// Receiver != holder.
if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(it->isolate(), receiver);
+ PrototypeIterator iter(isolate(), receiver);
return it->GetHolder<Object>().is_identical_to(
PrototypeIterator::GetCurrent(iter));
}
@@ -1355,13 +1343,14 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
Handle<String> str_name = Handle<String>::cast(name);
Handle<JSGlobalObject> global = isolate()->global_object();
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
+ global->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+ if (ScriptContextTable::Lookup(isolate(), script_contexts, str_name,
+ &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index);
- if (lookup_result.mode == CONST) {
+ isolate(), script_contexts, lookup_result.context_index);
+ if (lookup_result.mode == VariableMode::kConst) {
return TypeError(MessageTemplate::kConstAssign, global, name);
}
@@ -1402,7 +1391,8 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Object::SetProperty(object, name, value, language_mode()), Object);
+ Object::SetProperty(isolate(), object, name, value, language_mode()),
+ Object);
return result;
}
@@ -1422,7 +1412,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (state() != UNINITIALIZED) {
JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
}
- LookupIterator it(object, name);
+ LookupIterator it(isolate(), object, name);
bool use_ic = FLAG_use_ic;
if (name->IsPrivate()) {
@@ -1581,7 +1571,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
return MaybeObjectHandle(slow_stub());
}
- CallOptimization call_optimization(setter);
+ CallOptimization call_optimization(isolate(), setter);
if (call_optimization.is_simple_api_call()) {
if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
CallOptimization::HolderLookup holder_lookup;
@@ -1593,7 +1583,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
holder_lookup == CallOptimization::kHolderIsReceiver);
Handle<Context> context(
- call_optimization.GetAccessorContext(holder->map()));
+ call_optimization.GetAccessorContext(holder->map()), isolate());
Handle<WeakCell> context_cell =
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
@@ -1653,10 +1643,12 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
int descriptor = lookup->GetFieldDescriptorIndex();
FieldIndex index = lookup->GetFieldIndex();
PropertyConstness constness = lookup->constness();
- if (constness == kConst && IsStoreOwnICKind(nexus()->kind())) {
+ if (constness == PropertyConstness::kConst &&
+ IsStoreOwnICKind(nexus()->kind())) {
// StoreOwnICs are used for initializing object literals therefore
- // we must store the value unconditionally even to kConst fields.
- constness = kMutable;
+ // we must store the value unconditionally even to
+ // VariableMode::kConst fields.
+ constness = PropertyConstness::kMutable;
}
return MaybeObjectHandle(StoreHandler::StoreField(
isolate(), descriptor, index, constness, lookup->representation()));
@@ -1820,14 +1812,14 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
? HOLEY_ELEMENTS
: PACKED_ELEMENTS;
- return Map::TransitionElementsTo(map, kind);
+ return Map::TransitionElementsTo(isolate(), map, kind);
}
case STORE_TRANSITION_TO_DOUBLE:
case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
? HOLEY_DOUBLE_ELEMENTS
: PACKED_DOUBLE_ELEMENTS;
- return Map::TransitionElementsTo(map, kind);
+ return Map::TransitionElementsTo(isolate(), map, kind);
}
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_fixed_typed_array_elements());
@@ -1846,8 +1838,9 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- DCHECK_IMPLIES(receiver_map->DictionaryElementsInPrototypeChainOnly(),
- IsStoreInArrayLiteralICKind(kind()));
+ DCHECK_IMPLIES(
+ receiver_map->DictionaryElementsInPrototypeChainOnly(isolate()),
+ IsStoreInArrayLiteralICKind(kind()));
if (receiver_map->IsJSProxyMap()) {
return StoreHandler::StoreProxy(isolate());
@@ -1910,7 +1903,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
Handle<Map> transition;
if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE ||
- receiver_map->DictionaryElementsInPrototypeChainOnly()) {
+ receiver_map->DictionaryElementsInPrototypeChainOnly(isolate())) {
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
@@ -1918,13 +1911,13 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
} else {
{
- Map* tmap =
- receiver_map->FindElementsKindTransitionedMap(*receiver_maps);
+ Map* tmap = receiver_map->FindElementsKindTransitionedMap(
+ isolate(), *receiver_maps);
if (tmap != nullptr) {
if (receiver_map->is_stable()) {
- receiver_map->NotifyLeafMapLayoutChange();
+ receiver_map->NotifyLeafMapLayoutChange(isolate());
}
- transition = handle(tmap);
+ transition = handle(tmap, isolate());
}
}
@@ -2040,7 +2033,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// expect to be able to trap element sets to objects with those maps in
// the runtime to enable optimization of element hole access.
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain()) {
+ if (heap_object->map()->IsMapInArrayPrototypeChain(isolate())) {
set_slow_stub_reason("map in array prototype");
use_ic = false;
}
@@ -2087,8 +2080,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
} else if (key_is_valid_index) {
if (old_receiver_map->is_abandoned_prototype_map()) {
set_slow_stub_reason("receiver with prototype map");
- } else if (!old_receiver_map
- ->DictionaryElementsInPrototypeChainOnly()) {
+ } else if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly(
+ isolate())) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
@@ -2114,12 +2107,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
namespace {
-void StoreOwnElement(Handle<JSArray> array, Handle<Object> index,
- Handle<Object> value) {
+void StoreOwnElement(Isolate* isolate, Handle<JSArray> array,
+ Handle<Object> index, Handle<Object> value) {
DCHECK(index->IsNumber());
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(
- array->GetIsolate(), array, index, &success, LookupIterator::OWN);
+ isolate, array, index, &success, LookupIterator::OWN);
DCHECK(success);
CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE,
@@ -2130,11 +2123,11 @@ void StoreOwnElement(Handle<JSArray> array, Handle<Object> index,
void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
Handle<Object> value) {
- DCHECK(!array->map()->IsMapInArrayPrototypeChain());
+ DCHECK(!array->map()->IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
if (!FLAG_use_ic || MigrateDeprecated(array)) {
- StoreOwnElement(array, index, value);
+ StoreOwnElement(isolate(), array, index, value);
TraceIC("StoreInArrayLiteralIC", index);
return;
}
@@ -2150,7 +2143,7 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
Handle<Map> old_array_map(array->map(), isolate());
bool array_was_cow = array->elements()->IsCowArray();
- StoreOwnElement(array, index, value);
+ StoreOwnElement(isolate(), array, index, value);
if (index->IsSmi()) {
DCHECK(!old_array_map->is_abandoned_prototype_map());
@@ -2227,15 +2220,16 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
- native_context->script_context_table());
+ native_context->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+ if (ScriptContextTable::Lookup(isolate, script_contexts, name,
+ &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index);
+ isolate, script_contexts, lookup_result.context_index);
Handle<Object> result =
FixedArray::get(*script_context, lookup_result.slot_index, isolate);
- if (*result == isolate->heap()->the_hole_value()) {
+ if (*result == ReadOnlyRoots(isolate).the_hole_value()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
@@ -2343,13 +2337,14 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
- native_context->script_context_table());
+ native_context->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+ if (ScriptContextTable::Lookup(isolate, script_contexts, name,
+ &lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index);
- if (lookup_result.mode == CONST) {
+ isolate, script_contexts, lookup_result.context_index);
+ if (lookup_result.mode == VariableMode::kConst) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstAssign, global, name));
}
@@ -2427,7 +2422,7 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Slow) {
Handle<Object> value = args.at(0);
Handle<Object> array = args.at(1);
Handle<Object> index = args.at(2);
- StoreOwnElement(Handle<JSArray>::cast(array), index, value);
+ StoreOwnElement(isolate, Handle<JSArray>::cast(array), index, value);
return *value;
}
@@ -2450,7 +2445,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
if (IsStoreInArrayLiteralICKind(kind)) {
- StoreOwnElement(Handle<JSArray>::cast(object), key, value);
+ StoreOwnElement(isolate, Handle<JSArray>::cast(object), key, value);
return *value;
} else {
DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind));
@@ -2480,7 +2475,8 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<AccessorInfo> info(
callback_or_cell->IsWeakCell()
? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
- : AccessorInfo::cast(*callback_or_cell));
+ : AccessorInfo::cast(*callback_or_cell),
+ isolate);
DCHECK(info->IsCompatibleReceiver(*receiver));
@@ -2539,7 +2535,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
// It could actually be any kind of load IC slot here but the predicate
// handles all the cases properly.
if (!LoadIC::ShouldThrowReferenceError(slot_kind)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Throw a reference error.
@@ -2593,7 +2589,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
return *value;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 22f3cb9c26..feac4dc63b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -59,8 +59,7 @@ class IC {
IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind());
}
- static inline bool IsHandler(MaybeObject* object,
- bool from_stub_cache = false);
+ static inline bool IsHandler(MaybeObject* object);
// Nofity the IC system that a feedback has changed.
static void OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
@@ -145,7 +144,7 @@ class IC {
if (receiver->IsSmi()) {
receiver_map_ = isolate_->factory()->heap_number_map();
} else {
- receiver_map_ = handle(HeapObject::cast(*receiver)->map());
+ receiver_map_ = handle(HeapObject::cast(*receiver)->map(), isolate_);
}
}
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 13eaee8e2b..4257f05914 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -262,8 +262,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// The length property is non-configurable, so it's guaranteed to always
// be the first property.
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
- TNode<Int32T> details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorArray::ToDetailsIndex(0));
+ TNode<Uint32T> details = LoadDetailsByKeyIndex(
+ descriptors, IntPtrConstant(DescriptorArray::ToKeyIndex(0)));
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
slow);
}
@@ -573,10 +573,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
&var_entry, &next_proto, bailout);
BIND(&found_fast);
{
- Node* descriptors = var_meta_storage.value();
- Node* name_index = var_entry.value();
- Node* details =
- LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+ TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
+ TNode<IntPtrT> name_index = var_entry.value();
+ Node* details = LoadDetailsByKeyIndex(descriptors, name_index);
JumpIfDataProperty(details, &ok_to_write, readonly);
// Accessor case.
@@ -651,7 +650,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&fast_properties);
{
Comment("fast property store");
- Node* descriptors = LoadMapDescriptors(receiver_map);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
Label descriptor_found(this), lookup_transition(this);
TVARIABLE(IntPtrT, var_name_index);
DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
@@ -659,9 +658,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&descriptor_found);
{
- Node* name_index = var_name_index.value();
- Node* details =
- LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+ TNode<IntPtrT> name_index = var_name_index.value();
+ Node* details = LoadDetailsByKeyIndex(descriptors, name_index);
Label data_property(this);
JumpIfDataProperty(details, &data_property, &readonly);
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index a9cafd6648..4958726a22 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -58,31 +58,32 @@ int StubCache::SecondaryOffset(Name* name, int seed) {
namespace {
bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
- Object* handler) {
+ MaybeObject* handler) {
// Validate that the name and handler do not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
- DCHECK(!name->GetHeap()->InNewSpace(name));
- DCHECK(!name->GetHeap()->InNewSpace(handler));
+ DCHECK(!Heap::InNewSpace(name));
+ DCHECK(!Heap::InNewSpace(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
- if (handler) DCHECK(IC::IsHandler(MaybeObject::FromObject(handler), true));
+ if (handler) DCHECK(IC::IsHandler(handler));
return true;
}
} // namespace
#endif
-Object* StubCache::Set(Name* name, Map* map, Object* handler) {
+MaybeObject* StubCache::Set(Name* name, Map* map, MaybeObject* handler) {
DCHECK(CommonStubCacheChecks(this, name, map, handler));
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- Object* old_handler = primary->value;
+ MaybeObject* old_handler = primary->value;
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
- if (old_handler != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+ if (old_handler != MaybeObject::FromObject(
+ isolate_->builtins()->builtin(Builtins::kIllegal))) {
Map* old_map = primary->map;
int seed = PrimaryOffset(primary->key, old_map);
int secondary_offset = SecondaryOffset(primary->key, seed);
@@ -98,7 +99,7 @@ Object* StubCache::Set(Name* name, Map* map, Object* handler) {
return handler;
}
-Object* StubCache::Get(Name* name, Map* map) {
+MaybeObject* StubCache::Get(Name* name, Map* map) {
DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
@@ -115,14 +116,16 @@ Object* StubCache::Get(Name* name, Map* map) {
void StubCache::Clear() {
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+ MaybeObject* empty = MaybeObject::FromObject(
+ isolate_->builtins()->builtin(Builtins::kIllegal));
+ Name* empty_string = ReadOnlyRoots(isolate()).empty_string();
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = isolate()->heap()->empty_string();
+ primary_[i].key = empty_string;
primary_[i].map = nullptr;
primary_[i].value = empty;
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = isolate()->heap()->empty_string();
+ secondary_[j].key = empty_string;
secondary_[j].map = nullptr;
secondary_[j].value = empty;
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 870266eefd..5cff496b15 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -34,14 +34,14 @@ class StubCache {
public:
struct Entry {
Name* key;
- Object* value;
+ MaybeObject* value;
Map* map;
};
void Initialize();
// Access cache for entry hash(name, map).
- Object* Set(Name* name, Map* map, Object* handler);
- Object* Get(Name* name, Map* map);
+ MaybeObject* Set(Name* name, Map* map, MaybeObject* handler);
+ MaybeObject* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index d5ab8ddb60..c6d74d2bd8 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -45,7 +45,7 @@ void IdentityMapBase::DisableIteration() {
int IdentityMapBase::ScanKeysFor(Object* address) const {
int start = Hash(address) & mask_;
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
for (int index = start; index < capacity_; index++) {
if (keys_[index] == address) return index; // Found.
if (keys_[index] == not_mapped) return -1; // Not found.
@@ -58,7 +58,7 @@ int IdentityMapBase::ScanKeysFor(Object* address) const {
}
int IdentityMapBase::InsertKey(Object* address) {
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
while (true) {
int start = Hash(address) & mask_;
int limit = capacity_ / 2;
@@ -80,7 +80,7 @@ int IdentityMapBase::InsertKey(Object* address) {
bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
if (deleted_value != nullptr) *deleted_value = values_[index];
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
DCHECK_NE(keys_[index], not_mapped);
keys_[index] = not_mapped;
values_[index] = nullptr;
@@ -141,7 +141,7 @@ int IdentityMapBase::LookupOrInsert(Object* key) {
}
int IdentityMapBase::Hash(Object* address) const {
- CHECK_NE(address, heap_->not_mapped_symbol());
+ CHECK_NE(address, ReadOnlyRoots(heap_).not_mapped_symbol());
uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
return static_cast<int>(hasher_(raw_address));
}
@@ -159,7 +159,7 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
gc_counter_ = heap_->gc_count();
keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
memset(values_, 0, sizeof(void*) * capacity_);
@@ -197,7 +197,7 @@ bool IdentityMapBase::DeleteEntry(Object* key, void** deleted_value) {
Object* IdentityMapBase::KeyAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
- DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+ DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol());
CHECK(is_iterable()); // Must be iterable to access by index;
return keys_[index];
}
@@ -205,7 +205,7 @@ Object* IdentityMapBase::KeyAtIndex(int index) const {
IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
- DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+ DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol());
CHECK(is_iterable()); // Must be iterable to access by index;
return &values_[index];
}
@@ -214,7 +214,7 @@ int IdentityMapBase::NextIndex(int index) const {
DCHECK_LE(-1, index);
DCHECK_LE(index, capacity_);
CHECK(is_iterable()); // Must be iterable to access by index;
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
for (++index; index < capacity_; ++index) {
if (keys_[index] != not_mapped) {
return index;
@@ -232,7 +232,7 @@ void IdentityMapBase::Rehash() {
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
for (int i = 0; i < capacity_; i++) {
if (keys_[i] == not_mapped) {
last_empty = i;
@@ -270,7 +270,7 @@ void IdentityMapBase::Resize(int new_capacity) {
size_ = 0;
keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
- Object* not_mapped = heap_->not_mapped_symbol();
+ Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
memset(values_, 0, sizeof(void*) * capacity_);
diff --git a/deps/v8/src/inspector/PRESUBMIT.py b/deps/v8/src/inspector/PRESUBMIT.py
index 9760cb0a5b..8b7a5cb320 100644
--- a/deps/v8/src/inspector/PRESUBMIT.py
+++ b/deps/v8/src/inspector/PRESUBMIT.py
@@ -61,6 +61,7 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
- 'master.tryserver.blink:linux_trusty_blink_rel'
+ 'master.tryserver.blink:linux_trusty_blink_rel',
+ 'luci.chromium.try:linux_chromium_headless_rel',
],
'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 662a5678b0..5852b227e7 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -263,6 +263,7 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
v8::HandleScope handles(isolate);
v8::TryCatch tryCatch(isolate);
v8::Local<v8::Context> context = inspectedContext->context();
+ v8::debug::PostponeInterruptsScope postponeInterrupts(isolate);
v8::Context::Scope scope(context);
v8::MicrotasksScope microtasksScope(isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 5e2cf23bd9..bb98c87158 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -110,11 +110,11 @@ InjectedScript* InspectedContext::getInjectedScript(int sessionId) {
}
bool InspectedContext::createInjectedScript(int sessionId) {
- DCHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end());
std::unique_ptr<InjectedScript> injectedScript =
InjectedScript::create(this, sessionId);
// InjectedScript::create can destroy |this|.
if (!injectedScript) return false;
+ CHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end());
m_injectedScripts[sessionId] = std::move(injectedScript);
return true;
}
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index dc473849ee..23f8875063 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -2859,6 +2859,18 @@
]
},
{
+ "name": "setAsyncCallStackDepth",
+ "description": "Enables or disables async call stacks tracking.",
+ "redirect": "Debugger",
+ "parameters": [
+ {
+ "name": "maxDepth",
+ "description": "Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async\ncall stacks (default).",
+ "type": "integer"
+ }
+ ]
+ },
+ {
"name": "setCustomObjectFormatterEnabled",
"experimental": true,
"parameters": [
@@ -2869,13 +2881,70 @@
]
},
{
+ "name": "setMaxCallStackSizeToCapture",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "size",
+ "type": "integer"
+ }
+ ]
+ },
+ {
"name": "terminateExecution",
"description": "Terminate current or next JavaScript execution.\nWill cancel the termination when the outer-most script execution ends.",
"experimental": true
+ },
+ {
+ "name": "addBinding",
+ "description": "If executionContextId is empty, adds binding with the given name on the\nglobal objects of all inspected contexts, including those created later,\nbindings survive reloads.\nIf executionContextId is specified, adds binding only on global object of\ngiven execution context.\nBinding function takes exactly one argument, this argument should be string,\nin case of any other input, function throws an exception.\nEach binding function call produces Runtime.bindingCalled notification.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "executionContextId",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
+ "name": "removeBinding",
+ "description": "This method does not remove binding function from global object but\nunsubscribes current runtime agent from Runtime.bindingCalled notifications.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "name",
+ "type": "string"
+ }
+ ]
}
],
"events": [
{
+ "name": "bindingCalled",
+ "description": "Notification is issued every time when binding is called.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "payload",
+ "type": "string"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Identifier of the context where the call was made.",
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
"name": "consoleAPICalled",
"description": "Issued when console API was called.",
"parameters": [
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
index 9f02e9d35c..a7e368d2b8 100644
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -1314,14 +1314,53 @@ domain Runtime
# Exception details.
optional ExceptionDetails exceptionDetails
+ # Enables or disables async call stacks tracking.
+ command setAsyncCallStackDepth
+ redirect Debugger
+ parameters
+ # Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
+ # call stacks (default).
+ integer maxDepth
+
experimental command setCustomObjectFormatterEnabled
parameters
boolean enabled
+ experimental command setMaxCallStackSizeToCapture
+ parameters
+ integer size
+
# Terminate current or next JavaScript execution.
# Will cancel the termination when the outer-most script execution ends.
experimental command terminateExecution
+ # If executionContextId is empty, adds binding with the given name on the
+ # global objects of all inspected contexts, including those created later,
+ # bindings survive reloads.
+ # If executionContextId is specified, adds binding only on global object of
+ # given execution context.
+ # Binding function takes exactly one argument, this argument should be string,
+ # in case of any other input, function throws an exception.
+ # Each binding function call produces Runtime.bindingCalled notification.
+ experimental command addBinding
+ parameters
+ string name
+ optional ExecutionContextId executionContextId
+
+ # This method does not remove binding function from global object but
+ # unsubscribes current runtime agent from Runtime.bindingCalled notifications.
+ experimental command removeBinding
+ parameters
+ string name
+
+ # Notification is issued every time when binding is called.
+ experimental event bindingCalled
+ parameters
+ string name
+ string payload
+ # Identifier of the context where the call was made.
+ ExecutionContextId executionContextId
+
# Issued when console API was called.
event consoleAPICalled
parameters
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index c02870ac06..faba4a082b 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -345,16 +345,6 @@ void V8Console::Assert(const v8::debug::ConsoleCallArguments& info,
m_inspector->debugger()->breakProgramOnAssert(helper.groupId());
}
-void V8Console::MarkTimeline(const v8::debug::ConsoleCallArguments& info,
- const v8::debug::ConsoleContext& consoleContext) {
- ConsoleHelper(info, consoleContext, m_inspector)
- .reportDeprecatedCall("V8Console#markTimelineDeprecated",
- "'console.markTimeline' is "
- "deprecated. Please use "
- "'console.timeStamp' instead.");
- TimeStamp(info, consoleContext);
-}
-
void V8Console::Profile(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
ConsoleHelper helper(info, consoleContext, m_inspector);
@@ -414,25 +404,6 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
}
-void V8Console::Timeline(const v8::debug::ConsoleCallArguments& info,
- const v8::debug::ConsoleContext& consoleContext) {
- ConsoleHelper(info, consoleContext, m_inspector)
- .reportDeprecatedCall("V8Console#timeline",
- "'console.timeline' is deprecated. Please use "
- "'console.time' instead.");
- timeFunction(info, consoleContext, true, m_inspector);
-}
-
-void V8Console::TimelineEnd(const v8::debug::ConsoleCallArguments& info,
- const v8::debug::ConsoleContext& consoleContext) {
- ConsoleHelper(info, consoleContext, m_inspector)
- .reportDeprecatedCall("V8Console#timelineEnd",
- "'console.timelineEnd' is "
- "deprecated. Please use "
- "'console.timeEnd' instead.");
- timeEndFunction(info, consoleContext, true, m_inspector);
-}
-
void V8Console::Time(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
timeFunction(info, consoleContext, false, m_inspector);
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index c38890a8f9..2e47012807 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -82,16 +82,10 @@ class V8Console : public v8::debug::ConsoleDelegate {
const v8::debug::ConsoleContext& consoleContext) override;
void Assert(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
- void MarkTimeline(const v8::debug::ConsoleCallArguments&,
- const v8::debug::ConsoleContext& consoleContext) override;
void Profile(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
void ProfileEnd(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
- void Timeline(const v8::debug::ConsoleCallArguments&,
- const v8::debug::ConsoleContext& consoleContext) override;
- void TimelineEnd(const v8::debug::ConsoleCallArguments&,
- const v8::debug::ConsoleContext& consoleContext) override;
void Time(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
void TimeEnd(const v8::debug::ConsoleCallArguments&,
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 6b3c0ab887..e4e6492b67 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -294,62 +294,6 @@ Response buildScopes(v8::debug::ScopeIterator* iterator,
return Response::OK();
}
-bool liveEditExceptionToDetails(
- V8InspectorImpl* inspector, v8::Local<v8::Context> context,
- v8::Local<v8::Value> exceptionValue,
- Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (!exceptionValue->IsObject()) return false;
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Object> exception = exceptionValue.As<v8::Object>();
- v8::Local<v8::Value> detailsValue;
- if (!exception->Get(context, toV8String(isolate, "details"))
- .ToLocal(&detailsValue) ||
- !detailsValue->IsObject()) {
- return false;
- }
- v8::Local<v8::Object> details = detailsValue.As<v8::Object>();
- v8::Local<v8::Value> message;
- if (!details->Get(context, toV8String(isolate, "syntaxErrorMessage"))
- .ToLocal(&message) ||
- !message->IsString()) {
- return false;
- }
- v8::Local<v8::Value> positionValue;
- if (!details->Get(context, toV8String(isolate, "position"))
- .ToLocal(&positionValue) ||
- !positionValue->IsObject()) {
- return false;
- }
- v8::Local<v8::Value> startPositionValue;
- if (!positionValue.As<v8::Object>()
- ->Get(context, toV8String(isolate, "start"))
- .ToLocal(&startPositionValue) ||
- !startPositionValue->IsObject()) {
- return false;
- }
- v8::Local<v8::Object> startPosition = startPositionValue.As<v8::Object>();
- v8::Local<v8::Value> lineValue;
- if (!startPosition->Get(context, toV8String(isolate, "line"))
- .ToLocal(&lineValue) ||
- !lineValue->IsInt32()) {
- return false;
- }
- v8::Local<v8::Value> columnValue;
- if (!startPosition->Get(context, toV8String(isolate, "column"))
- .ToLocal(&columnValue) ||
- !columnValue->IsInt32()) {
- return false;
- }
- *exceptionDetails =
- protocol::Runtime::ExceptionDetails::create()
- .setExceptionId(inspector->nextExceptionId())
- .setText(toProtocolString(message.As<v8::String>()))
- .setLineNumber(lineValue->Int32Value(context).FromJust() - 1)
- .setColumnNumber(columnValue->Int32Value(context).FromJust() - 1)
- .build();
- return true;
-}
-
protocol::DictionaryValue* getOrCreateObject(protocol::DictionaryValue* object,
const String16& key) {
protocol::DictionaryValue* value = object->getObject(key);
@@ -477,7 +421,7 @@ Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
m_debugger->setBreakpointsActive(active);
if (!active && !m_breakReason.empty()) {
clearBreakDetails();
- m_debugger->setPauseOnNextStatement(false, m_session->contextGroupId());
+ m_debugger->setPauseOnNextCall(false, m_session->contextGroupId());
}
return Response::OK();
}
@@ -717,9 +661,12 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
std::vector<v8::debug::BreakLocation> v8Locations;
{
v8::HandleScope handleScope(m_isolate);
- v8::Local<v8::Context> debuggerContext =
- v8::debug::GetDebugContext(m_isolate);
- v8::Context::Scope contextScope(debuggerContext);
+ int contextId = it->second->executionContextId();
+ InspectedContext* inspected = m_inspector->getContext(contextId);
+ if (!inspected) {
+ return Response::Error("Cannot retrive script context");
+ }
+ v8::Context::Scope contextScope(inspected->context());
v8::MicrotasksScope microtasks(m_isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::TryCatch tryCatch(m_isolate);
@@ -924,23 +871,22 @@ Response V8DebuggerAgentImpl::setScriptSource(
v8::HandleScope handleScope(m_isolate);
v8::Local<v8::Context> context = inspected->context();
v8::Context::Scope contextScope(context);
- v8::TryCatch tryCatch(m_isolate);
-
- bool stackChangedValue = false;
- it->second->setSource(newContent, dryRun.fromMaybe(false),
- &stackChangedValue);
- if (tryCatch.HasCaught()) {
- if (liveEditExceptionToDetails(m_inspector, context, tryCatch.Exception(),
- optOutCompileError)) {
- return Response::OK();
- }
- v8::Local<v8::Message> message = tryCatch.Message();
- if (!message.IsEmpty())
- return Response::Error(toProtocolStringWithTypeCheck(message->Get()));
- else
- return Response::InternalError();
+
+ v8::debug::LiveEditResult result;
+ it->second->setSource(newContent, dryRun.fromMaybe(false), &result);
+ if (result.status != v8::debug::LiveEditResult::OK) {
+ *optOutCompileError =
+ protocol::Runtime::ExceptionDetails::create()
+ .setExceptionId(m_inspector->nextExceptionId())
+ .setText(toProtocolString(result.message))
+ .setLineNumber(result.line_number != -1 ? result.line_number - 1
+ : 0)
+ .setColumnNumber(result.column_number != -1 ? result.column_number
+ : 0)
+ .build();
+ return Response::OK();
} else {
- *stackChanged = stackChangedValue;
+ *stackChanged = result.stack_changed;
}
std::unique_ptr<Array<CallFrame>> callFrames;
Response response = currentCallFrames(&callFrames);
@@ -1006,7 +952,7 @@ void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
std::unique_ptr<protocol::DictionaryValue> data) {
if (isPaused() || !acceptsPause(false) || !m_breakpointsActive) return;
if (m_breakReason.empty()) {
- m_debugger->setPauseOnNextStatement(true, m_session->contextGroupId());
+ m_debugger->setPauseOnNextCall(true, m_session->contextGroupId());
}
pushBreakDetails(breakReason, std::move(data));
}
@@ -1014,7 +960,7 @@ void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
if (isPaused() || !acceptsPause(false) || !m_breakpointsActive) return;
if (m_breakReason.size() == 1) {
- m_debugger->setPauseOnNextStatement(false, m_session->contextGroupId());
+ m_debugger->setPauseOnNextCall(false, m_session->contextGroupId());
}
popBreakDetails();
}
@@ -1022,10 +968,14 @@ void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
Response V8DebuggerAgentImpl::pause() {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
if (isPaused()) return Response::OK();
- if (m_breakReason.empty()) {
- m_debugger->setPauseOnNextStatement(true, m_session->contextGroupId());
+ if (m_debugger->canBreakProgram()) {
+ m_debugger->interruptAndBreak(m_session->contextGroupId());
+ } else {
+ if (m_breakReason.empty()) {
+ m_debugger->setPauseOnNextCall(true, m_session->contextGroupId());
+ }
+ pushBreakDetails(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
}
- pushBreakDetails(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
return Response::OK();
}
@@ -1206,7 +1156,9 @@ Response V8DebuggerAgentImpl::setReturnValue(
}
Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled() && !m_session->runtimeAgent()->enabled()) {
+ return Response::Error(kDebuggerNotEnabled);
+ }
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
m_debugger->setAsyncCallStackDepth(this, depth);
return Response::OK();
@@ -1668,7 +1620,7 @@ void V8DebuggerAgentImpl::breakProgram(
popBreakDetails();
m_breakReason.swap(currentScheduledReason);
if (!m_breakReason.empty()) {
- m_debugger->setPauseOnNextStatement(true, m_session->contextGroupId());
+ m_debugger->setPauseOnNextCall(true, m_session->contextGroupId());
}
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index f5623a8dc5..65f7677b47 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -6,6 +6,7 @@
#define V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
#include <deque>
+#include <unordered_map>
#include <vector>
#include "src/base/macros.h"
@@ -183,11 +184,11 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
bool isPaused() const;
using ScriptsMap =
- protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
+ std::unordered_map<String16, std::unique_ptr<V8DebuggerScript>>;
using BreakpointIdToDebuggerBreakpointIdsMap =
- protocol::HashMap<String16, std::vector<v8::debug::BreakpointId>>;
+ std::unordered_map<String16, std::vector<v8::debug::BreakpointId>>;
using DebuggerBreakpointIdToBreakpointIdMap =
- protocol::HashMap<v8::debug::BreakpointId, String16>;
+ std::unordered_map<v8::debug::BreakpointId, String16>;
V8InspectorImpl* m_inspector;
V8Debugger* m_debugger;
@@ -216,7 +217,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
bool m_breakpointsActive = false;
std::unique_ptr<V8Regex> m_blackboxPattern;
- protocol::HashMap<String16, std::vector<std::pair<int, int>>>
+ std::unordered_map<String16, std::vector<std::pair<int, int>>>
m_blackboxedPositions;
DISALLOW_COPY_AND_ASSIGN(V8DebuggerAgentImpl);
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index d632df3f66..c40477ae2a 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -114,37 +114,7 @@ class ActualScript : public V8DebuggerScript {
: V8DebuggerScript(isolate, String16::fromInteger(script->Id()),
GetNameOrSourceUrl(script)),
m_isLiveEdit(isLiveEdit) {
- v8::Local<v8::String> tmp;
- if (script->SourceURL().ToLocal(&tmp)) m_sourceURL = toProtocolString(tmp);
- if (script->SourceMappingURL().ToLocal(&tmp))
- m_sourceMappingURL = toProtocolString(tmp);
- m_startLine = script->LineOffset();
- m_startColumn = script->ColumnOffset();
- std::vector<int> lineEnds = script->LineEnds();
- CHECK(lineEnds.size());
- int source_length = lineEnds[lineEnds.size() - 1];
- if (lineEnds.size()) {
- m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
- if (lineEnds.size() > 1) {
- m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
- } else {
- m_endColumn = source_length + m_startColumn;
- }
- } else {
- m_endLine = m_startLine;
- m_endColumn = m_startColumn;
- }
-
- USE(script->ContextId().To(&m_executionContextId));
-
- if (script->Source().ToLocal(&tmp)) {
- m_source = toProtocolString(tmp);
- }
-
- m_isModule = script->IsModule();
-
- m_script.Reset(m_isolate, script);
- m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
+ Initialize(script);
}
bool isLiveEdit() const override { return m_isLiveEdit; }
@@ -166,17 +136,17 @@ class ActualScript : public V8DebuggerScript {
}
void setSource(const String16& newSource, bool preview,
- bool* stackChanged) override {
+ v8::debug::LiveEditResult* result) override {
DCHECK(!isModule());
- v8::HandleScope scope(m_isolate);
+ v8::EscapableHandleScope scope(m_isolate);
v8::Local<v8::String> v8Source = toV8String(m_isolate, newSource);
- if (!m_script.Get(m_isolate)->SetScriptSource(v8Source, preview,
- stackChanged)) {
+ if (!m_script.Get(m_isolate)->SetScriptSource(v8Source, preview, result)) {
+ result->message = scope.Escape(result->message);
return;
}
if (preview) return;
- m_source = newSource;
m_hash = String16();
+ Initialize(scope.Escape(result->script));
}
bool getPossibleBreakpoints(
@@ -259,6 +229,40 @@ class ActualScript : public V8DebuggerScript {
return m_script.Get(m_isolate);
}
+ void Initialize(v8::Local<v8::debug::Script> script) {
+ v8::Local<v8::String> tmp;
+ if (script->SourceURL().ToLocal(&tmp)) m_sourceURL = toProtocolString(tmp);
+ if (script->SourceMappingURL().ToLocal(&tmp))
+ m_sourceMappingURL = toProtocolString(tmp);
+ m_startLine = script->LineOffset();
+ m_startColumn = script->ColumnOffset();
+ std::vector<int> lineEnds = script->LineEnds();
+ CHECK(lineEnds.size());
+ int source_length = lineEnds[lineEnds.size() - 1];
+ if (lineEnds.size()) {
+ m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
+ if (lineEnds.size() > 1) {
+ m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+ } else {
+ m_endColumn = source_length + m_startColumn;
+ }
+ } else {
+ m_endLine = m_startLine;
+ m_endColumn = m_startColumn;
+ }
+
+ USE(script->ContextId().To(&m_executionContextId));
+
+ if (script->Source().ToLocal(&tmp)) {
+ m_source = toProtocolString(tmp);
+ }
+
+ m_isModule = script->IsModule();
+
+ m_script.Reset(m_isolate, script);
+ m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
+ }
+
String16 m_sourceMappingURL;
bool m_isLiveEdit = false;
bool m_isModule = false;
@@ -290,7 +294,9 @@ class WasmVirtualScript : public V8DebuggerScript {
bool isLiveEdit() const override { return false; }
bool isModule() const override { return false; }
void setSourceMappingURL(const String16&) override {}
- void setSource(const String16&, bool, bool*) override { UNREACHABLE(); }
+ void setSource(const String16&, bool, v8::debug::LiveEditResult*) override {
+ UNREACHABLE();
+ }
bool isSourceLoadedLazily() const override { return true; }
const String16& source() const override {
return m_wasmTranslation->GetSource(m_id, m_functionIndex);
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 3e3885ed52..e0e7d93b20 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -73,7 +73,7 @@ class V8DebuggerScript {
void setSourceURL(const String16&);
virtual void setSourceMappingURL(const String16&) = 0;
virtual void setSource(const String16& source, bool preview,
- bool* stackChanged) = 0;
+ v8::debug::LiveEditResult* result) = 0;
virtual bool getPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 28212a1993..1ceb4210f7 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -200,7 +200,6 @@ void V8Debugger::disable() {
}
if (--m_enableCount) return;
clearContinueToLocation();
- allAsyncTasksCanceled();
m_taskWithScheduledBreak = nullptr;
m_taskWithScheduledBreakDebuggerId = String16();
m_pauseOnAsyncCall = false;
@@ -259,7 +258,7 @@ void V8Debugger::setPauseOnExceptionsState(
m_pauseOnExceptionsState = pauseOnExceptionsState;
}
-void V8Debugger::setPauseOnNextStatement(bool pause, int targetContextGroupId) {
+void V8Debugger::setPauseOnNextCall(bool pause, int targetContextGroupId) {
if (isPaused()) return;
DCHECK(targetContextGroupId);
if (!pause && m_targetContextGroupId &&
@@ -269,9 +268,9 @@ void V8Debugger::setPauseOnNextStatement(bool pause, int targetContextGroupId) {
m_targetContextGroupId = targetContextGroupId;
m_breakRequested = pause;
if (pause)
- v8::debug::DebugBreak(m_isolate);
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
else
- v8::debug::CancelDebugBreak(m_isolate);
+ v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
}
bool V8Debugger::canBreakProgram() {
@@ -287,6 +286,16 @@ void V8Debugger::breakProgram(int targetContextGroupId) {
v8::debug::BreakRightNow(m_isolate);
}
+void V8Debugger::interruptAndBreak(int targetContextGroupId) {
+ // Don't allow nested breaks.
+ if (isPaused()) return;
+ DCHECK(targetContextGroupId);
+ m_targetContextGroupId = targetContextGroupId;
+ m_isolate->RequestInterrupt(
+ [](v8::Isolate* isolate, void*) { v8::debug::BreakRightNow(isolate); },
+ nullptr);
+}
+
void V8Debugger::continueProgram(int targetContextGroupId) {
if (m_pausedContextGroupId != targetContextGroupId) return;
if (isPaused()) m_inspector->client()->quitMessageLoopOnPause();
@@ -308,6 +317,7 @@ void V8Debugger::stepIntoStatement(int targetContextGroupId,
bool breakOnAsyncCall) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
+ if (asyncStepOutOfFunction(targetContextGroupId, true)) return;
m_targetContextGroupId = targetContextGroupId;
m_pauseOnAsyncCall = breakOnAsyncCall;
v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
@@ -317,6 +327,7 @@ void V8Debugger::stepIntoStatement(int targetContextGroupId,
void V8Debugger::stepOverStatement(int targetContextGroupId) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
+ if (asyncStepOutOfFunction(targetContextGroupId, true)) return;
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepNext);
continueProgram(targetContextGroupId);
@@ -325,11 +336,44 @@ void V8Debugger::stepOverStatement(int targetContextGroupId) {
void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
DCHECK(isPaused());
DCHECK(targetContextGroupId);
+ if (asyncStepOutOfFunction(targetContextGroupId, false)) return;
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
continueProgram(targetContextGroupId);
}
+bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
+ bool onlyAtReturn) {
+ auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
+ DCHECK(!iterator->Done());
+ bool atReturn = !iterator->GetReturnValue().IsEmpty();
+ iterator->Advance();
+ // Synchronous stack has more than one frame.
+ if (!iterator->Done()) return false;
+ // There is only one synchronous frame but we are not at return position and
+ // user requests stepOver or stepInto.
+ if (onlyAtReturn && !atReturn) return false;
+ // If we are inside async function, current async parent was captured when
+ // async function was suspended first time and we install that stack as
+ // current before resume async function. So it represents current async
+ // function.
+ auto current = currentAsyncParent();
+ if (!current) return false;
+ // Look up the parent async function.
+ auto parent = current->parent();
+ if (parent.expired()) return false;
+ // Parent async stack will have suspended task id iff callee async function
+ // is awaiting current async function. We can make stepOut there only in this
+ // case.
+ void* parentTask =
+ std::shared_ptr<AsyncStackTrace>(parent)->suspendedTaskId();
+ if (!parentTask) return false;
+ pauseOnAsyncCall(targetContextGroupId,
+ reinterpret_cast<uintptr_t>(parentTask), String16());
+ continueProgram(targetContextGroupId);
+ return true;
+}
+
void V8Debugger::scheduleStepIntoAsync(
std::unique_ptr<ScheduleStepIntoAsyncCallback> callback,
int targetContextGroupId) {
@@ -488,9 +532,6 @@ void V8Debugger::handleProgramBreak(
});
{
v8::Context::Scope scope(pausedContext);
- v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
- CHECK(!context.IsEmpty() &&
- context != v8::debug::GetDebugContext(m_isolate));
m_inspector->client()->runMessageLoopOnPause(contextGroupId);
m_pausedContextGroupId = 0;
}
@@ -521,9 +562,11 @@ size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context = thisPtr->m_isolate->GetEnteredContext();
- DCHECK(!context.IsEmpty());
- thisPtr->setPauseOnNextStatement(
- true, thisPtr->m_inspector->contextGroupId(context));
+ thisPtr->m_targetContextGroupId =
+ context.IsEmpty() ? 0 : thisPtr->m_inspector->contextGroupId(context);
+ thisPtr->m_isolate->RequestInterrupt(
+ [](v8::Isolate* isolate, void*) { v8::debug::BreakRightNow(isolate); },
+ nullptr);
return HeapLimitForDebugging(initial_heap_limit);
}
@@ -555,13 +598,12 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
}
void V8Debugger::BreakProgramRequested(
- v8::Local<v8::Context> pausedContext, v8::Local<v8::Object>,
+ v8::Local<v8::Context> pausedContext,
const std::vector<v8::debug::BreakpointId>& break_points_hit) {
handleProgramBreak(pausedContext, v8::Local<v8::Value>(), break_points_hit);
}
void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
- v8::Local<v8::Object>,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise,
bool isUncaught) {
@@ -591,15 +633,12 @@ bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
return hasAgents && allBlackboxed;
}
-void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
- int id, bool isBlackboxed) {
+void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
+ int id, bool isBlackboxed) {
// Async task events from Promises are given misaligned pointers to prevent
// from overlapping with other Blink task identifiers.
void* task = reinterpret_cast<void*>(id * 2 + 1);
switch (type) {
- case v8::debug::kDebugAsyncFunctionPromiseCreated:
- asyncTaskScheduledForStack("async function", task, true);
- break;
case v8::debug::kDebugPromiseThen:
asyncTaskScheduledForStack("Promise.then", task, false);
if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
@@ -620,6 +659,20 @@ void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
asyncTaskFinishedForStack(task);
asyncTaskFinishedForStepping(task);
break;
+ case v8::debug::kAsyncFunctionSuspended: {
+ if (m_asyncTaskStacks.find(task) == m_asyncTaskStacks.end()) {
+ asyncTaskScheduledForStack("async function", task, true);
+ }
+ auto stackIt = m_asyncTaskStacks.find(task);
+ if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
+ std::shared_ptr<AsyncStackTrace> stack(stackIt->second);
+ stack->setSuspendedTaskId(task);
+ }
+ break;
+ }
+ case v8::debug::kAsyncFunctionFinished:
+ asyncTaskCanceledForStack(task);
+ break;
}
}
@@ -664,14 +717,9 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
}
String16 type = v8_inspector::scopeType(iterator->GetType());
String16 name;
- v8::Local<v8::Function> closure = iterator->GetFunction();
- if (!closure.IsEmpty()) {
- name = toProtocolStringWithTypeCheck(closure->GetDebugName());
- } else {
- v8::Local<v8::Value> maybe_name = iterator->GetFunctionDebugName();
- if (!maybe_name->IsUndefined()) {
- name = toProtocolStringWithTypeCheck(maybe_name);
- }
+ v8::Local<v8::Value> maybe_name = iterator->GetFunctionDebugName();
+ if (!maybe_name->IsUndefined()) {
+ name = toProtocolStringWithTypeCheck(maybe_name);
}
v8::Local<v8::Object> object = iterator->GetObject();
createDataProperty(context, scope,
@@ -799,6 +847,8 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
m_inspector->client()->maxAsyncCallStackDepthChanged(
m_maxAsyncCallStackDepth);
if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
+ v8::debug::SetAsyncEventDelegate(m_isolate,
+ maxAsyncCallStackDepth ? this : nullptr);
}
std::shared_ptr<AsyncStackTrace> V8Debugger::stackTraceFor(
@@ -851,7 +901,7 @@ void V8Debugger::externalAsyncTaskStarted(const V8StackTraceId& parent) {
reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) == parent.id &&
m_taskWithScheduledBreakDebuggerId ==
debuggerIdToString(parent.debugger_id)) {
- v8::debug::DebugBreak(m_isolate);
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
}
}
@@ -871,7 +921,7 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
m_taskWithScheduledBreak = nullptr;
m_taskWithScheduledBreakDebuggerId = String16();
if (m_breakRequested) return;
- v8::debug::CancelDebugBreak(m_isolate);
+ v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
@@ -928,8 +978,10 @@ void V8Debugger::asyncTaskStartedForStack(void* task) {
// - asyncTaskFinished
m_currentTasks.push_back(task);
AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
- if (stackIt != m_asyncTaskStacks.end()) {
- m_currentAsyncParent.push_back(stackIt->second.lock());
+ if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
+ std::shared_ptr<AsyncStackTrace> stack(stackIt->second);
+ stack->setSuspendedTaskId(nullptr);
+ m_currentAsyncParent.push_back(stack);
} else {
m_currentAsyncParent.emplace_back();
}
@@ -980,7 +1032,7 @@ void V8Debugger::asyncTaskStartedForStepping(void* task) {
// blackboxing.
if (m_taskWithScheduledBreakDebuggerId.isEmpty() &&
task == m_taskWithScheduledBreak) {
- v8::debug::DebugBreak(m_isolate);
+ v8::debug::SetBreakOnNextFunctionCall(m_isolate);
}
}
@@ -991,7 +1043,7 @@ void V8Debugger::asyncTaskFinishedForStepping(void* task) {
}
m_taskWithScheduledBreak = nullptr;
if (m_breakRequested) return;
- v8::debug::CancelDebugBreak(m_isolate);
+ v8::debug::ClearBreakOnNextFunctionCall(m_isolate);
}
void V8Debugger::asyncTaskCanceledForStepping(void* task) {
@@ -1076,7 +1128,9 @@ std::shared_ptr<StackFrame> V8Debugger::symbolize(
frameId = v8::debug::GetStackFrameId(v8Frame);
it = m_framesCache.find(frameId);
}
- if (it != m_framesCache.end() && it->second.lock()) return it->second.lock();
+ if (it != m_framesCache.end() && !it->second.expired()) {
+ return std::shared_ptr<StackFrame>(it->second);
+ }
std::shared_ptr<StackFrame> frame(new StackFrame(v8Frame));
// TODO(clemensh): Figure out a way to do this translation only right before
// sending the stack trace over wire.
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 351e5b66ad..72962dde31 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -7,6 +7,7 @@
#include <list>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "src/base/macros.h"
@@ -35,7 +36,8 @@ using ScheduleStepIntoAsyncCallback =
using TerminateExecutionCallback =
protocol::Runtime::Backend::TerminateExecutionCallback;
-class V8Debugger : public v8::debug::DebugDelegate {
+class V8Debugger : public v8::debug::DebugDelegate,
+ public v8::debug::AsyncEventDelegate {
public:
V8Debugger(v8::Isolate*, V8InspectorImpl*);
~V8Debugger();
@@ -49,10 +51,11 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
bool canBreakProgram();
void breakProgram(int targetContextGroupId);
+ void interruptAndBreak(int targetContextGroupId);
void continueProgram(int targetContextGroupId);
void breakProgramOnAssert(int targetContextGroupId);
- void setPauseOnNextStatement(bool, int targetContextGroupId);
+ void setPauseOnNextCall(bool, int targetContextGroupId);
void stepIntoStatement(int targetContextGroupId, bool breakOnAsyncCall);
void stepOverStatement(int targetContextGroupId);
void stepOutOfFunction(int targetContextGroupId);
@@ -169,21 +172,22 @@ class V8Debugger : public v8::debug::DebugDelegate {
void asyncTaskCanceledForStepping(void* task);
// v8::debug::DebugEventListener implementation.
- void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
- bool isBlackboxed) override;
+ void AsyncEventOccurred(v8::debug::DebugAsyncActionType type, int id,
+ bool isBlackboxed) override;
void ScriptCompiled(v8::Local<v8::debug::Script> script, bool is_live_edited,
bool has_compile_error) override;
void BreakProgramRequested(
- v8::Local<v8::Context> paused_context, v8::Local<v8::Object>,
+ v8::Local<v8::Context> paused_context,
const std::vector<v8::debug::BreakpointId>& break_points_hit) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
- v8::Local<v8::Object>, v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught) override;
bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
const v8::debug::Location& start,
const v8::debug::Location& end) override;
int currentContextGroupId();
+ bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn);
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
@@ -200,9 +204,9 @@ class V8Debugger : public v8::debug::DebugDelegate {
std::unique_ptr<V8StackTraceImpl> m_continueToLocationStack;
using AsyncTaskToStackTrace =
- protocol::HashMap<void*, std::weak_ptr<AsyncStackTrace>>;
+ std::unordered_map<void*, std::weak_ptr<AsyncStackTrace>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
- protocol::HashSet<void*> m_recurringTasks;
+ std::unordered_set<void*> m_recurringTasks;
int m_maxAsyncCallStacks;
int m_maxAsyncCallStackDepth;
@@ -218,7 +222,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
std::list<std::shared_ptr<AsyncStackTrace>> m_allAsyncStacks;
std::unordered_map<int, std::weak_ptr<StackFrame>> m_framesCache;
- protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+ std::unordered_map<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
void* m_taskWithScheduledBreak = nullptr;
String16 m_taskWithScheduledBreakDebuggerId;
@@ -230,13 +234,13 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8_inspector::V8StackTraceId m_scheduledAsyncCall;
using StackTraceIdToStackTrace =
- protocol::HashMap<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
+ std::unordered_map<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
StackTraceIdToStackTrace m_storedStackTraces;
uintptr_t m_lastStackTraceId = 0;
- protocol::HashMap<int, std::pair<int64_t, int64_t>>
+ std::unordered_map<int, std::pair<int64_t, int64_t>>
m_contextGroupIdToDebuggerId;
- protocol::HashMap<String16, std::pair<int64_t, int64_t>>
+ std::unordered_map<String16, std::pair<int64_t, int64_t>>
m_serializedDebuggerIdToDebuggerId;
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 0eb87da94b..6272e4b4b2 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -206,6 +206,7 @@ void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
(*contextById)[contextId].reset(context);
forEachSession(
info.contextGroupId, [&context](V8InspectorSessionImpl* session) {
+ session->runtimeAgent()->addBindings(context);
session->runtimeAgent()->reportExecutionContextCreated(context);
});
}
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 73a9253e9e..b255feed40 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -33,6 +33,7 @@
#include <functional>
#include <map>
+#include <unordered_map>
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
@@ -147,23 +148,23 @@ class V8InspectorImpl : public V8Inspector {
int m_lastSessionId = 0;
uint64_t m_isolateId;
- using MuteExceptionsMap = protocol::HashMap<int, int>;
+ using MuteExceptionsMap = std::unordered_map<int, int>;
MuteExceptionsMap m_muteExceptionsMap;
using ContextByIdMap =
- protocol::HashMap<int, std::unique_ptr<InspectedContext>>;
+ std::unordered_map<int, std::unique_ptr<InspectedContext>>;
using ContextsByGroupMap =
- protocol::HashMap<int, std::unique_ptr<ContextByIdMap>>;
+ std::unordered_map<int, std::unique_ptr<ContextByIdMap>>;
ContextsByGroupMap m_contexts;
// contextGroupId -> sessionId -> session
- protocol::HashMap<int, std::map<int, V8InspectorSessionImpl*>> m_sessions;
+ std::unordered_map<int, std::map<int, V8InspectorSessionImpl*>> m_sessions;
using ConsoleStorageMap =
- protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
+ std::unordered_map<int, std::unique_ptr<V8ConsoleMessageStorage>>;
ConsoleStorageMap m_consoleStorageMap;
- protocol::HashMap<int, int> m_contextIdToGroupIdMap;
+ std::unordered_map<int, int> m_contextIdToGroupIdMap;
std::unique_ptr<V8Console> m_console;
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 5e77868cd3..31111add16 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -54,6 +54,7 @@ namespace V8RuntimeAgentImplState {
static const char customObjectFormatterEnabled[] =
"customObjectFormatterEnabled";
static const char runtimeEnabled[] = "runtimeEnabled";
+static const char bindings[] = "bindings";
};
using protocol::Runtime::RemoteObject;
@@ -460,6 +461,14 @@ Response V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(bool enabled) {
return Response::OK();
}
+Response V8RuntimeAgentImpl::setMaxCallStackSizeToCapture(int size) {
+ if (size < 0) {
+ return Response::Error("maxCallStackSizeToCapture should be non-negative");
+ }
+ V8StackTraceImpl::maxCallStackSizeToCapture = size;
+ return Response::OK();
+}
+
Response V8RuntimeAgentImpl::discardConsoleEntries() {
V8ConsoleMessageStorage* storage =
m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
@@ -639,6 +648,102 @@ void V8RuntimeAgentImpl::terminateExecution(
m_inspector->debugger()->terminateExecution(std::move(callback));
}
+Response V8RuntimeAgentImpl::addBinding(const String16& name,
+ Maybe<int> executionContextId) {
+ if (!m_state->getObject(V8RuntimeAgentImplState::bindings)) {
+ m_state->setObject(V8RuntimeAgentImplState::bindings,
+ protocol::DictionaryValue::create());
+ }
+ protocol::DictionaryValue* bindings =
+ m_state->getObject(V8RuntimeAgentImplState::bindings);
+ if (bindings->booleanProperty(name, false)) return Response::OK();
+ if (executionContextId.isJust()) {
+ int contextId = executionContextId.fromJust();
+ InspectedContext* context =
+ m_inspector->getContext(m_session->contextGroupId(), contextId);
+ if (!context) {
+ return Response::Error(
+ "Cannot find execution context with given executionContextId");
+ }
+ addBinding(context, name);
+ // false means that we should not add this binding later.
+ bindings->setBoolean(name, false);
+ return Response::OK();
+ }
+ bindings->setBoolean(name, true);
+ m_inspector->forEachContext(
+ m_session->contextGroupId(),
+ [&name, this](InspectedContext* context) { addBinding(context, name); });
+ return Response::OK();
+}
+
+void V8RuntimeAgentImpl::bindingCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ if (info.Length() != 1 || !info[0]->IsString()) {
+ info.GetIsolate()->ThrowException(toV8String(
+ isolate, "Invalid arguments: should be exactly one string."));
+ return;
+ }
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
+ int contextId = InspectedContext::contextId(isolate->GetCurrentContext());
+ int contextGroupId = inspector->contextGroupId(contextId);
+
+ String16 name = toProtocolString(v8::Local<v8::String>::Cast(info.Data()));
+ String16 payload = toProtocolString(v8::Local<v8::String>::Cast(info[0]));
+
+ inspector->forEachSession(
+ contextGroupId,
+ [&name, &payload, &contextId](V8InspectorSessionImpl* session) {
+ session->runtimeAgent()->bindingCalled(name, payload, contextId);
+ });
+}
+
+void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
+ const String16& name) {
+ v8::HandleScope handles(m_inspector->isolate());
+ v8::Local<v8::Context> localContext = context->context();
+ v8::Local<v8::Object> global = localContext->Global();
+ v8::Local<v8::String> v8Name = toV8String(m_inspector->isolate(), name);
+ v8::Local<v8::Value> functionValue;
+ v8::MicrotasksScope microtasks(m_inspector->isolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ if (v8::Function::New(localContext, bindingCallback, v8Name)
+ .ToLocal(&functionValue)) {
+ v8::Maybe<bool> success = global->Set(localContext, v8Name, functionValue);
+ USE(success);
+ }
+}
+
+Response V8RuntimeAgentImpl::removeBinding(const String16& name) {
+ protocol::DictionaryValue* bindings =
+ m_state->getObject(V8RuntimeAgentImplState::bindings);
+ if (!bindings) return Response::OK();
+ bindings->remove(name);
+ return Response::OK();
+}
+
+void V8RuntimeAgentImpl::bindingCalled(const String16& name,
+ const String16& payload,
+ int executionContextId) {
+ protocol::DictionaryValue* bindings =
+ m_state->getObject(V8RuntimeAgentImplState::bindings);
+ if (!bindings || !bindings->get(name)) return;
+ m_frontend.bindingCalled(name, payload, executionContextId);
+}
+
+void V8RuntimeAgentImpl::addBindings(InspectedContext* context) {
+ if (!m_enabled) return;
+ protocol::DictionaryValue* bindings =
+ m_state->getObject(V8RuntimeAgentImplState::bindings);
+ if (!bindings) return;
+ for (size_t i = 0; i < bindings->size(); ++i) {
+ if (!bindings->at(i).second) continue;
+ addBinding(context, bindings->at(i).first);
+ }
+}
+
void V8RuntimeAgentImpl::restore() {
if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
return;
@@ -647,6 +752,10 @@ void V8RuntimeAgentImpl::restore() {
if (m_state->booleanProperty(
V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
m_session->setCustomObjectFormatterEnabled(true);
+
+ m_inspector->forEachContext(
+ m_session->contextGroupId(),
+ [this](InspectedContext* context) { addBindings(context); });
}
Response V8RuntimeAgentImpl::enable() {
@@ -669,11 +778,15 @@ Response V8RuntimeAgentImpl::disable() {
if (!m_enabled) return Response::OK();
m_enabled = false;
m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
+ m_state->remove(V8RuntimeAgentImplState::bindings);
m_inspector->disableStackCapturingIfNeeded();
m_session->setCustomObjectFormatterEnabled(false);
reset();
m_inspector->client()->endEnsureAllContextsInGroup(
m_session->contextGroupId());
+ if (m_session->debuggerAgent() && !m_session->debuggerAgent()->enabled()) {
+ m_session->debuggerAgent()->setAsyncCallStackDepth(0);
+ }
return Response::OK();
}
@@ -730,5 +843,4 @@ bool V8RuntimeAgentImpl::reportMessage(V8ConsoleMessage* message,
m_frontend.flush();
return m_inspector->hasConsoleMessageStorage(m_session->contextGroupId());
}
-
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 420092e72a..bf76cb8491 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -31,6 +31,8 @@
#ifndef V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
+#include <unordered_map>
+
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
@@ -89,6 +91,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Response releaseObjectGroup(const String16& objectGroup) override;
Response runIfWaitingForDebugger() override;
Response setCustomObjectFormatterEnabled(bool) override;
+ Response setMaxCallStackSizeToCapture(int) override;
Response discardConsoleEntries() override;
Response compileScript(const String16& expression, const String16& sourceURL,
bool persistScript, Maybe<int> executionContextId,
@@ -110,6 +113,11 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
void terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) override;
+ Response addBinding(const String16& name,
+ Maybe<int> executionContextId) override;
+ Response removeBinding(const String16& name) override;
+ void addBindings(InspectedContext* context);
+
void reset();
void reportExecutionContextCreated(InspectedContext*);
void reportExecutionContextDestroyed(InspectedContext*);
@@ -121,12 +129,17 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
private:
bool reportMessage(V8ConsoleMessage*, bool generatePreview);
+ static void bindingCallback(const v8::FunctionCallbackInfo<v8::Value>& args);
+ void bindingCalled(const String16& name, const String16& payload,
+ int executionContextId);
+ void addBinding(InspectedContext* context, const String16& name);
+
V8InspectorSessionImpl* m_session;
protocol::DictionaryValue* m_state;
protocol::Runtime::Frontend m_frontend;
V8InspectorImpl* m_inspector;
bool m_enabled;
- protocol::HashMap<String16, std::unique_ptr<v8::Global<v8::Script>>>
+ std::unordered_map<String16, std::unique_ptr<v8::Global<v8::Script>>>
m_compiledScripts;
DISALLOW_COPY_AND_ASSIGN(V8RuntimeAgentImpl);
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index c8965745a9..75293c59af 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -11,6 +11,8 @@
namespace v8_inspector {
+int V8StackTraceImpl::maxCallStackSizeToCapture = 200;
+
namespace {
static const v8::StackTrace::StackTraceOptions stackTraceOptions =
@@ -23,9 +25,9 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
int maxStackSize) {
DCHECK(debugger->isolate()->InContext());
int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
- std::vector<std::shared_ptr<StackFrame>> frames;
+ std::vector<std::shared_ptr<StackFrame>> frames(frameCount);
for (int i = 0; i < frameCount; ++i) {
- frames.push_back(debugger->symbolize(v8StackTrace->GetFrame(i)));
+ frames[i] = debugger->symbolize(v8StackTrace->GetFrame(i));
}
return frames;
}
@@ -216,10 +218,12 @@ std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
}
StringView V8StackTraceImpl::firstNonEmptySourceURL() const {
- for (size_t i = 0; i < m_frames.size(); ++i) {
- if (m_frames[i]->sourceURL().length()) {
- return toStringView(m_frames[i]->sourceURL());
+ StackFrameIterator current(this);
+ while (!current.done()) {
+ if (current.frame()->sourceURL().length()) {
+ return toStringView(current.frame()->sourceURL());
}
+ current.next();
}
return StringView();
}
@@ -369,6 +373,7 @@ AsyncStackTrace::AsyncStackTrace(
const V8StackTraceId& externalParent)
: m_contextGroupId(contextGroupId),
m_id(0),
+ m_suspendedTaskId(nullptr),
m_description(description),
m_frames(std::move(frames)),
m_asyncParent(asyncParent),
@@ -386,6 +391,12 @@ AsyncStackTrace::buildInspectorObject(V8Debugger* debugger,
int AsyncStackTrace::contextGroupId() const { return m_contextGroupId; }
+void AsyncStackTrace::setSuspendedTaskId(void* task) {
+ m_suspendedTaskId = task;
+}
+
+void* AsyncStackTrace::suspendedTaskId() const { return m_suspendedTaskId; }
+
uintptr_t AsyncStackTrace::store(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace> stack) {
if (stack->m_id) return stack->m_id;
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index e0327a5dae..a8f23c48b6 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -48,7 +48,7 @@ class V8StackTraceImpl : public V8StackTrace {
public:
static void setCaptureStackTraceForUncaughtExceptions(v8::Isolate*,
bool capture);
- static const int maxCallStackSizeToCapture = 200;
+ static int maxCallStackSizeToCapture;
static std::unique_ptr<V8StackTraceImpl> create(V8Debugger*,
int contextGroupId,
v8::Local<v8::StackTrace>,
@@ -117,6 +117,16 @@ class AsyncStackTrace {
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObject(
V8Debugger* debugger, int maxAsyncDepth) const;
+ // If async stack has suspended task id, it means that at moment when we
+ // capture current stack trace we suspended corresponded asynchronous
+ // execution flow and it is possible to request pause for a momemnt when
+ // that flow is resumed.
+ // E.g. every time when we suspend async function we mark corresponded async
+ // stack as suspended and every time when this function is resumed we remove
+ // suspendedTaskId.
+ void setSuspendedTaskId(void* task);
+ void* suspendedTaskId() const;
+
int contextGroupId() const;
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
@@ -135,6 +145,7 @@ class AsyncStackTrace {
int m_contextGroupId;
uintptr_t m_id;
+ void* m_suspendedTaskId;
String16 m_description;
std::vector<std::shared_ptr<StackFrame>> m_frames;
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
index ff140fe9e5..4b2a9012d6 100644
--- a/deps/v8/src/instruction-stream.cc
+++ b/deps/v8/src/instruction-stream.cc
@@ -13,17 +13,16 @@ namespace internal {
// static
bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
-#ifdef V8_EMBEDDED_BUILTINS
- const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
- return start <= pc && pc < start + isolate->embedded_blob_size();
-#else
- return false;
-#endif
+ if (FLAG_embedded_builtins) {
+ const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
+ return start <= pc && pc < start + isolate->embedded_blob_size();
+ } else {
+ return false;
+ }
}
// static
Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
-#ifdef V8_EMBEDDED_BUILTINS
if (!PcIsOffHeap(isolate, address)) return nullptr;
EmbeddedData d = EmbeddedData::FromBlob();
@@ -44,12 +43,8 @@ Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
}
UNREACHABLE();
-#else
- return nullptr;
-#endif
}
-#ifdef V8_EMBEDDED_BUILTINS
// static
void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
uint8_t** data,
@@ -80,7 +75,6 @@ void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
CHECK(FreePages(data, RoundUp(size, page_size)));
}
-#endif // V8_EMBEDDED_BUILTINS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/instruction-stream.h b/deps/v8/src/instruction-stream.h
index d659de4266..25129871db 100644
--- a/deps/v8/src/instruction-stream.h
+++ b/deps/v8/src/instruction-stream.h
@@ -24,7 +24,6 @@ class InstructionStream final : public AllStatic {
// Returns the corresponding Code object if it exists, and nullptr otherwise.
static Code* TryLookupCode(Isolate* isolate, Address address);
-#ifdef V8_EMBEDDED_BUILTINS
// During snapshot creation, we first create an executable off-heap area
// containing all off-heap code. The area is guaranteed to be contiguous.
// Note that this only applies when building the snapshot, e.g. for
@@ -32,7 +31,6 @@ class InstructionStream final : public AllStatic {
static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** data,
uint32_t* size);
static void FreeOffHeapInstructionStream(uint8_t* data, uint32_t size);
-#endif
};
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index bd94373c5b..85c2b56e21 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -7,29 +7,27 @@
namespace v8 {
namespace internal {
-
void CallInterfaceDescriptorData::InitializePlatformSpecific(
- int register_parameter_count, const Register* registers,
- PlatformInterfaceDescriptor* platform_descriptor) {
- platform_specific_descriptor_ = platform_descriptor;
+ int register_parameter_count, const Register* registers) {
register_param_count_ = register_parameter_count;
// InterfaceDescriptor owns a copy of the registers array.
- register_params_.reset(NewArray<Register>(register_parameter_count, no_reg));
+ register_params_ = NewArray<Register>(register_parameter_count, no_reg);
for (int i = 0; i < register_parameter_count; i++) {
register_params_[i] = registers[i];
}
}
void CallInterfaceDescriptorData::InitializePlatformIndependent(
- int parameter_count, int extra_parameter_count,
- const MachineType* machine_types) {
- // InterfaceDescriptor owns a copy of the MachineType array.
- // We only care about parameters, not receiver and result.
- param_count_ = parameter_count + extra_parameter_count;
- machine_types_.reset(NewArray<MachineType>(param_count_));
- for (int i = 0; i < param_count_; i++) {
- if (machine_types == nullptr || i >= parameter_count) {
+ Flags flags, int return_count, int parameter_count,
+ const MachineType* machine_types, int machine_types_length) {
+ flags_ = flags;
+ return_count_ = return_count;
+ param_count_ = parameter_count;
+ int types_length = return_count_ + param_count_;
+ machine_types_ = NewArray<MachineType>(types_length);
+ for (int i = 0; i < types_length; i++) {
+ if (machine_types == nullptr || i >= machine_types_length) {
machine_types_[i] = MachineType::AnyTagged();
} else {
machine_types_[i] = machine_types[i];
@@ -37,15 +35,63 @@ void CallInterfaceDescriptorData::InitializePlatformIndependent(
}
}
-const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
- CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
- size_t index = data_ - start;
- DCHECK(index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
- CallDescriptors::Key key = static_cast<CallDescriptors::Key>(index);
+void CallInterfaceDescriptorData::Reset() {
+ delete[] machine_types_;
+ machine_types_ = nullptr;
+ delete[] register_params_;
+ register_params_ = nullptr;
+}
+
+// static
+CallInterfaceDescriptorData
+ CallDescriptors::call_descriptor_data_[NUMBER_OF_DESCRIPTORS];
+
+void CallDescriptors::InitializeOncePerProcess() {
+#define INTERFACE_DESCRIPTOR(name, ...) \
+ name##Descriptor().Initialize(&call_descriptor_data_[CallDescriptors::name]);
+ INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
+#undef INTERFACE_DESCRIPTOR
+
+ DCHECK(ContextOnlyDescriptor{}.HasContextParameter());
+ DCHECK(!NoContextDescriptor{}.HasContextParameter());
+ DCHECK(!AllocateDescriptor{}.HasContextParameter());
+ DCHECK(!AllocateHeapNumberDescriptor{}.HasContextParameter());
+ DCHECK(!AbortDescriptor{}.HasContextParameter());
+}
+
+void CallDescriptors::TearDown() {
+ for (CallInterfaceDescriptorData& data : call_descriptor_data_) {
+ data.Reset();
+ }
+}
+
+void CallInterfaceDescriptor::JSDefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int non_js_register_parameter_count) {
+ DCHECK_LE(static_cast<unsigned>(non_js_register_parameter_count), 1);
+
+ // 3 is for kTarget, kNewTarget and kActualArgumentsCount
+ int register_parameter_count = 3 + non_js_register_parameter_count;
+
+ DCHECK(!AreAliased(
+ kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register));
+
+ const Register default_js_stub_registers[] = {
+ kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register};
+
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_js_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_js_stub_registers);
+}
+
+const char* CallInterfaceDescriptor::DebugName() const {
+ CallDescriptors::Key key = CallDescriptors::GetKey(data_);
switch (key) {
-#define DEF_CASE(NAME, ...) \
- case CallDescriptors::NAME: \
- return #NAME " Descriptor";
+#define DEF_CASE(name, ...) \
+ case CallDescriptors::name: \
+ return #name " Descriptor";
INTERFACE_DESCRIPTOR_LIST(DEF_CASE)
#undef DEF_CASE
case CallDescriptors::NUMBER_OF_DESCRIPTORS:
@@ -60,12 +106,17 @@ void VoidDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-void FastNewFunctionContextDescriptor::InitializePlatformIndependent(
+void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ Register registers[] = {kAllocateSizeRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CEntry1ArgvOnStackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {kRuntimeCallArgCountRegister,
+ kRuntimeCallFunctionRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
@@ -88,35 +139,6 @@ const Register FastNewObjectDescriptor::NewTargetRegister() {
return kJavaScriptCallNewTargetRegister;
}
-void FastNewArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TargetRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-const Register FastNewArgumentsDescriptor::TargetRegister() {
- return kJSFunctionRegister;
-}
-
-void RecordWriteDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::Pointer(), MachineType::Pointer(),
- MachineType::TaggedSigned(),
- MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void LoadDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kSlot
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(),
- MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -124,47 +146,18 @@ void LoadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void LoadGlobalDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kName, kSlot
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void LoadGlobalDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kName, kSlot, kVector
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::TaggedSigned(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), SlotRegister(), VectorRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreGlobalDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kName, kValue, kSlot
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(),
- MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StoreGlobalDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
@@ -173,16 +166,6 @@ void StoreGlobalDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StoreGlobalWithVectorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kName, kValue, kSlot, kVector
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
@@ -191,16 +174,6 @@ void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StoreDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kValue, kSlot
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StoreDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
@@ -220,65 +193,11 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StoreTransitionDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kMap, kValue, kSlot, kVector
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void StoreNamedTransitionDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector, kName
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::TaggedSigned(),
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- ReceiverRegister(), FieldOffsetRegister(), MapRegister(),
- ValueRegister(), SlotRegister(), VectorRegister(),
- NameRegister(),
- };
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StringAtDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kPosition
- // TODO(turbofan): Allow builtins to return untagged values.
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::IntPtr()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StringAtDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void StringSubstringDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kString, kFrom, kTo
- // TODO(turbofan): Allow builtins to return untagged values.
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::IntPtr(), MachineType::IntPtr()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StringSubstringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
@@ -295,33 +214,6 @@ void TypeConversionStackParameterDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-void TypeConversionStackParameterDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformIndependent(data->register_param_count(), 1, nullptr);
-}
-
-void MathPowTaggedDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {exponent()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void MathPowIntegerDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {exponent()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadWithVectorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kSlot, kVector
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
@@ -329,17 +221,6 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreWithVectorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kValue, kSlot, kVector
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::TaggedSigned(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
void StoreWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
@@ -364,20 +245,15 @@ void ContextOnlyDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-void GrowArrayElementsDescriptor::InitializePlatformSpecific(
+void NoContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ObjectRegister(), KeyRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
-void NewArgumentsElementsDescriptor::InitializePlatformIndependent(
+void GrowArrayElementsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // kFrame, kLength, kMappedCount
- MachineType const kMachineTypes[] = {MachineType::Pointer(),
- MachineType::TaggedSigned(),
- MachineType::TaggedSigned()};
- data->InitializePlatformIndependent(arraysize(kMachineTypes), 0,
- kMachineTypes);
+ Register registers[] = {ObjectRegister(), KeyRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
@@ -385,244 +261,34 @@ void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 3);
}
-void CallTrampolineDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kFunction, kActualArgumentsCount
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void CallVarargsDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kActualArgumentsCount, kArgumentsList, kArgumentsLength
- MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kActualArgumentsCount, kStartIndex
- MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kArgumentsCount, kArgumentsList
- MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kArgumentsList
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kArgumentsList,
- // kArgumentsLength
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged(), MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kStartIndex
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kArgumentsCount, kSpread
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kArgumentsList
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructStubDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ConstructTrampolineDescriptor::InitializePlatformIndependent(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // kFunction, kNewTarget, kActualArgumentsCount
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
}
-void BuiltinDescriptor::InitializePlatformIndependent(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kArgumentsCount
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
}
-void BuiltinDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TargetRegister(), NewTargetRegister(),
- ArgumentsCountRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-const Register BuiltinDescriptor::ArgumentsCountRegister() {
- return kJavaScriptCallArgCountRegister;
-}
-const Register BuiltinDescriptor::NewTargetRegister() {
- return kJavaScriptCallNewTargetRegister;
-}
-
-const Register BuiltinDescriptor::TargetRegister() {
- return kJSFunctionRegister;
-}
-
-void ArrayConstructorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
- MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter,
- // kArraySizeSmiParameter
- MachineType machine_types[] = {
- MachineType::TaggedPointer(), MachineType::AnyTagged(),
- MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformIndependent(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // Keep the arguments on the same registers as they were in
+ // ArrayConstructorDescriptor to avoid unnecessary register moves.
// kFunction, kAllocationSite, kActualArgumentsCount
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ArgumentAdaptorDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kFunction, kNewTarget, kActualArgumentsCount, kExpectedArgumentsCount
- MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::Int32()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void ApiCallbackDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kTargetContext, kCallData, kHolder, kApiFunctionAddress
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Pointer()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
- MachineType::IntPtr()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kNumberOfArguments, kFirstArgument, kFunction
- MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kNumberOfArguments, kNewTarget, kConstructor, kFeedbackElement,
- // kFirstArgument
- MachineType machine_types[] = {
- MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Pointer()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void InterpreterCEntryDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kNumberOfArguments, kFirstArgument, kFunctionEntry
- MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ Register registers[] = {kJavaScriptCallTargetRegister,
+ kJavaScriptCallExtraArg1Register,
+ kJavaScriptCallArgCountRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FrameDropperTrampolineDescriptor::InitializePlatformIndependent(
+void WasmGrowMemoryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // New FP value.
- MachineType machine_types[] = {MachineType::Pointer()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
+ DefaultInitializePlatformSpecific(data, kParameterCount);
}
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 3f047098b1..cf841383b2 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -15,24 +15,24 @@
namespace v8 {
namespace internal {
-class PlatformInterfaceDescriptor;
-
#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(CppBuiltinAdaptor) \
+ V(CEntry1ArgvOnStack) \
+ V(Allocate) \
V(Void) \
V(ContextOnly) \
+ V(NoContext) \
V(Load) \
V(LoadWithVector) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
V(StoreWithVector) \
- V(StoreNamedTransition) \
V(StoreTransition) \
V(StoreGlobal) \
V(StoreGlobalWithVector) \
V(FastNewFunctionContext) \
V(FastNewObject) \
- V(FastNewArguments) \
V(RecordWrite) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
@@ -48,13 +48,10 @@ class PlatformInterfaceDescriptor;
V(ConstructForwardVarargs) \
V(ConstructWithSpread) \
V(ConstructWithArrayLike) \
- V(ConstructTrampoline) \
- V(TransitionElementsKind) \
- V(AbortJS) \
+ V(JSTrampoline) \
+ V(Abort) \
V(AllocateHeapNumber) \
- V(Builtin) \
V(ArrayConstructor) \
- V(IteratingArrayBuiltin) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -62,61 +59,72 @@ class PlatformInterfaceDescriptor;
V(BinaryOp) \
V(StringAt) \
V(StringSubstring) \
- V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
V(ApiCallback) \
V(ApiGetter) \
- V(MathPowTagged) \
- V(MathPowInteger) \
V(GrowArrayElements) \
V(NewArgumentsElements) \
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
- V(InterpreterCEntry) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
- V(WasmRuntimeCall) \
V(RunMicrotasks) \
+ V(WasmGrowMemory) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
- CallInterfaceDescriptorData()
- : register_param_count_(-1),
- param_count_(-1),
- allocatable_registers_(0) {}
+ enum Flag {
+ kNoFlags = 0u,
+ kNoContext = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ CallInterfaceDescriptorData() = default;
// A copy of the passed in registers and param_representations is made
// and owned by the CallInterfaceDescriptorData.
- void InitializePlatformSpecific(
- int register_parameter_count, const Register* registers,
- PlatformInterfaceDescriptor* platform_descriptor = nullptr);
+ void InitializePlatformSpecific(int register_parameter_count,
+ const Register* registers);
// if machine_types is null, then an array of size
- // (parameter_count + extra_parameter_count) will be created with
+ // (return_count + parameter_count) will be created with
// MachineType::AnyTagged() for each member.
//
// if machine_types is not null, then it should be of the size
- // parameter_count. Those members of the parameter array will be initialized
- // from {machine_types}, and the rest initialized to MachineType::AnyTagged().
- void InitializePlatformIndependent(int parameter_count,
- int extra_parameter_count,
- const MachineType* machine_types);
+ // (return_count + parameter_count). Those members of the parameter array will
+ // be initialized from {machine_types}, and the rest initialized to
+ // MachineType::AnyTagged().
+ void InitializePlatformIndependent(Flags flags, int return_count,
+ int parameter_count,
+ const MachineType* machine_types,
+ int machine_types_length);
+
+ void Reset();
bool IsInitialized() const {
- return register_param_count_ >= 0 && param_count_ >= 0;
+ return register_param_count_ >= 0 && return_count_ >= 0 &&
+ param_count_ >= 0;
}
+ Flags flags() const { return flags_; }
+ int return_count() const { return return_count_; }
int param_count() const { return param_count_; }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
- Register* register_params() const { return register_params_.get(); }
- MachineType param_type(int index) const { return machine_types_[index]; }
- PlatformInterfaceDescriptor* platform_specific_descriptor() const {
- return platform_specific_descriptor_;
+ Register* register_params() const { return register_params_; }
+ MachineType return_type(int index) const {
+ DCHECK_LT(index, return_count_);
+ return machine_types_[index];
+ }
+ MachineType param_type(int index) const {
+ DCHECK_LT(index, param_count_);
+ return machine_types_[return_count_ + index];
}
void RestrictAllocatableRegisters(const Register* registers, int num) {
@@ -130,27 +138,28 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
RegList allocatable_registers() const { return allocatable_registers_; }
private:
- int register_param_count_;
- int param_count_;
+ int register_param_count_ = -1;
+ int return_count_ = -1;
+ int param_count_ = -1;
+ Flags flags_ = kNoFlags;
// Specifying the set of registers that could be used by the register
// allocator. Currently, it's only used by RecordWrite code stub.
- RegList allocatable_registers_;
-
- // The Register params are allocated dynamically by the
- // InterfaceDescriptor, and freed on destruction. This is because static
- // arrays of Registers cause creation of runtime static initializers
- // which we don't want.
- std::unique_ptr<Register[]> register_params_;
- std::unique_ptr<MachineType[]> machine_types_;
+ RegList allocatable_registers_ = 0;
- PlatformInterfaceDescriptor* platform_specific_descriptor_;
+ // |registers_params_| defines registers that are used for parameter passing.
+ // |machine_types_| defines machine types for resulting values and incomping
+ // parameters.
+ // Both arrays are allocated dynamically by the InterfaceDescriptor and
+ // freed on destruction. This is because static arrays cause creation of
+ // runtime static initializers which we don't want.
+ Register* register_params_ = nullptr;
+ MachineType* machine_types_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(CallInterfaceDescriptorData);
};
-
-class CallDescriptors {
+class V8_EXPORT_PRIVATE CallDescriptors : public AllStatic {
public:
enum Key {
#define DEF_ENUM(name, ...) name,
@@ -158,15 +167,49 @@ class CallDescriptors {
#undef DEF_ENUM
NUMBER_OF_DESCRIPTORS
};
+
+ static void InitializeOncePerProcess();
+ static void TearDown();
+
+ static CallInterfaceDescriptorData* call_descriptor_data(
+ CallDescriptors::Key key) {
+ return &call_descriptor_data_[key];
+ }
+
+ static Key GetKey(const CallInterfaceDescriptorData* data) {
+ ptrdiff_t index = data - call_descriptor_data_;
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, CallDescriptors::NUMBER_OF_DESCRIPTORS);
+ return static_cast<CallDescriptors::Key>(index);
+ }
+
+ private:
+ static CallInterfaceDescriptorData
+ call_descriptor_data_[NUMBER_OF_DESCRIPTORS];
};
class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
+ typedef CallInterfaceDescriptorData::Flags Flags;
+
CallInterfaceDescriptor() : data_(nullptr) {}
virtual ~CallInterfaceDescriptor() {}
- CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
- : data_(isolate->call_descriptor_data(key)) {}
+ CallInterfaceDescriptor(CallDescriptors::Key key)
+ : data_(CallDescriptors::call_descriptor_data(key)) {}
+
+ Flags flags() const { return data()->flags(); }
+
+ bool HasContextParameter() const {
+ return (flags() & CallInterfaceDescriptorData::kNoContext) == 0;
+ }
+
+ int GetReturnCount() const { return data()->return_count(); }
+
+ MachineType GetReturnType(int index) const {
+ DCHECK_LT(index, data()->return_count());
+ return data()->return_type(index);
+ }
int GetParameterCount() const { return data()->param_count(); }
@@ -183,22 +226,17 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
}
MachineType GetParameterType(int index) const {
- DCHECK(index < data()->param_count());
+ DCHECK_LT(index, data()->param_count());
return data()->param_type(index);
}
- // Some platforms have extra information to associate with the descriptor.
- PlatformInterfaceDescriptor* platform_specific_descriptor() const {
- return data()->platform_specific_descriptor();
- }
-
RegList allocatable_registers() const {
return data()->allocatable_registers();
}
static const Register ContextRegister();
- const char* DebugName(Isolate* isolate) const;
+ const char* DebugName() const;
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
@@ -209,19 +247,11 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
virtual void InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformIndependent(data->register_param_count(), 0,
- nullptr);
- }
-
- void Initialize(Isolate* isolate, CallDescriptors::Key key) {
- if (!data()->IsInitialized()) {
- // We should only initialize descriptors on the isolate's main thread.
- DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
- DCHECK(d == data()); // d should be a modifiable pointer to data().
- InitializePlatformSpecific(d);
- InitializePlatformIndependent(d);
- }
+ // Default descriptor configuration: one result, all parameters are passed
+ // in registers and all parameters have MachineType::AnyTagged() type.
+ data->InitializePlatformIndependent(CallInterfaceDescriptorData::kNoFlags,
+ 1, data->register_param_count(),
+ nullptr, 0);
}
// Initializes |data| using the platform dependent default set of registers.
@@ -230,133 +260,190 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
static void DefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int register_parameter_count);
+ // Initializes |data| using the platform dependent default set of registers
+ // for JavaScript-compatible calling convention.
+ // It is intended to be used for TurboFan stubs being called with JavaScript
+ // linkage + additional parameters on registers and stack.
+ static void JSDefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int non_js_register_parameter_count);
+
private:
+ // {CallDescriptors} is allowed to call the private {Initialize} method.
+ friend class CallDescriptors;
+
const CallInterfaceDescriptorData* data_;
+
+ void Initialize(CallInterfaceDescriptorData* data) {
+ // The passed pointer should be a modifiable pointer to our own data.
+ DCHECK_EQ(data, data_);
+ DCHECK(!data->IsInitialized());
+ InitializePlatformSpecific(data);
+ InitializePlatformIndependent(data);
+ DCHECK(data->IsInitialized());
+ }
};
-#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- public: \
- explicit name(Isolate* isolate) : base(isolate, key()) { \
- Initialize(isolate, key()); \
- } \
+#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ public: \
+ explicit name() : base(key()) {} \
static inline CallDescriptors::Key key();
-static const int kMaxBuiltinRegisterParams = 5;
-
-#define DECLARE_DEFAULT_DESCRIPTOR(name, base, parameter_count) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- static const int kRegisterParams = \
- parameter_count > kMaxBuiltinRegisterParams ? kMaxBuiltinRegisterParams \
- : parameter_count; \
- static const int kStackParams = parameter_count - kRegisterParams; \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- DefaultInitializePlatformSpecific(data, kRegisterParams); \
- } \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- data->InitializePlatformIndependent(kRegisterParams, kStackParams, \
- nullptr); \
- } \
- name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
- \
- public:
+constexpr int kMaxBuiltinRegisterParams = 5;
-#define DECLARE_DESCRIPTOR(name, base) \
+#define DECLARE_DEFAULT_DESCRIPTOR(name, base) \
DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
- name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+ static const int kRegisterParams = \
+ kParameterCount > kMaxBuiltinRegisterParams ? kMaxBuiltinRegisterParams \
+ : kParameterCount; \
+ static const int kStackParams = kParameterCount - kRegisterParams; \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ DefaultInitializePlatformSpecific(data, kRegisterParams); \
+ } \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
+ kParameterCount, nullptr, 0); \
+ } \
+ name(CallDescriptors::Key key) : base(key) {} \
\
public:
-#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
- DECLARE_DESCRIPTOR(name, base) \
- protected: \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override; \
- \
- public:
-
-#define DECLARE_DESCRIPTOR_WITH_STACK_ARGS(name, base) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- data->InitializePlatformIndependent(0, kParameterCount, nullptr); \
- } \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- data->InitializePlatformSpecific(0, nullptr); \
- } \
- \
- public:
+#define DECLARE_JS_COMPATIBLE_DESCRIPTOR(name, base, \
+ non_js_reg_parameters_count) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ JSDefaultInitializePlatformSpecific(data, non_js_reg_parameters_count); \
+ } \
+ name(CallDescriptors::Key key) : base(key) {} \
+ \
+ public:
+
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoFlags; \
+ static constexpr int kReturnCount = return_count; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
+ };
-#define DEFINE_EMPTY_PARAMETERS() \
- enum ParameterIndices { \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
+#define DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(return_count, ...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoContext; \
+ static constexpr int kReturnCount = return_count; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
+#define DEFINE_PARAMETERS(...) DEFINE_RESULT_AND_PARAMETERS(1, ##__VA_ARGS__)
+
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(1, ##__VA_ARGS__)
+
+#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ MachineType machine_types[] = {__VA_ARGS__}; \
+ static_assert( \
+ kReturnCount + kParameterCount == arraysize(machine_types), \
+ "Parameter names definition is not consistent with parameter types"); \
+ data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
+ kParameterCount, machine_types, \
+ arraysize(machine_types)); \
+ }
+
+#define DEFINE_PARAMETER_TYPES(...) \
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
+ ##__VA_ARGS__)
+
+#define DEFINE_JS_PARAMETERS(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoFlags; \
+ static constexpr int kReturnCount = 1; \
enum ParameterIndices { \
- __VA_ARGS__, \
+ kTarget, \
+ kNewTarget, \
+ kActualArgumentsCount, \
+ ##__VA_ARGS__, \
\
kParameterCount, \
kContext = kParameterCount /* implicit parameter */ \
};
-#define DECLARE_BUILTIN_DESCRIPTOR(name) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, BuiltinDescriptor) \
- protected: \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- MachineType machine_types[] = {MachineType::AnyTagged(), \
- MachineType::AnyTagged(), \
- MachineType::Int32()}; \
- data->InitializePlatformIndependent(arraysize(machine_types), \
- kStackParameterCount, machine_types); \
- } \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- Register registers[] = {TargetRegister(), NewTargetRegister(), \
- ArgumentsCountRegister()}; \
- data->InitializePlatformSpecific(arraysize(registers), registers); \
- } \
- \
- public:
-
-#define DEFINE_BUILTIN_PARAMETERS(...) \
- enum ParameterIndices { \
- kReceiver, \
- kBeforeFirstStackParameter = kReceiver, \
- __VA_ARGS__, \
- kAfterLastStackParameter, \
- kNewTarget = kAfterLastStackParameter, \
- kArgumentsCount, \
- kContext, /* implicit parameter */ \
- kParameterCount = kContext, \
- kArity = kAfterLastStackParameter - kBeforeFirstStackParameter - 1, \
- kStackParameterCount = kArity + 1 \
- };
+#define DEFINE_JS_PARAMETER_TYPES(...) \
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), /* kTarget */ \
+ MachineType::AnyTagged(), /* kNewTarget */ \
+ MachineType::Int32(), /* kActualArgumentsCount */ \
+ ##__VA_ARGS__)
+
+#define DECLARE_DESCRIPTOR(name, base) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
+ name(CallDescriptors::Key key) : base(key) {} \
+ \
+ public:
class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS()
+ DEFINE_PARAMETER_TYPES()
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
+class AllocateDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
+ MachineType::Int32()) // kRequestedSize
+ DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
+};
+
+// This descriptor defines the JavaScript calling convention that can be used
+// by stubs: target, new.target, argc (not including the receiver) and context
+// are passed in registers while receiver and the rest of the JS arguments are
+// passed on the stack.
+class JSTrampolineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_JS_PARAMETERS()
+ DEFINE_JS_PARAMETER_TYPES()
+
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor,
+ CallInterfaceDescriptor, 0)
+};
+
class ContextOnlyDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS()
+ DEFINE_PARAMETER_TYPES()
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
};
+class NoContextDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT()
+ DEFINE_PARAMETER_TYPES()
+ DECLARE_DESCRIPTOR(NoContextDescriptor, CallInterfaceDescriptor)
+};
+
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
static const Register ReceiverRegister();
static const Register NameRegister();
@@ -366,8 +453,9 @@ class LoadDescriptor : public CallInterfaceDescriptor {
class LoadGlobalDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kName, kSlot)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(LoadGlobalDescriptor, CallInterfaceDescriptor)
static const Register NameRegister() {
return LoadDescriptor::NameRegister();
@@ -381,8 +469,11 @@ class LoadGlobalDescriptor : public CallInterfaceDescriptor {
class StoreDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
static const Register ReceiverRegister();
static const Register NameRegister();
@@ -402,8 +493,13 @@ class StoreDescriptor : public CallInterfaceDescriptor {
class StoreTransitionDescriptor : public StoreDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreTransitionDescriptor,
- StoreDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kMap
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(StoreTransitionDescriptor, StoreDescriptor)
static const Register MapRegister();
static const Register SlotRegister();
@@ -413,29 +509,15 @@ class StoreTransitionDescriptor : public StoreDescriptor {
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
};
-class StoreNamedTransitionDescriptor : public StoreTransitionDescriptor {
- public:
- DEFINE_PARAMETERS(kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector,
- kName)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreNamedTransitionDescriptor,
- StoreTransitionDescriptor)
-
- // Always pass name on the stack.
- static const bool kPassLastArgsOnStack = true;
- static const int kStackArgumentsCount =
- StoreTransitionDescriptor::kStackArgumentsCount + 1;
-
- static const Register NameRegister() { return no_reg; }
- static const Register FieldOffsetRegister() {
- return StoreTransitionDescriptor::NameRegister();
- }
-};
-
class StoreWithVectorDescriptor : public StoreDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreWithVectorDescriptor,
- StoreDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(StoreWithVectorDescriptor, StoreDescriptor)
static const Register VectorRegister();
@@ -446,8 +528,10 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
class StoreGlobalDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(StoreGlobalDescriptor, CallInterfaceDescriptor)
static const bool kPassLastArgsOnStack =
StoreDescriptor::kPassLastArgsOnStack;
@@ -470,8 +554,11 @@ class StoreGlobalDescriptor : public CallInterfaceDescriptor {
class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalWithVectorDescriptor,
- StoreGlobalDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(StoreGlobalWithVectorDescriptor, StoreGlobalDescriptor)
static const Register VectorRegister() {
return StoreWithVectorDescriptor::VectorRegister();
@@ -484,8 +571,11 @@ class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
class LoadWithVectorDescriptor : public LoadDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadWithVectorDescriptor,
- LoadDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(LoadWithVectorDescriptor, LoadDescriptor)
static const Register VectorRegister();
};
@@ -493,8 +583,10 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalWithVectorDescriptor,
- LoadGlobalDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor, LoadGlobalDescriptor)
static const Register VectorRegister() {
return LoadWithVectorDescriptor::VectorRegister();
@@ -504,8 +596,9 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kScopeInfo, kSlots)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastNewFunctionContextDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo
+ MachineType::Int32()) // kSlots
+ DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor)
static const Register ScopeInfoRegister();
static const Register SlotsRegister();
@@ -514,28 +607,29 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
class FastNewObjectDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged()) // kNewTarget
DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
static const Register TargetRegister();
static const Register NewTargetRegister();
};
-class FastNewArgumentsDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kFunction)
- DECLARE_DESCRIPTOR(FastNewArgumentsDescriptor, CallInterfaceDescriptor)
- static const Register TargetRegister();
-};
-
class RecordWriteDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kSlot, kIsolate, kRememberedSet, kFPMode)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RecordWriteDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
+ MachineType::Pointer(), // kSlot
+ MachineType::Pointer(), // kIsolate
+ MachineType::TaggedSigned(), // kRememberedSet
+ MachineType::TaggedSigned()) // kFPMode
+
+ DECLARE_DESCRIPTOR(RecordWriteDescriptor, CallInterfaceDescriptor)
};
class TypeConversionDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kArgument)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
static const Register ArgumentRegister();
@@ -545,183 +639,185 @@ class TypeConversionStackParameterDescriptor final
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kArgument)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- TypeConversionStackParameterDescriptor, CallInterfaceDescriptor)
-};
-
-class ForInPrepareDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kObject)
- DECLARE_DEFAULT_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor,
- kParameterCount)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(TypeConversionStackParameterDescriptor,
+ CallInterfaceDescriptor)
};
class GetPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
- DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor, CallInterfaceDescriptor,
- kParameterCount)
+ DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor, CallInterfaceDescriptor)
};
class TypeofDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
DECLARE_DESCRIPTOR(TypeofDescriptor, CallInterfaceDescriptor)
};
class CallTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kActualArgumentsCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallTrampolineDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::Int32()) // kActualArgumentsCount
+ DECLARE_DESCRIPTOR(CallTrampolineDescriptor, CallInterfaceDescriptor)
};
class CallVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kArgumentsList,
kArgumentsLength)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallVarargsDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32()) // kArgumentsLength
+ DECLARE_DESCRIPTOR(CallVarargsDescriptor, CallInterfaceDescriptor)
};
class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kStartIndex)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallForwardVarargsDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::Int32()) // kStartIndex
+ DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor, CallInterfaceDescriptor)
};
class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallWithSpreadDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kArgumentsCount
+ MachineType::AnyTagged()) // kSpread
+ DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};
class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallWithArrayLikeDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged()) // kArgumentsList
+ DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kArgumentsList,
- kArgumentsLength)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructVarargsDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_JS_PARAMETERS(kArgumentsList, kArgumentsLength)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32()) // kArgumentsLength
+ DECLARE_DESCRIPTOR(ConstructVarargsDescriptor, CallInterfaceDescriptor)
};
class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kStartIndex)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
+ DEFINE_JS_PARAMETERS(kStartIndex)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
+ DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
};
class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsCount, kSpread)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructWithSpreadDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_JS_PARAMETERS(kSpread)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};
class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructWithArrayLikeDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged()) // kArgumentsList
+ DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(ishell): consider merging this with ArrayConstructorDescriptor
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
- kAllocationSite)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructStubDescriptor,
- CallInterfaceDescriptor)
-};
+ DEFINE_JS_PARAMETERS(kAllocationSite)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged());
-// This descriptor is also used by DebugBreakTrampoline because it handles both
-// regular function calls and construct calls, and we need to pass new.target
-// for the latter.
-class ConstructTrampolineDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructTrampolineDescriptor,
- CallInterfaceDescriptor)
+ // TODO(ishell): Use DECLARE_JS_COMPATIBLE_DESCRIPTOR if registers match
+ DECLARE_DESCRIPTOR(ConstructStubDescriptor, CallInterfaceDescriptor)
};
-
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kTarget)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
};
-class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kObject, kMap)
- DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
-};
-
-class AbortJSDescriptor : public CallInterfaceDescriptor {
+class AbortDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kObject)
- DECLARE_DESCRIPTOR(AbortJSDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS_NO_CONTEXT(kMessageOrMessageId)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
};
class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_EMPTY_PARAMETERS()
+ DEFINE_PARAMETERS_NO_CONTEXT()
+ DEFINE_PARAMETER_TYPES()
DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
};
-class BuiltinDescriptor : public CallInterfaceDescriptor {
+class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
- // TODO(ishell): Where is kFunction??
- DEFINE_PARAMETERS(kNewTarget, kArgumentsCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BuiltinDescriptor,
- CallInterfaceDescriptor)
- static const Register ArgumentsCountRegister();
- static const Register NewTargetRegister();
- static const Register TargetRegister();
-};
+ DEFINE_JS_PARAMETERS(kAllocationSite)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged());
-class IteratingArrayBuiltinDescriptor : public BuiltinDescriptor {
- public:
- DEFINE_BUILTIN_PARAMETERS(kCallback, kThisArg)
- DECLARE_BUILTIN_DESCRIPTOR(IteratingArrayBuiltinDescriptor)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor,
+ CallInterfaceDescriptor, 1)
};
-class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
- CallInterfaceDescriptor)
+ // This descriptor declares only register arguments while respective number
+ // of JS arguments stay on the expression stack.
+ // The ArrayNArgumentsConstructor builtin does not access stack arguments
+ // directly it just forwards them to the runtime function.
+ DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction,
+ MachineType::AnyTagged(), // kAllocationSite
+ MachineType::Int32()) // kActualArgumentsCount
+ DECLARE_DESCRIPTOR(ArrayNArgumentsConstructorDescriptor,
+ CallInterfaceDescriptor)
};
-class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNoArgumentConstructorDescriptor
+ : public ArrayNArgumentsConstructorDescriptor {
public:
+ // This descriptor declares same register arguments as the parent
+ // ArrayNArgumentsConstructorDescriptor and it declares indices for
+ // JS arguments passed on the expression stack.
DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
kFunctionParameter)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- ArrayNoArgumentConstructorDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::AnyTagged(), // kAllocationSite
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::AnyTagged()) // kFunctionParameter
+ DECLARE_DESCRIPTOR(ArrayNoArgumentConstructorDescriptor,
+ ArrayNArgumentsConstructorDescriptor)
};
class ArraySingleArgumentConstructorDescriptor
- : public CallInterfaceDescriptor {
+ : public ArrayNArgumentsConstructorDescriptor {
public:
+ // This descriptor declares same register arguments as the parent
+ // ArrayNArgumentsConstructorDescriptor and it declares indices for
+ // JS arguments passed on the expression stack.
DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
kFunctionParameter, kArraySizeSmiParameter)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- ArraySingleArgumentConstructorDescriptor, CallInterfaceDescriptor)
-};
-
-class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- ArrayNArgumentsConstructorDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::AnyTagged(), // kAllocationSite
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::AnyTagged(), // kFunctionParameter
+ MachineType::AnyTagged()) // kArraySizeSmiParameter
+ DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
+ ArrayNArgumentsConstructorDescriptor)
};
class CompareDescriptor : public CallInterfaceDescriptor {
@@ -742,35 +838,73 @@ class BinaryOpDescriptor : public CallInterfaceDescriptor {
class StringAtDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringAtDescriptor,
- CallInterfaceDescriptor)
+ // TODO(turbofan): Return untagged value here.
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedSigned(), // result 1
+ MachineType::AnyTagged(), // kReceiver
+ MachineType::IntPtr()) // kPosition
+ DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor)
};
class StringSubstringDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringSubstringDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kString
+ MachineType::IntPtr(), // kFrom
+ MachineType::IntPtr()) // kTo
+
+ // TODO(turbofan): Allow builtins to return untagged values.
+ DECLARE_DESCRIPTOR(StringSubstringDescriptor, CallInterfaceDescriptor)
};
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
- kExpectedArgumentsCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentAdaptorDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_JS_PARAMETERS(kExpectedArgumentsCount)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
+ DECLARE_DESCRIPTOR(ArgumentAdaptorDescriptor, CallInterfaceDescriptor)
+};
+
+class CppBuiltinAdaptorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_JS_PARAMETERS(kCFunction)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Pointer())
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor,
+ CallInterfaceDescriptor, 1)
+};
+
+class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArity, // register argument
+ kCFunction, // register argument
+ kPadding, // stack argument 1 (just padding)
+ kArgcSmi, // stack argument 2
+ kTargetCopy, // stack argument 3
+ kNewTargetCopy) // stack argument 4
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kArity
+ MachineType::Pointer(), // kCFunction
+ MachineType::AnyTagged(), // kPadding
+ MachineType::AnyTagged(), // kArgcSmi
+ MachineType::AnyTagged(), // kTargetCopy
+ MachineType::AnyTagged()) // kNewTargetCopy
+ DECLARE_DESCRIPTOR(CEntry1ArgvOnStackDescriptor, CallInterfaceDescriptor)
};
class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTargetContext, kCallData, kHolder, kApiFunctionAddress)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiCallbackDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS_NO_CONTEXT(kTargetContext, kCallData, kHolder,
+ kApiFunctionAddress)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTargetContext
+ MachineType::AnyTagged(), // kCallData
+ MachineType::AnyTagged(), // kHolder
+ MachineType::Pointer()) // kApiFunctionAddress
+ DECLARE_DESCRIPTOR(ApiCallbackDescriptor, CallInterfaceDescriptor)
};
class ApiGetterDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kHolder
+ MachineType::AnyTagged()) // kCallback
DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
static const Register ReceiverRegister();
@@ -778,26 +912,12 @@ class ApiGetterDescriptor : public CallInterfaceDescriptor {
static const Register CallbackRegister();
};
-class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kExponent)
- DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
-
- static const Register exponent();
-};
-
-class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kExponent)
- DECLARE_DESCRIPTOR(MathPowIntegerDescriptor, CallInterfaceDescriptor)
-
- static const Register exponent();
-};
-
// TODO(turbofan): We should probably rename this to GrowFastElementsDescriptor.
class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kObject
+ MachineType::AnyTagged()) // kKey
DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor, CallInterfaceDescriptor)
static const Register ObjectRegister();
@@ -807,8 +927,10 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFrame, kLength, kMappedCount)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(NewArgumentsElementsDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kFrame
+ MachineType::TaggedSigned(), // kLength
+ MachineType::TaggedSigned()) // kMappedCount
+ DECLARE_DESCRIPTOR(NewArgumentsElementsDescriptor, CallInterfaceDescriptor)
};
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
@@ -816,15 +938,21 @@ class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kAccumulator
+ MachineType::IntPtr(), // kBytecodeOffset
+ MachineType::AnyTagged(), // kBytecodeArray
+ MachineType::IntPtr()) // kDispatchTable
+ DECLARE_DESCRIPTOR(InterpreterDispatchDescriptor, CallInterfaceDescriptor)
};
class InterpreterPushArgsThenCallDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- InterpreterPushArgsThenCallDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments
+ MachineType::Pointer(), // kFirstArgument
+ MachineType::AnyTagged()) // kFunction
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenCallDescriptor,
+ CallInterfaceDescriptor)
};
class InterpreterPushArgsThenConstructDescriptor
@@ -832,47 +960,72 @@ class InterpreterPushArgsThenConstructDescriptor
public:
DEFINE_PARAMETERS(kNumberOfArguments, kNewTarget, kConstructor,
kFeedbackElement, kFirstArgument)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- InterpreterPushArgsThenConstructDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged(), // kConstructor
+ MachineType::AnyTagged(), // kFeedbackElement
+ MachineType::Pointer()) // kFirstArgument
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor,
+ CallInterfaceDescriptor)
};
-class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
+class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunctionEntry)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterCEntryDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_RESULT_AND_PARAMETERS(1, kNumberOfArguments, kFirstArgument,
+ kFunctionEntry)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1
+ MachineType::Int32(), // kNumberOfArguments
+ MachineType::Pointer(), // kFirstArgument
+ MachineType::Pointer()) // kFunctionEntry
+ DECLARE_DESCRIPTOR(InterpreterCEntry1Descriptor, CallInterfaceDescriptor)
+};
+
+class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_RESULT_AND_PARAMETERS(2, kNumberOfArguments, kFirstArgument,
+ kFunctionEntry)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1
+ MachineType::AnyTagged(), // result 2
+ MachineType::Int32(), // kNumberOfArguments
+ MachineType::Pointer(), // kFirstArgument
+ MachineType::Pointer()) // kFunctionEntry
+ DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor, CallInterfaceDescriptor)
};
class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kValue, kGenerator)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
+ MachineType::AnyTagged()) // kGenerator
DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
};
class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FrameDropperTrampolineDescriptor,
- CallInterfaceDescriptor)
+ public:
+ DEFINE_PARAMETERS(kRestartFp)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer())
+ DECLARE_DESCRIPTOR(FrameDropperTrampolineDescriptor, CallInterfaceDescriptor)
};
-class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
+class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_EMPTY_PARAMETERS()
- DECLARE_DEFAULT_DESCRIPTOR(WasmRuntimeCallDescriptor, CallInterfaceDescriptor,
- 0)
+ DEFINE_PARAMETERS()
+ DECLARE_DEFAULT_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor)
};
-class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
+class WasmGrowMemoryDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_EMPTY_PARAMETERS()
- DECLARE_DEFAULT_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor,
- 0)
+ DEFINE_PARAMETERS_NO_CONTEXT(kNumPages)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // result 1
+ MachineType::Int32()) // kNumPages
+ DECLARE_DESCRIPTOR(WasmGrowMemoryDescriptor, CallInterfaceDescriptor)
};
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
DEFINE_PARAMETERS(__VA_ARGS__) \
- DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor, CallInterfaceDescriptor, \
- kParameterCount) \
+ DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor, CallInterfaceDescriptor) \
};
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
@@ -880,9 +1033,15 @@ BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DECLARE_DEFAULT_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
-#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
-#undef DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG
+#undef DECLARE_JS_COMPATIBLE_DESCRIPTOR
+#undef DEFINE_RESULT_AND_PARAMETERS
+#undef DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT
#undef DEFINE_PARAMETERS
+#undef DEFINE_PARAMETERS_NO_CONTEXT
+#undef DEFINE_RESULT_AND_PARAMETER_TYPES
+#undef DEFINE_PARAMETER_TYPES
+#undef DEFINE_JS_PARAMETERS
+#undef DEFINE_JS_PARAMETER_TYPES
// We define the association between CallDescriptors::Key and the specialized
// descriptor here to reduce boilerplate and mistakes.
@@ -893,11 +1052,4 @@ INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
} // namespace internal
} // namespace v8
-
-#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/interface-descriptors-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/interface-descriptors-arm.h"
-#endif
-
#endif // V8_INTERFACE_DESCRIPTORS_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 1c6d28dd63..ef6bdd30a1 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -184,12 +184,11 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
- return FixedArray::get(bytecode_array()->constant_pool(), index,
- bytecode_array()->GetIsolate());
+Object* BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
+ return bytecode_array()->constant_pool()->get(index);
}
-Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+Object* BytecodeArrayAccessor::GetConstantForIndexOperand(
int operand_index) const {
return GetConstantAtIndex(GetIndexOperand(operand_index));
}
@@ -203,7 +202,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
}
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ Smi* smi = Smi::cast(GetConstantForIndexOperand(0));
return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
@@ -273,6 +272,7 @@ JumpTableTargetOffsets::iterator::iterator(
int case_value, int table_offset, int table_end,
const BytecodeArrayAccessor* accessor)
: accessor_(accessor),
+ current_(Smi::kZero),
index_(case_value),
table_offset_(table_offset),
table_end_(table_end) {
@@ -281,8 +281,7 @@ JumpTableTargetOffsets::iterator::iterator(
JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
DCHECK_LT(table_offset_, table_end_);
- DCHECK(current_->IsSmi());
- return {index_, accessor_->GetAbsoluteOffset(Smi::ToInt(*current_))};
+ return {index_, accessor_->GetAbsoluteOffset(Smi::ToInt(current_))};
}
JumpTableTargetOffsets::iterator& JumpTableTargetOffsets::iterator::
@@ -305,13 +304,17 @@ bool JumpTableTargetOffsets::iterator::operator!=(
void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
if (table_offset_ >= table_end_) return;
- current_ = accessor_->GetConstantAtIndex(table_offset_);
- Isolate* isolate = accessor_->bytecode_array()->GetIsolate();
- while (current_->IsTheHole(isolate)) {
+ Object* current = accessor_->GetConstantAtIndex(table_offset_);
+ while (!current->IsSmi()) {
+ DCHECK(current->IsTheHole());
++table_offset_;
++index_;
if (table_offset_ >= table_end_) break;
- current_ = accessor_->GetConstantAtIndex(table_offset_);
+ current = accessor_->GetConstantAtIndex(table_offset_);
+ }
+ // Make sure we haven't reached the end of the table with a hole in current.
+ if (current->IsSmi()) {
+ current_ = Smi::cast(current);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index f31d2d0e7f..443929aefe 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -42,7 +42,7 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
void UpdateAndAdvanceToValid();
const BytecodeArrayAccessor* accessor_;
- Handle<Object> current_;
+ Smi* current_;
int index_;
int table_offset_;
int table_end_;
@@ -90,8 +90,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Handle<Object> GetConstantAtIndex(int offset) const;
- Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+ Object* GetConstantAtIndex(int offset) const;
+ Object* GetConstantForIndexOperand(int operand_index) const;
// Returns the absolute offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index dcd7dcdd79..b0162c77ad 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -176,12 +176,13 @@ namespace {
template <OperandTypeInfo type_info>
class UnsignedOperandHelper {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, size_t value)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ size_t value) {
DCHECK(IsValid(value));
return static_cast<uint32_t>(value);
}
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder, int value) {
DCHECK_GE(value, 0);
return Convert(builder, static_cast<size_t>(value));
}
@@ -215,7 +216,7 @@ UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
template <>
class OperandHelper<OperandType::kImm> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder, int value) {
return static_cast<uint32_t>(value);
}
};
@@ -223,7 +224,8 @@ class OperandHelper<OperandType::kImm> {
template <>
class OperandHelper<OperandType::kReg> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ Register reg) {
return builder->GetInputRegisterOperand(reg);
}
};
@@ -231,8 +233,8 @@ class OperandHelper<OperandType::kReg> {
template <>
class OperandHelper<OperandType::kRegList> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
- RegisterList reg_list)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list) {
return builder->GetInputRegisterListOperand(reg_list);
}
};
@@ -240,8 +242,8 @@ class OperandHelper<OperandType::kRegList> {
template <>
class OperandHelper<OperandType::kRegPair> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
- RegisterList reg_list)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list) {
DCHECK_EQ(reg_list.register_count(), 2);
return builder->GetInputRegisterListOperand(reg_list);
}
@@ -250,7 +252,8 @@ class OperandHelper<OperandType::kRegPair> {
template <>
class OperandHelper<OperandType::kRegOut> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ Register reg) {
return builder->GetOutputRegisterOperand(reg);
}
};
@@ -258,8 +261,8 @@ class OperandHelper<OperandType::kRegOut> {
template <>
class OperandHelper<OperandType::kRegOutList> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
- RegisterList reg_list)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list) {
return builder->GetOutputRegisterListOperand(reg_list);
}
};
@@ -267,8 +270,8 @@ class OperandHelper<OperandType::kRegOutList> {
template <>
class OperandHelper<OperandType::kRegOutPair> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
- RegisterList reg_list)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list) {
DCHECK_EQ(2, reg_list.register_count());
return builder->GetOutputRegisterListOperand(reg_list);
}
@@ -277,8 +280,8 @@ class OperandHelper<OperandType::kRegOutPair> {
template <>
class OperandHelper<OperandType::kRegOutTriple> {
public:
- INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
- RegisterList reg_list)) {
+ V8_INLINE static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list) {
DCHECK_EQ(3, reg_list.register_count());
return builder->GetOutputRegisterListOperand(reg_list);
}
@@ -291,8 +294,8 @@ template <Bytecode bytecode, AccumulatorUse accumulator_use,
class BytecodeNodeBuilder {
public:
template <typename... Operands>
- INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
- Operands... operands)) {
+ V8_INLINE static BytecodeNode Make(BytecodeArrayBuilder* builder,
+ Operands... operands) {
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,
"too many operands for bytecode");
builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 4aceba9b6e..f34f6f3a7d 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -537,19 +537,19 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
}
// Returns the current source position for the given |bytecode|.
- INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
-
-#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
- template <typename... Operands> \
- INLINE(BytecodeNode Create##Name##Node(Operands... operands)); \
- template <typename... Operands> \
- INLINE(void Output##Name(Operands... operands)); \
- template <typename... Operands> \
- INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
+ V8_INLINE BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode);
+
+#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
+ template <typename... Operands> \
+ V8_INLINE BytecodeNode Create##Name##Node(Operands... operands); \
+ template <typename... Operands> \
+ V8_INLINE void Output##Name(Operands... operands); \
+ template <typename... Operands> \
+ V8_INLINE void Output##Name(BytecodeLabel* label, Operands... operands);
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_OPERAND_TYPE_INFO
- INLINE(void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table));
+ V8_INLINE void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
bool RegisterIsValid(Register reg) const;
bool RegisterListIsValid(RegisterList reg_list) const;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 432b271343..79792a3d56 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -6,7 +6,6 @@
#include "src/api.h"
#include "src/ast/ast-source-ranges.h"
-#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
@@ -734,7 +733,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
Object* undefined_or_literal_slot;
if (declaration.literal_slot.IsInvalid()) {
- undefined_or_literal_slot = isolate->heap()->undefined_value();
+ undefined_or_literal_slot = ReadOnlyRoots(isolate).undefined_value();
} else {
undefined_or_literal_slot =
Smi::FromInt(declaration.literal_slot.ToInt());
@@ -991,8 +990,8 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
if (object_literal->properties_count() > 0) {
// If constant properties is an empty fixed array, we've already added it
// to the constant pool when visiting the object literal.
- Handle<BoilerplateDescription> constant_properties =
- object_literal->GetOrBuildConstantProperties(isolate);
+ Handle<ObjectBoilerplateDescription> constant_properties =
+ object_literal->GetOrBuildBoilerplateDescription(isolate);
builder()->SetDeferredConstantPoolEntry(literal.second,
constant_properties);
@@ -1002,8 +1001,8 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
// Build array literal constant elements
for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
ArrayLiteral* array_literal = literal.first;
- Handle<ConstantElementsPair> constant_elements =
- array_literal->GetOrBuildConstantElements(isolate);
+ Handle<ArrayBoilerplateDescription> constant_elements =
+ array_literal->GetOrBuildBoilerplateDescription(isolate);
builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
}
@@ -1212,7 +1211,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
break;
case VariableLocation::LOOKUP: {
- DCHECK_EQ(VAR, variable->mode());
+ DCHECK_EQ(VariableMode::kVar, variable->mode());
DCHECK(!variable->binding_needs_init());
Register name = register_allocator()->NewRegister();
@@ -1235,7 +1234,8 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
- DCHECK(variable->mode() == LET || variable->mode() == VAR);
+ DCHECK(variable->mode() == VariableMode::kLet ||
+ variable->mode() == VariableMode::kVar);
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackSlot slot =
@@ -1270,7 +1270,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
break;
}
case VariableLocation::MODULE:
- DCHECK_EQ(variable->mode(), LET);
+ DCHECK_EQ(variable->mode(), VariableMode::kLet);
DCHECK(variable->IsExport());
VisitForAccumulatorValue(decl->fun());
BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
@@ -1327,7 +1327,7 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
globals_builder_ = new (zone()) GlobalDeclarationsBuilder(zone());
}
-void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+void BytecodeGenerator::VisitStatements(ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
// Allocate an outer register allocations scope for the statement.
RegisterAllocationScope allocation_scope(this);
@@ -1416,7 +1416,7 @@ void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// We need this scope because we visit for register values. We have to
// maintain a execution result scope where registers can be allocated.
- ZoneList<CaseClause*>* clauses = stmt->cases();
+ ZonePtrList<CaseClause>* clauses = stmt->cases();
SwitchBuilder switch_builder(builder(), block_coverage_builder_, stmt,
clauses->length());
ControlScopeForBreakable scope(this, stmt, &switch_builder);
@@ -2118,7 +2118,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If constant properties is an empty fixed array, use a cached empty fixed
// array to ensure it's only added to the constant pool once.
if (expr->properties_count() == 0) {
- entry = builder()->EmptyBoilerplateDescriptionConstantPoolEntry();
+ entry = builder()->EmptyObjectBoilerplateDescriptionConstantPoolEntry();
} else {
entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
@@ -2143,7 +2143,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ DCHECK(!property->value()->IsCompileTimeValue());
V8_FALLTHROUGH;
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
@@ -2315,15 +2315,15 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
void BytecodeGenerator::BuildArrayLiteralElementsInsertion(
- Register array, int first_spread_index, ZoneList<Expression*>* elements,
+ Register array, int first_spread_index, ZonePtrList<Expression>* elements,
bool skip_constants) {
DCHECK_LT(first_spread_index, elements->length());
Register index = register_allocator()->NewRegister();
int array_index = 0;
- ZoneList<Expression*>::iterator iter = elements->begin();
- ZoneList<Expression*>::iterator first_spread_or_end =
+ ZonePtrList<Expression>::iterator iter = elements->begin();
+ ZonePtrList<Expression>::iterator first_spread_or_end =
first_spread_index >= 0 ? elements->begin() + first_spread_index
: elements->end();
@@ -2332,8 +2332,7 @@ void BytecodeGenerator::BuildArrayLiteralElementsInsertion(
for (; iter != first_spread_or_end; ++iter, array_index++) {
Expression* subexpr = *iter;
DCHECK(!subexpr->IsSpread());
- if (skip_constants && CompileTimeValue::IsCompileTimeValue(subexpr))
- continue;
+ if (skip_constants && subexpr->IsCompileTimeValue()) continue;
if (keyed_store_slot.IsInvalid()) {
keyed_store_slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
}
@@ -2532,7 +2531,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
}
case VariableLocation::LOOKUP: {
switch (variable->mode()) {
- case DYNAMIC_LOCAL: {
+ case VariableMode::kDynamicLocal: {
Variable* local_variable = variable->local_if_not_shadowed();
int depth =
execution_context()->ContextChainDepth(local_variable->scope());
@@ -2543,7 +2542,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
}
break;
}
- case DYNAMIC_GLOBAL: {
+ case VariableMode::kDynamicGlobal: {
int depth =
current_scope()->ContextChainLengthUntilOutermostSloppyEval();
FeedbackSlot slot = GetCachedLoadGlobalICSlot(typeof_mode, variable);
@@ -2622,7 +2621,7 @@ void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
if (variable->is_this()) {
- DCHECK(variable->mode() == CONST);
+ DCHECK(variable->mode() == VariableMode::kConst);
builder()->ThrowSuperNotCalledIfHole();
} else {
builder()->ThrowReferenceErrorIfHole(variable->raw_name());
@@ -2631,7 +2630,8 @@ void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
+ if (variable->is_this() && variable->mode() == VariableMode::kConst &&
+ op == Token::INIT) {
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
@@ -2675,7 +2675,7 @@ void BytecodeGenerator::BuildVariableAssignment(
builder()->LoadAccumulatorWithRegister(value_temp);
}
- if (mode != CONST || op == Token::INIT) {
+ if (mode != VariableMode::kConst || op == Token::INIT) {
builder()->StoreAccumulatorInRegister(destination);
} else if (variable->throw_on_const_assignment(language_mode())) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
@@ -2711,7 +2711,7 @@ void BytecodeGenerator::BuildVariableAssignment(
builder()->LoadAccumulatorWithRegister(value_temp);
}
- if (mode != CONST || op == Token::INIT) {
+ if (mode != VariableMode::kConst || op == Token::INIT) {
builder()->StoreContextSlot(context_reg, variable->index(), depth);
} else if (variable->throw_on_const_assignment(language_mode())) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
@@ -2726,7 +2726,7 @@ void BytecodeGenerator::BuildVariableAssignment(
case VariableLocation::MODULE: {
DCHECK(IsDeclaredVariableMode(mode));
- if (mode == CONST && op != Token::INIT) {
+ if (mode == VariableMode::kConst && op != Token::INIT) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
break;
}
@@ -3424,7 +3424,7 @@ void BytecodeGenerator::VisitResolvedProperty(ResolvedProperty* expr) {
UNREACHABLE();
}
-void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
+void BytecodeGenerator::VisitArguments(ZonePtrList<Expression>* args,
RegisterList* arg_regs) {
// Visit arguments.
for (int i = 0; i < static_cast<int>(args->length()); i++) {
@@ -3595,7 +3595,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
void BytecodeGenerator::VisitCallSuper(Call* expr) {
RegisterAllocationScope register_scope(this);
SuperCallReference* super = expr->expression()->AsSuperCallReference();
- ZoneList<Expression*>* args = expr->arguments();
+ ZonePtrList<Expression>* args = expr->arguments();
int first_spread_index = 0;
for (; first_spread_index < args->length(); first_spread_index++) {
@@ -4309,8 +4309,8 @@ void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
}
void BytecodeGenerator::VisitTemplateLiteral(TemplateLiteral* expr) {
- const TemplateLiteral::StringList& parts = *expr->string_parts();
- const TemplateLiteral::ExpressionList& substitutions = *expr->substitutions();
+ const ZonePtrList<const AstRawString>& parts = *expr->string_parts();
+ const ZonePtrList<Expression>& substitutions = *expr->substitutions();
// Template strings with no substitutions are turned into StringLiterals.
DCHECK_GT(substitutions.length(), 0);
DCHECK_EQ(parts.length(), substitutions.length() + 1);
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index b02d416860..c9cee39bb9 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -43,7 +43,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visiting function for declarations list and statements are overridden.
void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZoneList<Statement*>* statments);
+ void VisitStatements(ZonePtrList<Statement>* statments);
private:
class ContextScope;
@@ -100,7 +100,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visit the arguments expressions in |args| and store them in |args_regs|,
// growing |args_regs| for each argument visited.
- void VisitArguments(ZoneList<Expression*>* args, RegisterList* arg_regs);
+ void VisitArguments(ZonePtrList<Expression>* args, RegisterList* arg_regs);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -179,7 +179,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
FeedbackSlot element_slot);
void BuildArrayLiteralElementsInsertion(Register array,
int first_spread_index,
- ZoneList<Expression*>* elements,
+ ZonePtrList<Expression>* elements,
bool skip_constants);
void AllocateTopLevelRegisters();
@@ -247,7 +247,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
TypeHint VisitForAccumulatorValue(Expression* expr);
void VisitForAccumulatorValueOrTheHole(Expression* expr);
V8_WARN_UNUSED_RESULT Register VisitForRegisterValue(Expression* expr);
- INLINE(void VisitForRegisterValue(Expression* expr, Register destination));
+ V8_INLINE void VisitForRegisterValue(Expression* expr, Register destination);
void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
void VisitForEffect(Expression* expr);
void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
diff --git a/deps/v8/src/interpreter/bytecode-node.h b/deps/v8/src/interpreter/bytecode-node.h
index 98e1577f45..48d8961632 100644
--- a/deps/v8/src/interpreter/bytecode-node.h
+++ b/deps/v8/src/interpreter/bytecode-node.h
@@ -18,8 +18,8 @@ namespace interpreter {
// A container for a generated bytecode, it's operands, and source information.
class V8_EXPORT_PRIVATE BytecodeNode final {
public:
- INLINE(BytecodeNode(Bytecode bytecode,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(0),
operand_scale_(OperandScale::kSingle),
@@ -27,8 +27,8 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
}
- INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(1),
operand_scale_(OperandScale::kSingle),
@@ -37,8 +37,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
SetOperand(0, operand0);
}
- INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(2),
operand_scale_(OperandScale::kSingle),
@@ -48,9 +49,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
SetOperand(1, operand1);
}
- INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(3),
operand_scale_(OperandScale::kSingle),
@@ -61,9 +62,10 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
SetOperand(2, operand2);
}
- INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(4),
operand_scale_(OperandScale::kSingle),
@@ -75,9 +77,10 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
SetOperand(3, operand3);
}
- INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3, uint32_t operand4,
- BytecodeSourceInfo source_info = BytecodeSourceInfo()))
+ V8_INLINE BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3, uint32_t operand4,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo())
: bytecode_(bytecode),
operand_count_(5),
operand_scale_(OperandScale::kSingle),
@@ -92,8 +95,8 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
template <typename... Operands> \
- INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
- Operands... operands)) { \
+ V8_INLINE static BytecodeNode Name(BytecodeSourceInfo source_info, \
+ Operands... operands) { \
return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
}
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
@@ -128,11 +131,11 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
OperandType... operand_types>
friend class BytecodeNodeBuilder;
- INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
- OperandScale operand_scale,
- BytecodeSourceInfo source_info, uint32_t operand0 = 0,
- uint32_t operand1 = 0, uint32_t operand2 = 0,
- uint32_t operand3 = 0, uint32_t operand4 = 0))
+ V8_INLINE BytecodeNode(Bytecode bytecode, int operand_count,
+ OperandScale operand_scale,
+ BytecodeSourceInfo source_info, uint32_t operand0 = 0,
+ uint32_t operand1 = 0, uint32_t operand2 = 0,
+ uint32_t operand3 = 0, uint32_t operand4 = 0)
: bytecode_(bytecode),
operand_count_(operand_count),
operand_scale_(operand_scale),
@@ -146,14 +149,14 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
}
template <Bytecode bytecode, AccumulatorUse accum_use>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info) {
return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
- uint32_t operand0)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
@@ -162,8 +165,8 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
- uint32_t operand0, uint32_t operand1)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
OperandScale scale = OperandScale::kSingle;
@@ -175,9 +178,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
- uint32_t operand0, uint32_t operand1,
- uint32_t operand2)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
@@ -192,9 +195,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
- uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
@@ -212,10 +215,10 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type,
OperandType operand4_type>
- INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
- uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3,
- uint32_t operand4)) {
+ V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ uint32_t operand4) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
@@ -232,7 +235,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
}
template <OperandType operand_type>
- INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
+ V8_INLINE static OperandScale ScaleForOperand(uint32_t operand) {
if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
return Bytecodes::ScaleForUnsignedOperand(operand);
} else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
@@ -242,7 +245,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
}
}
- INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
+ V8_INLINE void UpdateScaleForOperand(int operand_index, uint32_t operand) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
std::max(operand_scale_, Bytecodes::ScaleForSignedOperand(operand));
@@ -253,7 +256,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
}
}
- INLINE(void SetOperand(int operand_index, uint32_t operand)) {
+ V8_INLINE void SetOperand(int operand_index, uint32_t operand) {
operands_[operand_index] = operand;
UpdateScaleForOperand(operand_index, operand);
}
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 92673d9cac..11794274b9 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -63,7 +63,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Prepares for |bytecode|.
template <Bytecode bytecode, AccumulatorUse accumulator_use>
- INLINE(void PrepareForBytecode()) {
+ V8_INLINE void PrepareForBytecode() {
if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator ||
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index e5f918115a..b1ae88c1ba 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -695,11 +695,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
#undef OR_BYTECODE
}
- // Returns the number of values which |bytecode| returns.
- static constexpr size_t ReturnCount(Bytecode bytecode) {
- return Returns(bytecode) ? 1 : 0;
- }
-
// Returns the number of operands expected by |bytecode|.
static int NumberOfOperands(Bytecode bytecode) {
DCHECK_LE(bytecode, Bytecode::kLast);
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index e66946f2c5..8f558c4a90 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -21,14 +21,14 @@ class AstValue;
namespace interpreter {
// Constant array entries that represent singletons.
-#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
- V(AsyncIteratorSymbol, async_iterator_symbol) \
- V(ClassFieldsSymbol, class_fields_symbol) \
- V(EmptyBoilerplateDescription, empty_boilerplate_description) \
- V(EmptyFixedArray, empty_fixed_array) \
- V(HomeObjectSymbol, home_object_symbol) \
- V(IteratorSymbol, iterator_symbol) \
- V(InterpreterTrampolineSymbol, interpreter_trampoline_symbol) \
+#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
+ V(AsyncIteratorSymbol, async_iterator_symbol) \
+ V(ClassFieldsSymbol, class_fields_symbol) \
+ V(EmptyObjectBoilerplateDescription, empty_object_boilerplate_description) \
+ V(EmptyFixedArray, empty_fixed_array) \
+ V(HomeObjectSymbol, home_object_symbol) \
+ V(IteratorSymbol, iterator_symbol) \
+ V(InterpreterTrampolineSymbol, interpreter_trampoline_symbol) \
V(NaN, nan_value)
// A helper class for constructing constant arrays for the
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e79cb0061b..6ea4ba628c 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -23,6 +23,8 @@ namespace interpreter {
using compiler::CodeAssemblerState;
using compiler::Node;
+template <class T>
+using TNode = compiler::TNode<T>;
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
@@ -667,8 +669,8 @@ Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
}
-Node* InterpreterAssembler::LoadFeedbackVector() {
- Node* function = LoadRegister(Register::function_closure());
+TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
+ TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
return CodeStubAssembler::LoadFeedbackVector(function);
}
@@ -703,8 +705,8 @@ void InterpreterAssembler::CallEpilogue() {
void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Node* slot_id) {
Comment("increment call count");
- TNode<Smi> call_count = CAST(
- ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize)));
+ TNode<Smi> call_count =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize));
// The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
// count are used as flags. To increment the call count by 1 we hence
// have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
@@ -721,35 +723,33 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
Label extra_checks(this, Label::kDeferred), done(this);
// Check if we have monomorphic {target} feedback already.
- TNode<HeapObject> feedback_element =
- ToStrongHeapObject(LoadFeedbackVectorSlot(feedback_vector, slot_id));
- Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
+ TNode<MaybeObject> feedback =
+ LoadFeedbackVectorSlot(feedback_vector, slot_id);
Comment("check if monomorphic");
- Node* is_monomorphic = WordEqual(target, feedback_value);
+ TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
GotoIf(is_monomorphic, &done);
// Check if it is a megamorphic {target}.
Comment("check if megamorphic");
- Node* is_megamorphic =
- WordEqual(feedback_element,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Node* is_megamorphic = WordEqual(
+ feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
Branch(is_megamorphic, &done, &extra_checks);
BIND(&extra_checks);
{
Label initialize(this), mark_megamorphic(this);
- Comment("check if weak cell");
+ Comment("check if weak reference");
Node* is_uninitialized = WordEqual(
- feedback_element,
+ feedback,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIf(is_uninitialized, &initialize);
- CSA_ASSERT(this, IsWeakCell(feedback_element));
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(feedback));
- // If the weak cell is cleared, we have a new chance to become monomorphic.
- Comment("check if weak cell is cleared");
- Node* is_smi = TaggedIsSmi(feedback_value);
- Branch(is_smi, &initialize, &mark_megamorphic);
+ // If the weak reference is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak reference is cleared");
+ Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
BIND(&initialize);
{
@@ -792,7 +792,8 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
}
}
BIND(&done_loop);
- CreateWeakCellInFeedbackVector(feedback_vector, slot_id, target);
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
+ CAST(target));
ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
Goto(&done);
}
@@ -930,10 +931,10 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
IncrementCallCount(feedback_vector, slot_id);
// Check if we have monomorphic {new_target} feedback already.
- TNode<HeapObject> feedback_element =
- CAST(ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id)));
- Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
- Branch(WordEqual(new_target, feedback_value), &construct, &extra_checks);
+ TNode<MaybeObject> feedback =
+ LoadFeedbackVectorSlot(feedback_vector, slot_id);
+ Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
+ &extra_checks);
BIND(&extra_checks);
{
@@ -942,32 +943,31 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// Check if it is a megamorphic {new_target}..
Comment("check if megamorphic");
- Node* is_megamorphic =
- WordEqual(feedback_element,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Node* is_megamorphic = WordEqual(
+ feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &construct);
- Comment("check if weak cell");
- Node* feedback_element_map = LoadMap(feedback_element);
- GotoIfNot(IsWeakCellMap(feedback_element_map), &check_allocation_site);
+ Comment("check if weak reference");
+ GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_allocation_site);
- // If the weak cell is cleared, we have a new chance to become monomorphic.
- Comment("check if weak cell is cleared");
- Node* is_smi = TaggedIsSmi(feedback_value);
- Branch(is_smi, &initialize, &mark_megamorphic);
+ // If the weak reference is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak reference is cleared");
+ Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
BIND(&check_allocation_site);
{
// Check if it is an AllocationSite.
Comment("check if allocation site");
- GotoIfNot(IsAllocationSiteMap(feedback_element_map), &check_initialized);
+ TNode<HeapObject> strong_feedback = CAST(feedback);
+ GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
// Make sure that {target} and {new_target} are the Array constructor.
Node* array_function = LoadContextElement(LoadNativeContext(context),
Context::ARRAY_FUNCTION_INDEX);
GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
- var_site.Bind(feedback_element);
+ var_site.Bind(strong_feedback);
Goto(&construct_array);
}
@@ -975,8 +975,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
{
// Check if it is uninitialized.
Comment("check if uninitialized");
- Node* is_uninitialized = WordEqual(
- feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ Node* is_uninitialized =
+ WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
@@ -1023,12 +1023,12 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// Create an AllocationSite if {target} and {new_target} refer
// to the current native context's Array constructor.
- Label create_allocation_site(this), create_weak_cell(this);
- GotoIfNot(WordEqual(target, new_target), &create_weak_cell);
+ Label create_allocation_site(this), store_weak_reference(this);
+ GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
Node* array_function = LoadContextElement(LoadNativeContext(context),
Context::ARRAY_FUNCTION_INDEX);
Branch(WordEqual(target, array_function), &create_allocation_site,
- &create_weak_cell);
+ &store_weak_reference);
BIND(&create_allocation_site);
{
@@ -1039,11 +1039,12 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
Goto(&construct_array);
}
- BIND(&create_weak_cell);
+ BIND(&store_weak_reference);
{
- CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
+ CAST(new_target));
ReportFeedbackUpdate(feedback_vector, slot_id,
- "Construct:CreateWeakCell");
+ "Construct:StoreWeakReference");
Goto(&construct);
}
}
@@ -1110,10 +1111,10 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
IncrementCallCount(feedback_vector, slot_id);
// Check if we have monomorphic {new_target} feedback already.
- TNode<HeapObject> feedback_element =
- CAST(ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id)));
- Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
- Branch(WordEqual(new_target, feedback_value), &construct, &extra_checks);
+ TNode<MaybeObject> feedback =
+ LoadFeedbackVectorSlot(feedback_vector, slot_id);
+ Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
+ &extra_checks);
BIND(&extra_checks);
{
@@ -1121,27 +1122,24 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// Check if it is a megamorphic {new_target}.
Comment("check if megamorphic");
- Node* is_megamorphic =
- WordEqual(feedback_element,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Node* is_megamorphic = WordEqual(
+ feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
GotoIf(is_megamorphic, &construct);
- Comment("check if weak cell");
- Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kWeakCellMapRootIndex));
- GotoIfNot(is_weak_cell, &check_initialized);
+ Comment("check if weak reference");
+ GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_initialized);
- // If the weak cell is cleared, we have a new chance to become monomorphic.
- Comment("check if weak cell is cleared");
- Node* is_smi = TaggedIsSmi(feedback_value);
- Branch(is_smi, &initialize, &mark_megamorphic);
+ // If the weak reference is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak reference is cleared");
+ Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
BIND(&check_initialized);
{
// Check if it is uninitialized.
Comment("check if uninitialized");
- Node* is_uninitialized = WordEqual(
- feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ Node* is_uninitialized =
+ WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
@@ -1185,7 +1183,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
}
}
BIND(&done_loop);
- CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
+ CAST(new_target));
ReportFeedbackUpdate(feedback_vector, slot_id,
"ConstructWithSpread:Initialize");
Goto(&construct);
@@ -1423,12 +1422,12 @@ Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
- InterpreterDispatchDescriptor descriptor(isolate());
// Propagate speculation poisoning.
Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
return TailCallBytecodeDispatch(
- descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(),
- bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
+ InterpreterDispatchDescriptor{}, poisoned_handler_entry,
+ GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1582,62 +1581,103 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#endif
}
-void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
- Node* register_count) {
- Node* array_size = LoadAndUntagFixedArrayBaseLength(register_file);
+void InterpreterAssembler::AbortIfRegisterCountInvalid(
+ Node* parameters_and_registers, Node* formal_parameter_count,
+ Node* register_count) {
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
Label ok(this), abort(this, Label::kDeferred);
- Branch(UintPtrLessThanOrEqual(register_count, array_size), &ok, &abort);
+ Branch(UintPtrLessThanOrEqual(
+ IntPtrAdd(formal_parameter_count, register_count), array_size),
+ &ok, &abort);
BIND(&abort);
- Abort(AbortReason::kInvalidRegisterFileInGenerator);
+ Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
Goto(&ok);
BIND(&ok);
}
-Node* InterpreterAssembler::ExportRegisterFile(
- Node* array, const RegListNodePair& registers) {
+Node* InterpreterAssembler::ExportParametersAndRegisterFile(
+ Node* array, const RegListNodePair& registers,
+ Node* formal_parameter_count) {
+ // Store the formal parameters (without receiver) followed by the
+ // registers into the generator's internal parameters_and_registers field.
+ formal_parameter_count = ChangeInt32ToIntPtr(formal_parameter_count);
Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
- AbortIfRegisterCountInvalid(array, register_count);
+ AbortIfRegisterCountInvalid(array, formal_parameter_count, register_count);
}
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
-
- // Iterate over register file and write values into array.
- // The mapping of register to array index must match that used in
- // BytecodeGraphBuilder::VisitResumeGenerator.
- Label loop(this, &var_index), done_loop(this);
- Goto(&loop);
- BIND(&loop);
{
- Node* index = var_index.value();
- GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
- Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
- Node* value = LoadRegister(reg_index);
+ // Iterate over parameters and write them into the array.
+ Label loop(this, &var_index), done_loop(this);
- StoreFixedArrayElement(array, index, value);
+ Node* reg_base = IntPtrAdd(
+ IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
+ formal_parameter_count);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
+ BIND(&loop);
+ {
+ Node* index = var_index.value();
+ GotoIfNot(UintPtrLessThan(index, formal_parameter_count), &done_loop);
+
+ Node* reg_index = IntPtrSub(reg_base, index);
+ Node* value = LoadRegister(reg_index);
+
+ StoreFixedArrayElement(array, index, value);
+
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ Goto(&loop);
+ }
+ BIND(&done_loop);
+ }
+
+ {
+ // Iterate over register file and write values into array.
+ // The mapping of register to array index must match that used in
+ // BytecodeGraphBuilder::VisitResumeGenerator.
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
+
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* index = var_index.value();
+ GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
+
+ Node* reg_index =
+ IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ Node* value = LoadRegister(reg_index);
+
+ Node* array_index = IntPtrAdd(formal_parameter_count, index);
+ StoreFixedArrayElement(array, array_index, value);
+
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ Goto(&loop);
+ }
+ BIND(&done_loop);
}
- BIND(&done_loop);
return array;
}
-Node* InterpreterAssembler::ImportRegisterFile(
- Node* array, const RegListNodePair& registers) {
+Node* InterpreterAssembler::ImportRegisterFile(Node* array,
+ const RegListNodePair& registers,
+ Node* formal_parameter_count) {
+ formal_parameter_count = ChangeInt32ToIntPtr(formal_parameter_count);
Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
- AbortIfRegisterCountInvalid(array, register_count);
+ AbortIfRegisterCountInvalid(array, formal_parameter_count, register_count);
}
Variable var_index(this, MachineType::PointerRepresentation());
@@ -1652,12 +1692,13 @@ Node* InterpreterAssembler::ImportRegisterFile(
Node* index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
- Node* value = LoadFixedArrayElement(array, index);
+ Node* array_index = IntPtrAdd(formal_parameter_count, index);
+ Node* value = LoadFixedArrayElement(array, array_index);
Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
StoreRegister(value, reg_index);
- StoreFixedArrayElement(array, index,
+ StoreFixedArrayElement(array, array_index,
LoadRoot(Heap::kStaleRegisterRootIndex));
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 74b7e3b57c..641d553fd2 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -98,10 +98,16 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
};
// Backup/restore register file to/from a fixed array of the correct length.
- compiler::Node* ExportRegisterFile(compiler::Node* array,
- const RegListNodePair& registers);
+ // There is an asymmetry between suspend/export and resume/import.
+ // - Suspend copies arguments and registers to the generator.
+ // - Resume copies only the registers from the generator, the arguments
+ // are copied by the ResumeGenerator trampoline.
+ compiler::Node* ExportParametersAndRegisterFile(
+ compiler::Node* array, const RegListNodePair& registers,
+ compiler::Node* formal_parameter_count);
compiler::Node* ImportRegisterFile(compiler::Node* array,
- const RegListNodePair& registers);
+ const RegListNodePair& registers,
+ compiler::Node* formal_parameter_count);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
@@ -139,7 +145,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
// Load the FeedbackVector for the current function.
- compiler::Node* LoadFeedbackVector();
+ compiler::TNode<FeedbackVector> LoadFeedbackVector();
// Increment the call count for a CALL_IC or construct call.
// The call count is located at feedback_vector[slot_id + 1].
@@ -248,7 +254,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
AbortReason abort_reason);
// Abort if |register_count| is invalid for given register file array.
- void AbortIfRegisterCountInvalid(compiler::Node* register_file,
+ void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
+ compiler::Node* formal_parameter_count,
compiler::Node* register_count);
// Dispatch to frame dropper trampoline if necessary.
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 0689b8a032..74ebaabcc7 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -19,6 +19,7 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects-inl.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -153,7 +154,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
- TNode<FeedbackVector> feedback_vector = CAST(LoadFeedbackVector());
+ TNode<FeedbackVector> feedback_vector = LoadFeedbackVector();
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
@@ -2407,7 +2408,8 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
- Node* boilerplate_description = LoadConstantPoolEntryAtOperandIndex(0);
+ Node* object_boilerplate_description =
+ LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
@@ -2416,7 +2418,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* result =
CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(slot_id), boilerplate_description, flags);
+ SmiTag(slot_id), object_boilerplate_description, flags);
StoreRegisterAtOperandIndex(result, 3);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2442,8 +2444,8 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
Node* feedback_vector = LoadFeedbackVector();
Node* slot = BytecodeOperandIdx(1);
- TNode<Object> cached_value = ToObject(
- LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ TNode<Object> cached_value =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
Label call_runtime(this, Label::kDeferred);
GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
@@ -2474,7 +2476,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
Node* slot = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
TNode<Object> feedback_cell =
- ToObject(LoadFeedbackVectorSlot(feedback_vector, slot));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
Label if_fast(this), if_slow(this, Label::kDeferred);
Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
@@ -2645,7 +2647,7 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
//
// Performs a stack guard check.
IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
- Node* context = GetContext();
+ TNode<Context> context = CAST(GetContext());
PerformStackCheck(context);
Dispatch();
}
@@ -2996,18 +2998,26 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) {
// SuspendGenerator <generator> <first input register> <register count>
// <suspend_id>
//
-// Exports the register file and stores it into the generator. Also stores the
-// current context, |suspend_id|, and the current bytecode offset (for debugging
-// purposes) into the generator. Then, returns the value in the accumulator.
+// Stores the parameters and the register file in the generator. Also stores
+// the current context, |suspend_id|, and the current bytecode offset
+// (for debugging purposes) into the generator. Then, returns the value
+// in the accumulator.
IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* generator = LoadRegisterAtOperandIndex(0);
- Node* array =
- LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
+ Node* array = LoadObjectField(
+ generator, JSGeneratorObject::kParametersAndRegistersOffset);
+ Node* closure = LoadRegister(Register::function_closure());
Node* context = GetContext();
RegListNodePair registers = GetRegisterListAtOperandIndex(1);
Node* suspend_id = BytecodeOperandUImmSmi(3);
- ExportRegisterFile(array, registers);
+ Node* shared =
+ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* formal_parameter_count =
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Uint16());
+
+ ExportParametersAndRegisterFile(array, registers, formal_parameter_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
suspend_id);
@@ -3072,11 +3082,19 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
// state as executing.
IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Node* generator = LoadRegisterAtOperandIndex(0);
+ Node* closure = LoadRegister(Register::function_closure());
RegListNodePair registers = GetRegisterListAtOperandIndex(1);
+ Node* shared =
+ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* formal_parameter_count =
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Uint16());
+
ImportRegisterFile(
- LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
- registers);
+ LoadObjectField(generator,
+ JSGeneratorObject::kParametersAndRegistersOffset),
+ registers, formal_parameter_count);
// Return the generator's input_or_debug_pos in the accumulator.
SetAccumulator(
@@ -3090,14 +3108,12 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale) {
Zone zone(isolate->allocator(), ZONE_NAME);
- InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, Code::BYTECODE_HANDLER,
+ isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
Bytecodes::ToString(bytecode),
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
- Bytecodes::ReturnCount(bytecode));
+ : PoisoningMitigationLevel::kDontPoison);
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
@@ -3108,14 +3124,15 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
#undef CALL_GENERATOR
}
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+ &state, AssemblerOptions::Default(isolate));
PROFILE(isolate, CodeCreateEvent(
CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code),
Bytecodes::ToString(bytecode, operand_scale).c_str()));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
- OFStream os(stdout);
+ StdoutStream os;
code->Disassemble(Bytecodes::ToString(bytecode), os);
os << std::flush;
}
@@ -3157,7 +3174,6 @@ class DeserializeLazyAssembler : public InterpreterAssembler {
Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale) {
Zone zone(isolate->allocator(), ZONE_NAME);
- const size_t return_count = 0;
std::string debug_name = std::string("DeserializeLazy");
if (operand_scale > OperandScale::kSingle) {
@@ -3166,23 +3182,23 @@ Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
}
- InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, Code::BYTECODE_HANDLER, debug_name.c_str(),
+ isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
+ debug_name.c_str(),
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
- return_count);
+ : PoisoningMitigationLevel::kDontPoison);
DeserializeLazyAssembler::Generate(&state, operand_scale);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(
+ &state, AssemblerOptions::Default(isolate));
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code), debug_name.c_str()));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
- OFStream os(stdout);
+ StdoutStream os;
code->Disassemble(debug_name.c_str(), os);
os << std::flush;
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 0480dec6cc..62785d7904 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -170,18 +170,6 @@ Node* IntrinsicsGenerator::IsTypedArray(
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSMap(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_MAP_TYPE);
-}
-
-Node* IntrinsicsGenerator::IsJSSet(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_SET_TYPE);
-}
-
Node* IntrinsicsGenerator::IsJSWeakMap(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* input = __ LoadRegisterFromRegisterList(args, 0);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 3016183c0b..570caca072 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -27,10 +27,8 @@ namespace interpreter {
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
V(IsArray, is_array, 1) \
- V(IsJSMap, is_js_map, 1) \
V(IsJSProxy, is_js_proxy, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
- V(IsJSSet, is_js_set, 1) \
V(IsJSWeakMap, is_js_weak_map, 1) \
V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index db02011c50..0446ed494d 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -136,7 +136,7 @@ void MaybePrintAst(ParseInfo* parse_info,
UnoptimizedCompilationInfo* compilation_info) {
if (!FLAG_print_ast) return;
- OFStream os(stdout);
+ StdoutStream os;
std::unique_ptr<char[]> name = compilation_info->literal()->GetDebugName();
os << "[generating bytecode for function: " << name.get() << "]" << std::endl;
#ifdef DEBUG
@@ -208,7 +208,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
}
if (ShouldPrintBytecode(shared_info)) {
- OFStream os(stdout);
+ StdoutStream os;
std::unique_ptr<char[]> name =
compilation_info()->literal()->GetDebugName();
os << "[generated bytecode for function: " << name.get() << "]"
@@ -233,7 +233,7 @@ bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != kNullAddress;
}
-const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
+const char* Interpreter::LookupNameOfBytecodeHandler(const Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...) \
if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 711aea8029..5ded893798 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -65,7 +65,7 @@ class Interpreter {
void IterateDispatchTable(RootVisitor* v);
// Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
- const char* LookupNameOfBytecodeHandler(Code* code);
+ const char* LookupNameOfBytecodeHandler(const Code* code);
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
index 538aae0faa..2e0a7d8ca5 100644
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc
@@ -84,6 +84,13 @@ void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
dispatch_table[index] = code->entry();
if (FLAG_print_builtin_size) PrintBuiltinSize(bytecode, operand_scale, code);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ std::string name = Bytecodes::ToString(bytecode, operand_scale);
+ code->PrintBuiltinCode(isolate, name.c_str());
+ }
+#endif // ENABLE_DISASSEMBLER
}
} // namespace interpreter
diff --git a/deps/v8/src/intl.cc b/deps/v8/src/intl.cc
index 139bb4daf5..a4d3262d74 100644
--- a/deps/v8/src/intl.cc
+++ b/deps/v8/src/intl.cc
@@ -166,7 +166,7 @@ V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
Handle<SeqTwoByteString> result;
std::unique_ptr<uc16[]> sap;
- if (dest_length == 0) return isolate->heap()->empty_string();
+ if (dest_length == 0) return ReadOnlyRoots(isolate).empty_string();
// This is not a real loop. It'll be executed only once (no overflow) or
// twice (overflow).
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
index 627cb4980d..e250e78fb0 100644
--- a/deps/v8/src/intl.h
+++ b/deps/v8/src/intl.h
@@ -23,6 +23,16 @@ class TimeZone;
namespace v8 {
namespace internal {
+enum class IcuService {
+ kBreakIterator,
+ kCollator,
+ kDateFormat,
+ kNumberFormat,
+ kPluralRules,
+ kResourceBundle,
+ kRelativeDateTimeFormatter
+};
+
const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
std::unique_ptr<uc16[]>* dest,
int32_t length);
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 2d982f009f..7a43f1367f 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -38,7 +38,7 @@ void Isolate::set_pending_exception(Object* exception_obj) {
void Isolate::clear_pending_exception() {
DCHECK(!thread_local_top_.pending_exception_->IsException(this));
- thread_local_top_.pending_exception_ = heap_.the_hole_value();
+ thread_local_top_.pending_exception_ = ReadOnlyRoots(this).the_hole_value();
}
@@ -60,7 +60,7 @@ void Isolate::clear_wasm_caught_exception() {
}
void Isolate::clear_pending_message() {
- thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
+ thread_local_top_.pending_message_obj_ = ReadOnlyRoots(this).the_hole_value();
}
@@ -73,17 +73,18 @@ Object* Isolate::scheduled_exception() {
bool Isolate::has_scheduled_exception() {
DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
- return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
+ return thread_local_top_.scheduled_exception_ !=
+ ReadOnlyRoots(this).the_hole_value();
}
void Isolate::clear_scheduled_exception() {
DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
- thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
+ thread_local_top_.scheduled_exception_ = ReadOnlyRoots(this).the_hole_value();
}
bool Isolate::is_catchable_by_javascript(Object* exception) {
- return exception != heap()->termination_exception();
+ return exception != ReadOnlyRoots(heap()).termination_exception();
}
void Isolate::FireBeforeCallEnteredCallback() {
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index bb50ae493d..b0a970305e 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -42,8 +42,8 @@
#include "src/messages.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
@@ -62,13 +62,27 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
+#ifdef V8_INTL_SUPPORT
+#include "unicode/regex.h"
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
+#ifdef DEBUG
+#define TRACE_ISOLATE(tag) \
+ do { \
+ if (FLAG_trace_isolates) { \
+ PrintF("Isolate %p (id %d)" #tag "\n", reinterpret_cast<void*>(this), \
+ id()); \
+ } \
+ } while (false)
+#else
+#define TRACE_ISOLATE(tag)
+#endif
+
base::Atomic32 ThreadId::highest_thread_id_ = 0;
-#ifdef V8_EMBEDDED_BUILTINS
extern const uint8_t* DefaultEmbeddedBlob();
extern uint32_t DefaultEmbeddedBlobSize();
@@ -119,7 +133,6 @@ uint32_t Isolate::CurrentEmbeddedBlobSize() {
return current_embedded_blob_size_.load(
std::memory_order::memory_order_relaxed);
}
-#endif // V8_EMBEDDED_BUILTINS
int ThreadId::AllocateThreadId() {
int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
@@ -136,44 +149,11 @@ int ThreadId::GetCurrentThreadId() {
return thread_id;
}
-
-ThreadLocalTop::ThreadLocalTop() {
- InitializeInternal();
-}
-
-
-void ThreadLocalTop::InitializeInternal() {
- c_entry_fp_ = 0;
- c_function_ = 0;
- handler_ = 0;
-#ifdef USE_SIMULATOR
- simulator_ = nullptr;
-#endif
- js_entry_sp_ = kNullAddress;
- external_callback_scope_ = nullptr;
- current_vm_state_ = EXTERNAL;
- try_catch_handler_ = nullptr;
- context_ = nullptr;
- thread_id_ = ThreadId::Invalid();
- external_caught_exception_ = false;
- failed_access_check_callback_ = nullptr;
- save_context_ = nullptr;
- promise_on_stack_ = nullptr;
-
- // These members are re-initialized later after deserialization
- // is complete.
- pending_exception_ = nullptr;
- wasm_caught_exception_ = nullptr;
- rethrowing_message_ = false;
- pending_message_obj_ = nullptr;
- scheduled_exception_ = nullptr;
-}
-
-
-void ThreadLocalTop::Initialize() {
- InitializeInternal();
+void ThreadLocalTop::Initialize(Isolate* isolate) {
+ *this = ThreadLocalTop();
+ isolate_ = isolate;
#ifdef USE_SIMULATOR
- simulator_ = Simulator::current(isolate_);
+ simulator_ = Simulator::current(isolate);
#endif
thread_id_ = ThreadId::Current();
}
@@ -400,6 +380,7 @@ StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
size_t i = 0;
StackFrameIterator it(isolate);
for (; !it.done() && i < code_objects_length; it.Advance()) {
+ if (it.frame()->type() == StackFrame::INTERNAL) continue;
code_objects_[i++] = it.frame()->unchecked_code();
}
}
@@ -471,7 +452,7 @@ class FrameArrayBuilder {
}
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
- if (instance->module_object()->shared()->is_asm_js()) {
+ if (instance->module_object()->is_asm_js()) {
flags |= FrameArray::kIsAsmJsWasmFrame;
if (WasmCompiledFrame::cast(frame)->at_to_number_conversion()) {
flags |= FrameArray::kAsmJsAtNumberConversion;
@@ -490,7 +471,7 @@ class FrameArrayBuilder {
const auto& summary = summ.AsWasmInterpreted();
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->module_object()->shared()->is_asm_js());
+ DCHECK(!instance->module_object()->is_asm_js());
elements_ = FrameArray::AppendWasmFrame(elements_, instance,
summary.function_index(), {},
summary.byte_offset(), flags);
@@ -521,7 +502,7 @@ class FrameArrayBuilder {
bool full() { return elements_->FrameCount() >= limit_; }
Handle<FrameArray> GetElements() {
- elements_->ShrinkToFit();
+ elements_->ShrinkToFit(isolate_);
return elements_;
}
@@ -671,10 +652,11 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
- RETURN_ON_EXCEPTION(this,
- JSReceiver::SetProperty(error_object, key, stack_trace,
- LanguageMode::kStrict),
- JSReceiver);
+ RETURN_ON_EXCEPTION(
+ this,
+ JSReceiver::SetProperty(this, error_object, key, stack_trace,
+ LanguageMode::kStrict),
+ JSReceiver);
}
return error_object;
}
@@ -686,10 +668,11 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> stack_trace =
CaptureSimpleStackTrace(error_object, mode, caller);
- RETURN_ON_EXCEPTION(this,
- JSReceiver::SetProperty(error_object, key, stack_trace,
- LanguageMode::kStrict),
- JSReceiver);
+ RETURN_ON_EXCEPTION(
+ this,
+ JSReceiver::SetProperty(this, error_object, key, stack_trace,
+ LanguageMode::kStrict),
+ JSReceiver);
return error_object;
}
@@ -763,15 +746,11 @@ class CaptureStackTraceHelper {
} else {
cache = SimpleNumberDictionary::New(isolate_, 1);
}
- int entry = cache->FindEntry(code_offset);
+ int entry = cache->FindEntry(isolate_, code_offset);
if (entry != NumberDictionary::kNotFound) {
Handle<StackFrameInfo> frame(
- StackFrameInfo::cast(cache->ValueAt(entry)));
- DCHECK(frame->function_name()->IsString());
- Handle<String> function_name = summ.FunctionName();
- if (function_name->Equals(String::cast(frame->function_name()))) {
- return frame;
- }
+ StackFrameInfo::cast(cache->ValueAt(entry)), isolate_);
+ return frame;
}
}
@@ -794,7 +773,8 @@ class CaptureStackTraceHelper {
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
if (!FLAG_optimize_for_size) {
- auto new_cache = SimpleNumberDictionary::Set(cache, code_offset, frame);
+ auto new_cache =
+ SimpleNumberDictionary::Set(isolate_, cache, code_offset, frame);
if (*new_cache != *cache || !maybe_cache->IsNumberDictionary()) {
AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
}
@@ -807,10 +787,10 @@ class CaptureStackTraceHelper {
const FrameSummary::WasmFrameSummary& summ) {
Handle<StackFrameInfo> info = factory()->NewStackFrameInfo();
- Handle<WasmSharedModuleData> shared(
- summ.wasm_instance()->module_object()->shared(), isolate_);
- Handle<String> name = WasmSharedModuleData::GetFunctionName(
- isolate_, shared, summ.function_index());
+ Handle<WasmModuleObject> module_object(
+ summ.wasm_instance()->module_object(), isolate_);
+ Handle<String> name = WasmModuleObject::GetFunctionName(
+ isolate_, module_object, summ.function_index());
info->set_function_name(*name);
// Encode the function index as line number (1-based).
info->set_line_number(summ.function_index() + 1);
@@ -866,8 +846,7 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
frames_seen++;
}
}
- stack_trace_elems->Shrink(frames_seen);
- return stack_trace_elems;
+ return FixedArray::ShrinkOrEmpty(this, stack_trace_elems, frames_seen);
}
@@ -1037,12 +1016,12 @@ Object* Isolate::StackOverflow() {
}
#endif // VERIFY_HEAP
- return heap()->exception();
+ return ReadOnlyRoots(heap()).exception();
}
Object* Isolate::TerminateExecution() {
- return Throw(heap_.termination_exception(), nullptr);
+ return Throw(ReadOnlyRoots(this).termination_exception(), nullptr);
}
@@ -1051,12 +1030,12 @@ void Isolate::CancelTerminateExecution() {
try_catch_handler()->has_terminated_ = false;
}
if (has_pending_exception() &&
- pending_exception() == heap_.termination_exception()) {
+ pending_exception() == ReadOnlyRoots(this).termination_exception()) {
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
}
if (has_scheduled_exception() &&
- scheduled_exception() == heap_.termination_exception()) {
+ scheduled_exception() == ReadOnlyRoots(this).termination_exception()) {
thread_local_top()->external_caught_exception_ = false;
clear_scheduled_exception();
}
@@ -1121,7 +1100,8 @@ void ReportBootstrappingException(Handle<Object> exception,
// Since comments and empty lines have been stripped from the source of
// builtins, print the actual source here so that line numbers match.
if (location->script()->source()->IsString()) {
- Handle<String> src(String::cast(location->script()->source()));
+ Handle<String> src(String::cast(location->script()->source()),
+ location->script()->GetIsolate());
PrintF("Failing script:");
int len = src->length();
if (len == 0) {
@@ -1154,11 +1134,11 @@ bool Isolate::is_catchable_by_wasm(Object* exception) {
.IsJust();
}
-Object* Isolate::Throw(Object* exception, MessageLocation* location) {
+Object* Isolate::Throw(Object* raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
HandleScope scope(this);
- Handle<Object> exception_handle(exception, this);
+ Handle<Object> exception(raw_exception, this);
if (FLAG_print_all_exceptions) {
printf("=========================================================\n");
@@ -1183,11 +1163,13 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
Script::GetColumnNumber(script, location->start_pos()),
Script::GetLineNumber(script, location->end_pos()) + 1,
Script::GetColumnNumber(script, location->end_pos()));
+ // Make sure to update the raw exception pointer in case it moved.
+ raw_exception = *exception;
} else {
printf(", line %d\n", script->GetLineNumber(location->start_pos()) + 1);
}
}
- exception->Print();
+ raw_exception->Print();
printf("Stack Trace:\n");
PrintStack(stdout);
printf("=========================================================\n");
@@ -1209,8 +1191,8 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
thread_local_top()->rethrowing_message_ = false;
// Notify debugger of exception.
- if (is_catchable_by_javascript(exception)) {
- debug()->OnThrow(exception_handle);
+ if (is_catchable_by_javascript(raw_exception)) {
+ debug()->OnThrow(exception);
}
// Generate the message if required.
@@ -1225,9 +1207,9 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
// It's not safe to try to make message objects or collect stack traces
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
- ReportBootstrappingException(exception_handle, location);
+ ReportBootstrappingException(exception, location);
} else {
- Handle<Object> message_obj = CreateMessage(exception_handle, location);
+ Handle<Object> message_obj = CreateMessage(exception, location);
thread_local_top()->pending_message_obj_ = *message_obj;
// For any exception not caught by JavaScript, even when an external
@@ -1256,8 +1238,8 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
}
// Set the exception being thrown.
- set_pending_exception(*exception_handle);
- return heap()->exception();
+ set_pending_exception(*exception);
+ return ReadOnlyRoots(heap()).exception();
}
@@ -1266,7 +1248,7 @@ Object* Isolate::ReThrow(Object* exception) {
// Set the exception being re-thrown.
set_pending_exception(exception);
- return heap()->exception();
+ return ReadOnlyRoots(heap()).exception();
}
@@ -1576,7 +1558,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
} break;
case StackFrame::STUB: {
- Handle<Code> code(frame->LookupCode());
+ Handle<Code> code(frame->LookupCode(), this);
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
!code->handler_table_offset() || !code->is_turbofanned()) {
break;
@@ -1587,7 +1569,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
} break;
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
- Handle<Code> code(frame->LookupCode());
+ Handle<Code> code(frame->LookupCode(), this);
CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
if (prediction != NOT_CAUGHT) return prediction;
} break;
@@ -1604,7 +1586,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
Object* Isolate::ThrowIllegalOperation() {
if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
- return Throw(heap()->illegal_access_string());
+ return Throw(ReadOnlyRoots(heap()).illegal_access_string());
}
@@ -1635,7 +1617,8 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK(has_scheduled_exception());
if (scheduled_exception() == handler->exception_) {
- DCHECK(scheduled_exception() != heap()->termination_exception());
+ DCHECK(scheduled_exception() !=
+ ReadOnlyRoots(heap()).termination_exception());
clear_scheduled_exception();
}
if (thread_local_top_.pending_message_obj_ == handler->message_obj_) {
@@ -1661,8 +1644,17 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
Handle<Object> receiver(frame->receiver(), this);
Handle<JSFunction> function(frame->function(), this);
- Handle<AbstractCode> code(AbstractCode::cast(frame->LookupCode()), this);
- const int offset = static_cast<int>(frame->pc() - code->InstructionStart());
+ Handle<AbstractCode> code;
+ int offset;
+ if (frame->is_interpreted()) {
+ InterpretedFrame* interpreted_frame = InterpretedFrame::cast(frame);
+ code = handle(AbstractCode::cast(interpreted_frame->GetBytecodeArray()),
+ this);
+ offset = interpreted_frame->GetBytecodeOffset();
+ } else {
+ code = handle(AbstractCode::cast(frame->LookupCode()), this);
+ offset = static_cast<int>(frame->pc() - code->InstructionStart());
+ }
JSStackFrame site(this, receiver, function, code, offset);
Handle<String> line = site.ToString().ToHandleChecked();
@@ -1692,7 +1684,7 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
}
if (summary.IsJavaScript()) {
- shared = handle(summary.AsJavaScript().function()->shared());
+ shared = handle(summary.AsJavaScript().function()->shared(), this);
}
*target = MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
return true;
@@ -1719,7 +1711,7 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
Handle<JSObject>::cast(exception), script_symbol);
if (!script->IsScript()) return false;
- Handle<Script> cast_script(Script::cast(*script));
+ Handle<Script> cast_script(Script::cast(*script), this);
*target = MessageLocation(cast_script, start_pos_value, end_pos_value);
return true;
}
@@ -1734,12 +1726,13 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
if (!property->IsJSArray()) return false;
Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
- Handle<FrameArray> elements(FrameArray::cast(simple_stack_trace->elements()));
+ Handle<FrameArray> elements(FrameArray::cast(simple_stack_trace->elements()),
+ this);
const int frame_count = elements->FrameCount();
for (int i = 0; i < frame_count; i++) {
if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
- Handle<WasmInstanceObject> instance(elements->WasmInstance(i));
+ Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
uint32_t func_index =
static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
int code_offset = elements->Offset(i)->value();
@@ -1748,16 +1741,16 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
// a second lookup here could lead to inconsistency.
int byte_offset =
FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- instance->compiled_module()->GetNativeModule()->code(func_index),
+ instance->module_object()->native_module()->code(func_index),
code_offset);
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
- int pos = WasmSharedModuleData::GetSourcePosition(
- handle(instance->module_object()->shared(), this), func_index,
- byte_offset, is_at_number_conversion);
- Handle<Script> script(instance->module_object()->shared()->script());
+ int pos = WasmModuleObject::GetSourcePosition(
+ handle(instance->module_object(), this), func_index, byte_offset,
+ is_at_number_conversion);
+ Handle<Script> script(instance->module_object()->script(), this);
*target = MessageLocation(script, pos, pos + 1);
return true;
@@ -1773,7 +1766,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
const int code_offset = elements->Offset(i)->value();
const int pos = abstract_code->SourcePosition(code_offset);
- Handle<Script> casted_script(Script::cast(script));
+ Handle<Script> casted_script(Script::cast(script), this);
*target = MessageLocation(casted_script, pos, pos + 1);
return true;
}
@@ -1816,7 +1809,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
bool Isolate::IsJavaScriptHandlerOnTop(Object* exception) {
- DCHECK_NE(heap()->the_hole_value(), exception);
+ DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
// For uncatchable exceptions, the JavaScript handler cannot be on top.
if (!is_catchable_by_javascript(exception)) return false;
@@ -1841,7 +1834,7 @@ bool Isolate::IsJavaScriptHandlerOnTop(Object* exception) {
bool Isolate::IsExternalHandlerOnTop(Object* exception) {
- DCHECK_NE(heap()->the_hole_value(), exception);
+ DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
@@ -1891,8 +1884,7 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
if (!message_obj->IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
- Handle<JSValue> script_wrapper(JSValue::cast(message->script()), this);
- Handle<Script> script(Script::cast(script_wrapper->value()), this);
+ Handle<Script> script(message->script(), this);
int start_pos = message->start_position();
int end_pos = message->end_position();
MessageLocation location(script, start_pos, end_pos);
@@ -1983,12 +1975,12 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
- if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ if (thread_local_top_.pending_exception_ !=
+ ReadOnlyRoots(heap()).termination_exception() &&
!thread_local_top_.pending_message_obj_->IsTheHole(this)) {
Handle<JSMessageObject> message_obj(
JSMessageObject::cast(thread_local_top_.pending_message_obj_), this);
- Handle<JSValue> script_wrapper(JSValue::cast(message_obj->script()), this);
- Handle<Script> script(Script::cast(script_wrapper->value()), this);
+ Handle<Script> script(message_obj->script(), this);
int start_pos = message_obj->start_position();
int end_pos = message_obj->end_position();
return MessageLocation(script, start_pos, end_pos);
@@ -2003,7 +1995,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
PropagatePendingExceptionToExternalTryCatch();
bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
+ pending_exception() == ReadOnlyRoots(this).termination_exception();
// Do not reschedule the exception if this is the bottom call.
bool clear_exception = is_bottom_call;
@@ -2224,24 +2216,8 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
abort_on_uncaught_exception_callback_ = callback;
}
-namespace {
-void AdvanceWhileDebugContext(JavaScriptFrameIterator& it, Debug* debug) {
- if (!debug->in_debug_scope()) return;
-
- while (!it.done()) {
- Context* context = Context::cast(it.frame()->context());
- if (context->native_context() == *debug->debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
-}
-} // namespace
-
Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it(this);
- AdvanceWhileDebugContext(it, debug_);
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
@@ -2250,7 +2226,6 @@ Handle<Context> Isolate::GetCallingNativeContext() {
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
- AdvanceWhileDebugContext(it, debug_);
// 1st candidate: most-recently-entered author function's context
// if it's newer than the last Context::BackupIncumbentScope entry.
@@ -2370,19 +2345,6 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
table_.clear();
}
-
-#ifdef DEBUG
-#define TRACE_ISOLATE(tag) \
- do { \
- if (FLAG_trace_isolates) { \
- PrintF("Isolate %p (id %d)" #tag "\n", \
- reinterpret_cast<void*>(this), id()); \
- } \
- } while (false)
-#else
-#define TRACE_ISOLATE(tag)
-#endif
-
class VerboseAccountingAllocator : public AccountingAllocator {
public:
VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
@@ -2400,12 +2362,11 @@ class VerboseAccountingAllocator : public AccountingAllocator {
size_t malloced_current = GetCurrentMemoryUsage();
size_t pooled_current = GetCurrentPoolSize();
- if (last_memory_usage_.Value() + allocation_sample_bytes_ <
- malloced_current ||
- last_pool_size_.Value() + pool_sample_bytes_ < pooled_current) {
+ if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
+ last_pool_size_ + pool_sample_bytes_ < pooled_current) {
PrintMemoryJSON(malloced_current, pooled_current);
- last_memory_usage_.SetValue(malloced_current);
- last_pool_size_.SetValue(pooled_current);
+ last_memory_usage_ = malloced_current;
+ last_pool_size_ = pooled_current;
}
}
return memory;
@@ -2416,22 +2377,21 @@ class VerboseAccountingAllocator : public AccountingAllocator {
size_t malloced_current = GetCurrentMemoryUsage();
size_t pooled_current = GetCurrentPoolSize();
- if (malloced_current + allocation_sample_bytes_ <
- last_memory_usage_.Value() ||
- pooled_current + pool_sample_bytes_ < last_pool_size_.Value()) {
+ if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
+ pooled_current + pool_sample_bytes_ < last_pool_size_) {
PrintMemoryJSON(malloced_current, pooled_current);
- last_memory_usage_.SetValue(malloced_current);
- last_pool_size_.SetValue(pooled_current);
+ last_memory_usage_ = malloced_current;
+ last_pool_size_ = pooled_current;
}
}
void ZoneCreation(const Zone* zone) override {
PrintZoneModificationSample(zone, "zonecreation");
- nesting_deepth_.Increment(1);
+ nesting_deepth_++;
}
void ZoneDestruction(const Zone* zone) override {
- nesting_deepth_.Decrement(1);
+ nesting_deepth_--;
PrintZoneModificationSample(zone, "zonedestruction");
}
@@ -2446,11 +2406,11 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"name\": \"%s\", "
"\"size\": %" PRIuS
","
- "\"nesting\": %" PRIuS "}\n",
+ "\"nesting\": %zu}\n",
type, reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(),
reinterpret_cast<const void*>(zone), zone->name(),
- zone->allocation_size(), nesting_deepth_.Value());
+ zone->allocation_size(), nesting_deepth_.load());
}
void PrintMemoryJSON(size_t malloced, size_t pooled) {
@@ -2469,14 +2429,14 @@ class VerboseAccountingAllocator : public AccountingAllocator {
}
Heap* heap_;
- base::AtomicNumber<size_t> last_memory_usage_;
- base::AtomicNumber<size_t> last_pool_size_;
- base::AtomicNumber<size_t> nesting_deepth_;
+ std::atomic<size_t> last_memory_usage_;
+ std::atomic<size_t> last_pool_size_;
+ std::atomic<size_t> nesting_deepth_;
size_t allocation_sample_bytes_, pool_sample_bytes_;
};
#ifdef DEBUG
-base::AtomicNumber<size_t> Isolate::non_disposed_isolates_;
+std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
Isolate::Isolate()
@@ -2507,24 +2467,32 @@ Isolate::Isolate()
global_handles_(nullptr),
eternal_handles_(nullptr),
thread_manager_(nullptr),
+ builtins_(this),
setup_delegate_(nullptr),
regexp_stack_(nullptr),
date_cache_(nullptr),
- call_descriptor_data_(nullptr),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(nullptr),
fuzzer_rng_(nullptr),
rail_mode_(PERFORMANCE_ANIMATION),
- promise_hook_or_debug_is_active_(false),
+ atomics_wait_callback_(nullptr),
+ atomics_wait_callback_data_(nullptr),
promise_hook_(nullptr),
+ host_import_module_dynamically_callback_(nullptr),
+ host_initialize_import_meta_object_callback_(nullptr),
load_start_time_ms_(0),
+#ifdef V8_INTL_SUPPORT
+ language_singleton_regexp_matcher_(nullptr),
+ language_tag_regexp_matcher_(nullptr),
+ language_variant_regexp_matcher_(nullptr),
+#endif // V8_INTL_SUPPORT
serializer_enabled_(false),
has_fatal_error_(false),
initialized_from_snapshot_(false),
is_tail_call_elimination_enabled_(true),
is_isolate_in_background_(false),
- cpu_profiler_(nullptr),
+ memory_savings_mode_active_(false),
heap_profiler_(nullptr),
code_event_dispatcher_(new CodeEventDispatcher()),
function_entry_hook_(nullptr),
@@ -2557,7 +2525,7 @@ Isolate::Isolate()
thread_manager_->isolate_ = this;
#ifdef DEBUG
- non_disposed_isolates_.Increment(1);
+ non_disposed_isolates_++;
#endif // DEBUG
handle_scope_data_.Initialize();
@@ -2578,6 +2546,18 @@ Isolate::Isolate()
tracing_cpu_profiler_.reset(new TracingCpuProfilerImpl(this));
init_memcopy_functions(this);
+
+ if (FLAG_embedded_builtins) {
+#ifdef V8_MULTI_SNAPSHOTS
+ if (FLAG_untrusted_code_mitigations) {
+ SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
+ } else {
+ SetEmbeddedBlob(TrustedEmbeddedBlob(), TrustedEmbeddedBlobSize());
+ }
+#else
+ SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
+#endif
+ }
}
@@ -2607,7 +2587,7 @@ void Isolate::TearDown() {
}
#ifdef DEBUG
- non_disposed_isolates_.Decrement(1);
+ non_disposed_isolates_--;
#endif // DEBUG
delete this;
@@ -2628,14 +2608,14 @@ void Isolate::Deinit() {
debug()->Unload();
+ wasm_engine()->TearDown();
+
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_engine()->TearDown();
-
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -2645,10 +2625,6 @@ void Isolate::Deinit() {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
}
- if (cpu_profiler_) {
- cpu_profiler_->DeleteAllProfiles();
- }
-
// We must stop the logger before we tear down other components.
sampler::Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
@@ -2681,18 +2657,20 @@ void Isolate::Deinit() {
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
+ // This stops cancelable tasks (i.e. concurrent masking tasks)
cancelable_task_manager()->CancelAndWait();
heap_.TearDown();
logger_->TearDown();
-#ifdef V8_EMBEDDED_BUILTINS
- if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
- // We own the embedded blob. Free it.
- uint8_t* data = const_cast<uint8_t*>(embedded_blob_);
- InstructionStream::FreeOffHeapInstructionStream(data, embedded_blob_size_);
+ if (FLAG_embedded_builtins) {
+ if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
+ // We own the embedded blob. Free it.
+ uint8_t* data = const_cast<uint8_t*>(embedded_blob_);
+ InstructionStream::FreeOffHeapInstructionStream(data,
+ embedded_blob_size_);
+ }
}
-#endif
delete interpreter_;
interpreter_ = nullptr;
@@ -2700,9 +2678,6 @@ void Isolate::Deinit() {
delete ast_string_constants_;
ast_string_constants_ = nullptr;
- delete cpu_profiler_;
- cpu_profiler_ = nullptr;
-
code_event_dispatcher_.reset();
delete root_index_map_;
@@ -2734,8 +2709,16 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = nullptr;
- delete[] call_descriptor_data_;
- call_descriptor_data_ = nullptr;
+#ifdef V8_INTL_SUPPORT
+ delete language_singleton_regexp_matcher_;
+ language_singleton_regexp_matcher_ = nullptr;
+
+ delete language_tag_regexp_matcher_;
+ language_tag_regexp_matcher_ = nullptr;
+
+ delete language_variant_regexp_matcher_;
+ language_variant_regexp_matcher_ = nullptr;
+#endif // V8_INTL_SUPPORT
delete regexp_stack_;
regexp_stack_ = nullptr;
@@ -2796,17 +2779,13 @@ Isolate::~Isolate() {
allocator_ = nullptr;
}
-
-void Isolate::InitializeThreadLocal() {
- thread_local_top_.isolate_ = this;
- thread_local_top_.Initialize();
-}
+void Isolate::InitializeThreadLocal() { thread_local_top_.Initialize(this); }
void Isolate::SetTerminationOnExternalTryCatch() {
if (try_catch_handler() == nullptr) return;
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
- try_catch_handler()->exception_ = heap()->null_value();
+ try_catch_handler()->exception_ = ReadOnlyRoots(heap()).null_value();
}
bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
@@ -2864,7 +2843,6 @@ void PrintBuiltinSizes(Isolate* isolate) {
}
}
-#ifdef V8_EMBEDDED_BUILTINS
void CreateOffHeapTrampolines(Isolate* isolate) {
DCHECK(isolate->serializer_enabled());
DCHECK_NOT_NULL(isolate->embedded_blob());
@@ -2898,10 +2876,8 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
}
}
}
-#endif // V8_EMBEDDED_BUILTINS
} // namespace
-#ifdef V8_EMBEDDED_BUILTINS
void Isolate::PrepareEmbeddedBlobForSerialization() {
// When preparing the embedded blob, ensure it doesn't exist yet.
DCHECK_NULL(embedded_blob());
@@ -2918,7 +2894,6 @@ void Isolate::PrepareEmbeddedBlobForSerialization() {
SetEmbeddedBlob(const_cast<const uint8_t*>(data), size);
CreateOffHeapTrampolines(this);
}
-#endif // V8_EMBEDDED_BUILTINS
bool Isolate::Init(StartupDeserializer* des) {
TRACE_ISOLATE(init);
@@ -2968,25 +2943,11 @@ bool Isolate::Init(StartupDeserializer* des) {
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
- call_descriptor_data_ =
- new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
compiler_dispatcher_ =
new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
-#ifdef V8_EMBEDDED_BUILTINS
-#ifdef V8_MULTI_SNAPSHOTS
- if (FLAG_untrusted_code_mitigations) {
- SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
- } else {
- SetEmbeddedBlob(TrustedEmbeddedBlob(), TrustedEmbeddedBlobSize());
- }
-#else
- SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
-#endif
-#endif
-
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -3000,27 +2961,20 @@ bool Isolate::Init(StartupDeserializer* des) {
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
- if (!heap_.SetUp()) {
- V8::FatalProcessOutOfMemory(this, "heap setup");
- return false;
- }
+ heap_.SetUp();
- // Setup the wasm engine. Currently, there's one per Isolate.
- wasm_engine_.reset(new wasm::WasmEngine(
- std::unique_ptr<wasm::WasmCodeManager>(new wasm::WasmCodeManager(
- reinterpret_cast<v8::Isolate*>(this), kMaxWasmCodeMemory))));
- wasm_engine_->memory_tracker()->SetAllocationResultHistogram(
- counters()->wasm_memory_allocation_result());
- wasm_engine_->memory_tracker()->SetAddressSpaceUsageHistogram(
- counters()->wasm_address_space_usage_mb());
- wasm_engine_->code_manager()->SetModuleCodeSizeHistogram(
- counters()->wasm_module_code_size_mb());
-
-// Initialize the interface descriptors ahead of time.
-#define INTERFACE_DESCRIPTOR(Name, ...) \
- { Name##Descriptor(this); }
- INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
-#undef INTERFACE_DESCRIPTOR
+ // Setup the wasm engine. Currently, there's one per Isolate by default.
+ if (wasm_engine_ == nullptr) {
+ wasm_engine_.reset(
+ new wasm::WasmEngine(std::unique_ptr<wasm::WasmCodeManager>(
+ new wasm::WasmCodeManager(kMaxWasmCodeMemory))));
+ wasm_engine_->memory_tracker()->SetAllocationResultHistogram(
+ counters()->wasm_memory_allocation_result());
+ wasm_engine_->memory_tracker()->SetAddressSpaceUsageHistogram(
+ counters()->wasm_address_space_usage_mb());
+ wasm_engine_->code_manager()->SetModuleCodeSizeHistogram(
+ counters()->wasm_module_code_size_mb());
+ }
deoptimizer_data_ = new DeoptimizerData(heap());
@@ -3036,26 +2990,27 @@ bool Isolate::Init(StartupDeserializer* des) {
if (create_heap_objects) {
// Terminate the partial snapshot cache so we can iterate.
- partial_snapshot_cache_.push_back(heap_.undefined_value());
+ partial_snapshot_cache_.push_back(ReadOnlyRoots(this).undefined_value());
}
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
-#ifdef V8_EMBEDDED_BUILTINS
- if (create_heap_objects && serializer_enabled()) {
- builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
+ if (FLAG_embedded_builtins) {
+ if (create_heap_objects && serializer_enabled()) {
+ builtins_constants_table_builder_ =
+ new BuiltinsConstantsTableBuilder(this);
+ }
}
-#endif
setup_delegate_->SetupBuiltins(this);
-#ifdef V8_EMBEDDED_BUILTINS
- if (create_heap_objects && serializer_enabled()) {
- builtins_constants_table_builder_->Finalize();
- delete builtins_constants_table_builder_;
- builtins_constants_table_builder_ = nullptr;
+ if (FLAG_embedded_builtins) {
+ if (create_heap_objects && serializer_enabled()) {
+ builtins_constants_table_builder_->Finalize();
+ delete builtins_constants_table_builder_;
+ builtins_constants_table_builder_ = nullptr;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
if (create_heap_objects) heap_.CreateFixedStubs();
@@ -3100,7 +3055,8 @@ bool Isolate::Init(StartupDeserializer* des) {
heap_.SetStackLimits();
// Quiet the heap NaN if needed on target platform.
- if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
+ if (!create_heap_objects)
+ Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
if (FLAG_trace_turbo) {
// Create an empty file.
@@ -3122,6 +3078,8 @@ bool Isolate::Init(StartupDeserializer* des) {
static_cast<int>(OFFSET_OF(Isolate, heap_.external_reference_table_)),
Internals::kIsolateRootsOffset +
Heap::kRootsExternalReferenceTableOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.builtins_)),
+ Internals::kIsolateRootsOffset + Heap::kRootsBuiltinsOffset);
{
HandleScope scope(this);
@@ -3241,8 +3199,7 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
void Isolate::DumpAndResetStats() {
if (turbo_statistics() != nullptr) {
DCHECK(FLAG_turbo_stats || FLAG_turbo_stats_nvp);
-
- OFStream os(stdout);
+ StdoutStream os;
if (FLAG_turbo_stats) {
AsPrintableStatistics ps = {*turbo_statistics(), false};
os << ps << std::endl;
@@ -3251,9 +3208,14 @@ void Isolate::DumpAndResetStats() {
AsPrintableStatistics ps = {*turbo_statistics(), true};
os << ps << std::endl;
}
+ delete turbo_statistics_;
+ turbo_statistics_ = nullptr;
+ }
+ // TODO(7424): There is no public API for the {WasmEngine} yet. So for now we
+ // just dump and reset the engines statistics together with the Isolate.
+ if (FLAG_turbo_stats_wasm) {
+ wasm_engine()->DumpAndResetTurboStatistics();
}
- delete turbo_statistics_;
- turbo_statistics_ = nullptr;
if (V8_UNLIKELY(FLAG_runtime_stats ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
counters()->runtime_call_stats()->Print();
@@ -3285,10 +3247,6 @@ bool Isolate::use_optimizer() {
!is_precise_count_code_coverage() && !is_block_count_code_coverage();
}
-bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
- return NeedsSourcePositionsForProfiling() || FLAG_detailed_line_info;
-}
-
bool Isolate::NeedsSourcePositionsForProfiling() const {
return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
@@ -3328,7 +3286,7 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
// Add collected feedback vectors to the root list lest we lose them to GC.
Handle<ArrayList> list =
ArrayList::New(this, static_cast<int>(vectors.size()));
- for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
+ for (const auto& vector : vectors) list = ArrayList::Add(this, list, vector);
SetFeedbackVectorsForProfilingTools(*list);
}
@@ -3391,16 +3349,17 @@ bool Isolate::IsNoElementsProtectorIntact(Context* context) {
}
FixedArrayBase* elements = initial_array_proto->elements();
- if (elements != heap()->empty_fixed_array() &&
- elements != heap()->empty_slow_element_dictionary()) {
+ ReadOnlyRoots roots(heap());
+ if (elements != roots.empty_fixed_array() &&
+ elements != roots.empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
// Check that the Object.prototype hasn't been altered WRT empty elements.
elements = initial_object_proto->elements();
- if (elements != heap()->empty_fixed_array() &&
- elements != heap()->empty_slow_element_dictionary()) {
+ if (elements != roots.empty_fixed_array() &&
+ elements != roots.empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -3423,8 +3382,8 @@ bool Isolate::IsNoElementsProtectorIntact(Context* context) {
// Check that the String.prototype hasn't been altered WRT empty elements.
elements = initial_string_proto->elements();
- if (elements != heap()->empty_fixed_array() &&
- elements != heap()->empty_slow_element_dictionary()) {
+ if (elements != roots.empty_fixed_array() &&
+ elements != roots.empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -3458,7 +3417,7 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
Handle<Object> array_prototype(array_function()->prototype(), this);
Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
Handle<Object> value;
- LookupIterator it(array_prototype, key);
+ LookupIterator it(this, array_prototype, key);
if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) {
// TODO(cbruni): Currently we do not revert if we unset the
// @@isConcatSpreadable property on Array.prototype or Object.prototype
@@ -3481,7 +3440,7 @@ bool Isolate::IsPromiseHookProtectorIntact() {
bool is_promise_hook_protector_intact =
Smi::ToInt(promise_hook_cell->value()) == kProtectorValid;
DCHECK_IMPLIES(is_promise_hook_protector_intact,
- !promise_hook_or_debug_is_active_);
+ !promise_hook_or_async_event_delegate_);
return is_promise_hook_protector_intact;
}
@@ -3515,7 +3474,7 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
if (!IsNoElementsProtectorIntact()) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
- factory()->no_elements_protector(),
+ this, factory()->no_elements_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
}
@@ -3538,24 +3497,27 @@ void Isolate::InvalidateArrayConstructorProtector() {
void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->array_species_protector()->value()->IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
- factory()->array_species_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->array_species_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArraySpeciesLookupChainIntact());
}
void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(factory()->typed_array_species_protector()->value()->IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
- factory()->typed_array_species_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->typed_array_species_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value()->IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
- factory()->promise_species_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->promise_species_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseSpeciesLookupChainIntact());
}
@@ -3571,7 +3533,7 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- factory()->array_iterator_protector(),
+ this, factory()->array_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayIteratorLookupChainIntact());
}
@@ -3580,7 +3542,7 @@ void Isolate::InvalidateArrayBufferNeuteringProtector() {
DCHECK(factory()->array_buffer_neutering_protector()->value()->IsSmi());
DCHECK(IsArrayBufferNeuteringIntact());
PropertyCell::SetValueWithInvalidation(
- factory()->array_buffer_neutering_protector(),
+ this, factory()->array_buffer_neutering_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayBufferNeuteringIntact());
}
@@ -3589,7 +3551,7 @@ void Isolate::InvalidatePromiseHookProtector() {
DCHECK(factory()->promise_hook_protector()->value()->IsSmi());
DCHECK(IsPromiseHookProtectorIntact());
PropertyCell::SetValueWithInvalidation(
- factory()->promise_hook_protector(),
+ this, factory()->promise_hook_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseHookProtectorIntact());
}
@@ -3606,7 +3568,7 @@ void Isolate::InvalidatePromiseThenProtector() {
DCHECK(factory()->promise_then_protector()->value()->IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- factory()->promise_then_protector(),
+ this, factory()->promise_then_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseThenLookupChainIntact());
}
@@ -3616,12 +3578,6 @@ bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
}
-
-CallInterfaceDescriptorData* Isolate::call_descriptor_data(int index) {
- DCHECK(0 <= index && index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
- return &call_descriptor_data_[index];
-}
-
static base::RandomNumberGenerator* ensure_rng_exists(
base::RandomNumberGenerator** rng, int seed) {
if (*rng == nullptr) {
@@ -3678,13 +3634,13 @@ Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
Handle<String> key = factory()->InternalizeString(name);
Handle<NameDictionary> dictionary =
Handle<NameDictionary>::cast(heap()->root_handle(dictionary_index));
- int entry = dictionary->FindEntry(key);
+ int entry = dictionary->FindEntry(this, key);
Handle<Symbol> symbol;
if (entry == NameDictionary::kNotFound) {
symbol =
private_symbol ? factory()->NewPrivateSymbol() : factory()->NewSymbol();
symbol->set_name(*key);
- dictionary = NameDictionary::Add(dictionary, key, symbol,
+ dictionary = NameDictionary::Add(this, dictionary, key, symbol,
PropertyDetails::Empty(), &entry);
switch (dictionary_index) {
case Heap::kPublicSymbolTableRootIndex:
@@ -3701,7 +3657,7 @@ Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
UNREACHABLE();
}
} else {
- symbol = Handle<Symbol>(Symbol::cast(dictionary->ValueAt(entry)));
+ symbol = Handle<Symbol>(Symbol::cast(dictionary->ValueAt(entry)), this);
}
return symbol;
}
@@ -3772,12 +3728,13 @@ void Isolate::FireCallCompletedCallback() {
}
}
-void Isolate::DebugStateUpdated() {
- bool promise_hook_or_debug_is_active = promise_hook_ || debug()->is_active();
- if (promise_hook_or_debug_is_active && IsPromiseHookProtectorIntact()) {
+void Isolate::PromiseHookStateUpdated() {
+ bool is_active = promise_hook_ || async_event_delegate_;
+ if (is_active && IsPromiseHookProtectorIntact()) {
+ HandleScope scope(this);
InvalidatePromiseHookProtector();
}
- promise_hook_or_debug_is_active_ = promise_hook_or_debug_is_active;
+ promise_hook_or_async_event_delegate_ = is_active;
}
namespace {
@@ -3856,19 +3813,104 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
host_initialize_import_meta_object_callback_ = callback;
}
+void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
+ void* data) {
+ atomics_wait_callback_ = callback;
+ atomics_wait_callback_data_ = data;
+}
+
+void Isolate::RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t offset_in_bytes, int32_t value,
+ double timeout_in_ms,
+ AtomicsWaitWakeHandle* stop_handle) {
+ DCHECK(array_buffer->is_shared());
+ if (atomics_wait_callback_ == nullptr) return;
+ HandleScope handle_scope(this);
+ atomics_wait_callback_(
+ event, v8::Utils::ToLocalShared(array_buffer), offset_in_bytes, value,
+ timeout_in_ms,
+ reinterpret_cast<v8::Isolate::AtomicsWaitWakeHandle*>(stop_handle),
+ atomics_wait_callback_data_);
+}
+
void Isolate::SetPromiseHook(PromiseHook hook) {
promise_hook_ = hook;
- DebugStateUpdated();
+ PromiseHookStateUpdated();
}
void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent) {
- if (debug()->is_active()) debug()->RunPromiseHook(type, promise, parent);
+ RunPromiseHookForAsyncEventDelegate(type, promise);
if (promise_hook_ == nullptr) return;
promise_hook_(type, v8::Utils::PromiseToLocal(promise),
v8::Utils::ToLocal(parent));
}
+void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
+ Handle<JSPromise> promise) {
+ if (!async_event_delegate_) return;
+ if (type == PromiseHookType::kResolve) return;
+
+ if (type == PromiseHookType::kBefore) {
+ if (!promise->async_task_id()) return;
+ async_event_delegate_->AsyncEventOccurred(debug::kDebugWillHandle,
+ promise->async_task_id(), false);
+ } else if (type == PromiseHookType::kAfter) {
+ if (!promise->async_task_id()) return;
+ async_event_delegate_->AsyncEventOccurred(debug::kDebugDidHandle,
+ promise->async_task_id(), false);
+ } else {
+ DCHECK(type == PromiseHookType::kInit);
+ debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
+ bool last_frame_was_promise_builtin = false;
+ JavaScriptFrameIterator it(this);
+ while (!it.done()) {
+ std::vector<Handle<SharedFunctionInfo>> infos;
+ it.frame()->GetFunctions(&infos);
+ for (size_t i = 1; i <= infos.size(); ++i) {
+ Handle<SharedFunctionInfo> info = infos[infos.size() - i];
+ if (info->IsUserJavaScript()) {
+ // We should not report PromiseThen and PromiseCatch which is called
+ // indirectly, e.g. Promise.all calls Promise.then internally.
+ if (last_frame_was_promise_builtin) {
+ if (!promise->async_task_id()) {
+ promise->set_async_task_id(++async_task_count_);
+ }
+ async_event_delegate_->AsyncEventOccurred(
+ type, promise->async_task_id(), debug()->IsBlackboxed(info));
+ }
+ return;
+ }
+ last_frame_was_promise_builtin = false;
+ if (info->HasBuiltinId()) {
+ if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
+ type = debug::kDebugPromiseThen;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
+ type = debug::kDebugPromiseCatch;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeFinally) {
+ type = debug::kDebugPromiseFinally;
+ last_frame_was_promise_builtin = true;
+ }
+ }
+ }
+ it.Advance();
+ }
+ }
+}
+
+void Isolate::OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
+ debug::DebugAsyncActionType event) {
+ if (!async_event_delegate_) return;
+ if (!promise->async_task_id()) {
+ promise->set_async_task_id(++async_task_count_);
+ }
+ async_event_delegate_->AsyncEventOccurred(event, promise->async_task_id(),
+ false);
+}
+
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
promise_reject_callback_ = callback;
}
@@ -3918,7 +3960,7 @@ void Isolate::RunMicrotasks() {
// If execution is terminating, bail out, clean up, and propagate to
// TryCatch scope.
if (maybe_result.is_null() && maybe_exception.is_null()) {
- heap()->set_microtask_queue(heap()->empty_fixed_array());
+ heap()->set_microtask_queue(ReadOnlyRoots(heap()).empty_fixed_array());
set_pending_microtask_count(0);
handle_scope_implementer()->LeaveMicrotaskContext();
SetTerminationOnExternalTryCatch();
@@ -4013,7 +4055,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
}
if (new_length == 0) {
- heap()->set_detached_contexts(heap()->empty_fixed_array());
+ heap()->set_detached_contexts(ReadOnlyRoots(heap()).empty_fixed_array());
} else if (new_length < length) {
heap()->RightTrimFixedArray(*detached_contexts, length - new_length);
}
@@ -4070,13 +4112,6 @@ void Isolate::SetIdle(bool is_idle) {
}
}
-CpuProfiler* Isolate::EnsureCpuProfiler() {
- if (!cpu_profiler_) {
- cpu_profiler_ = new CpuProfiler(this);
- }
- return cpu_profiler_;
-}
-
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
@@ -4091,7 +4126,7 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
SaveContext::SaveContext(Isolate* isolate)
: isolate_(isolate), prev_(isolate->save_context()) {
if (isolate->context() != nullptr) {
- context_ = Handle<Context>(isolate->context());
+ context_ = Handle<Context>(isolate->context(), isolate);
}
isolate->set_save_context(this);
@@ -4131,5 +4166,8 @@ bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
last_postpone_scope->intercepted_flags_ |= flag;
return true;
}
+
+#undef TRACE_ISOLATE
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 672bfef635..09aaf99684 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -32,6 +32,16 @@
#include "src/runtime/runtime.h"
#include "src/unicode.h"
+#ifdef V8_INTL_SUPPORT
+#include "unicode/uversion.h" // Define U_ICU_NAMESPACE.
+// 'icu' does not work. Use U_ICU_NAMESPACE.
+namespace U_ICU_NAMESPACE {
+
+class RegexMatcher;
+
+} // namespace U_ICU_NAMESPACE
+#endif // V8_INTL_SUPPORT
+
namespace v8 {
namespace base {
@@ -54,7 +64,6 @@ class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
-class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeEventDispatcher;
class ExternalCodeEventListener;
@@ -68,7 +77,6 @@ class CompilerDispatcher;
class ContextSlotCache;
class Counters;
class CpuFeatures;
-class CpuProfiler;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
@@ -160,16 +168,16 @@ class WasmEngine;
} \
} while (false)
-#define RETURN_RESULT_OR_FAILURE(isolate, call) \
- do { \
- Handle<Object> __result__; \
- Isolate* __isolate__ = (isolate); \
- if (!(call).ToHandle(&__result__)) { \
- DCHECK(__isolate__->has_pending_exception()); \
- return __isolate__->heap()->exception(); \
- } \
- DCHECK(!__isolate__->has_pending_exception()); \
- return *__result__; \
+#define RETURN_RESULT_OR_FAILURE(isolate, call) \
+ do { \
+ Handle<Object> __result__; \
+ Isolate* __isolate__ = (isolate); \
+ if (!(call).ToHandle(&__result__)) { \
+ DCHECK(__isolate__->has_pending_exception()); \
+ return ReadOnlyRoots(__isolate__).exception(); \
+ } \
+ DCHECK(!__isolate__->has_pending_exception()); \
+ return *__result__; \
} while (false)
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
@@ -180,11 +188,11 @@ class WasmEngine;
} \
} while (false)
-#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
- do { \
- Isolate* __isolate__ = (isolate); \
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call, \
- __isolate__->heap()->exception()); \
+#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call, \
+ ReadOnlyRoots(__isolate__).exception()); \
} while (false)
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
@@ -202,6 +210,13 @@ class WasmEngine;
return __isolate__->Throw(*__isolate__->factory()->call); \
} while (false)
+#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ __isolate__->Throw(*__isolate__->factory()->call); \
+ return value; \
+ } while (false)
+
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
@@ -210,11 +225,11 @@ class WasmEngine;
} \
} while (false)
-#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
- do { \
- Isolate* __isolate__ = (isolate); \
- RETURN_ON_EXCEPTION_VALUE(__isolate__, call, \
- __isolate__->heap()->exception()); \
+#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ RETURN_ON_EXCEPTION_VALUE(__isolate__, call, \
+ ReadOnlyRoots(__isolate__).exception()); \
} while (false);
#define RETURN_ON_EXCEPTION(isolate, call, T) \
@@ -256,12 +271,12 @@ class ThreadId {
static ThreadId Invalid() { return ThreadId(kInvalidId); }
// Compares ThreadIds for equality.
- INLINE(bool Equals(const ThreadId& other) const) {
+ V8_INLINE bool Equals(const ThreadId& other) const {
return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
}
// Checks whether this ThreadId refers to any thread.
- INLINE(bool IsValid() const) {
+ V8_INLINE bool IsValid() const {
return base::Relaxed_Load(&id_) != kInvalidId;
}
@@ -298,10 +313,10 @@ class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
// isolate being present.
- ThreadLocalTop();
+ ThreadLocalTop() = default;
// Initialize the thread data.
- void Initialize();
+ void Initialize(Isolate*);
// Get the top C++ try catch handler or nullptr if none are registered.
//
@@ -327,60 +342,62 @@ class ThreadLocalTop BASE_EMBEDDED {
void Free();
- Isolate* isolate_;
+ Isolate* isolate_ = nullptr;
// The context where the current execution method is created and for variable
// lookups.
- Context* context_;
- ThreadId thread_id_;
- Object* pending_exception_;
+ Context* context_ = nullptr;
+ ThreadId thread_id_ = ThreadId::Invalid();
+ Object* pending_exception_ = nullptr;
// TODO(kschimpf): Change this to a stack of caught exceptions (rather than
// just innermost catching try block).
Object* wasm_caught_exception_ = nullptr;
// Communication channel between Isolate::FindHandler and the CEntry.
- Context* pending_handler_context_;
- Address pending_handler_entrypoint_;
- Address pending_handler_constant_pool_;
- Address pending_handler_fp_;
- Address pending_handler_sp_;
+ Context* pending_handler_context_ = nullptr;
+ Address pending_handler_entrypoint_ = kNullAddress;
+ Address pending_handler_constant_pool_ = kNullAddress;
+ Address pending_handler_fp_ = kNullAddress;
+ Address pending_handler_sp_ = kNullAddress;
// Communication channel between Isolate::Throw and message consumers.
- bool rethrowing_message_;
- Object* pending_message_obj_;
+ bool rethrowing_message_ = false;
+ Object* pending_message_obj_ = nullptr;
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
- Object* scheduled_exception_;
- bool external_caught_exception_;
- SaveContext* save_context_;
+ Object* scheduled_exception_ = nullptr;
+ bool external_caught_exception_ = false;
+ SaveContext* save_context_ = nullptr;
// Stack.
- Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
- Address c_function_; // C function that was called at c entry.
+ // The frame pointer of the top c entry frame.
+ Address c_entry_fp_ = kNullAddress;
+ // Try-blocks are chained through the stack.
+ Address handler_ = kNullAddress;
+ // C function that was called at c entry.
+ Address c_function_ = kNullAddress;
// Throwing an exception may cause a Promise rejection. For this purpose
// we keep track of a stack of nested promises and the corresponding
// try-catch handlers.
- PromiseOnStack* promise_on_stack_;
+ PromiseOnStack* promise_on_stack_ = nullptr;
#ifdef USE_SIMULATOR
- Simulator* simulator_;
+ Simulator* simulator_ = nullptr;
#endif
- Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
- // the external callback we're currently in
- ExternalCallbackScope* external_callback_scope_;
- StateTag current_vm_state_;
+ // The stack pointer of the bottom JS entry frame.
+ Address js_entry_sp_ = kNullAddress;
+ // The external callback we're currently in.
+ ExternalCallbackScope* external_callback_scope_ = nullptr;
+ StateTag current_vm_state_ = EXTERNAL;
// Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_;
+ v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;
private:
- void InitializeInternal();
-
- v8::TryCatch* try_catch_handler_;
+ v8::TryCatch* try_catch_handler_ = nullptr;
};
@@ -416,6 +433,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr) \
+ V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -527,7 +545,7 @@ class Isolate : private HiddenFactory {
}
// Returns the isolate inside which the current thread is running.
- INLINE(static Isolate* Current()) {
+ V8_INLINE static Isolate* Current() {
DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
@@ -723,10 +741,10 @@ class Isolate : private HiddenFactory {
Handle<String> StackTraceString();
// Stores a stack trace in a stack-allocated temporary buffer which will
// end up in the minidump for debugging purposes.
- NO_INLINE(void PushStackTraceAndDie(void* ptr1 = nullptr,
- void* ptr2 = nullptr,
- void* ptr3 = nullptr,
- void* ptr4 = nullptr));
+ V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
+ void* ptr2 = nullptr,
+ void* ptr3 = nullptr,
+ void* ptr4 = nullptr);
Handle<FixedArray> CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
@@ -766,6 +784,13 @@ class Isolate : private HiddenFactory {
}
debug::ConsoleDelegate* console_delegate() { return console_delegate_; }
+ void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
+ async_event_delegate_ = delegate;
+ PromiseHookStateUpdated();
+ }
+ void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
+ debug::DebugAsyncActionType);
+
// Re-throw an exception. This involves no error reporting since error
// reporting was handled when the exception was thrown originally.
Object* ReThrow(Object* exception);
@@ -897,7 +922,6 @@ class Isolate : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
- wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@@ -979,9 +1003,7 @@ class Isolate : private HiddenFactory {
HeapProfiler* heap_profiler() const { return heap_profiler_; }
#ifdef DEBUG
- static size_t non_disposed_isolates() {
- return non_disposed_isolates_.Value();
- }
+ static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif
v8::internal::Factory* factory() {
@@ -1022,8 +1044,6 @@ class Isolate : private HiddenFactory {
bool NeedsSourcePositionsForProfiling() const;
- bool NeedsDetailedOptimizedCodeLineInfo() const;
-
bool is_best_effort_code_coverage() const {
return code_coverage_mode() == debug::Coverage::kBestEffort;
}
@@ -1076,6 +1096,32 @@ class Isolate : private HiddenFactory {
date_cache_ = date_cache;
}
+#ifdef V8_INTL_SUPPORT
+ icu::RegexMatcher* language_singleton_regexp_matcher() {
+ return language_singleton_regexp_matcher_;
+ }
+
+ icu::RegexMatcher* language_tag_regexp_matcher() {
+ return language_tag_regexp_matcher_;
+ }
+
+ icu::RegexMatcher* language_variant_regexp_matcher() {
+ return language_variant_regexp_matcher_;
+ }
+
+ void set_language_tag_regexp_matchers(
+ icu::RegexMatcher* language_singleton_regexp_matcher,
+ icu::RegexMatcher* language_tag_regexp_matcher,
+ icu::RegexMatcher* language_variant_regexp_matcher) {
+ DCHECK_NULL(language_singleton_regexp_matcher_);
+ DCHECK_NULL(language_tag_regexp_matcher_);
+ DCHECK_NULL(language_variant_regexp_matcher_);
+ language_singleton_regexp_matcher_ = language_singleton_regexp_matcher;
+ language_tag_regexp_matcher_ = language_tag_regexp_matcher;
+ language_variant_regexp_matcher_ = language_variant_regexp_matcher;
+ }
+#endif // V8_INTL_SUPPORT
+
static const int kProtectorValid = 1;
static const int kProtectorInvalid = 0;
@@ -1107,7 +1153,10 @@ class Isolate : private HiddenFactory {
bool IsPromiseResolveLookupChainIntact();
// Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
- // initial %PromisePrototype% yields the initial method.
+ // initial %PromisePrototype% yields the initial method. In addition this
+ // protector also guards the negative lookup of "then" on the intrinsic
+ // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
+ // undefined without triggering any side-effects.
bool IsPromiseThenLookupChainIntact();
bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
@@ -1140,9 +1189,6 @@ class Isolate : private HiddenFactory {
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
- V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
- int index);
-
void IterateDeferredHandles(RootVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1240,8 +1286,16 @@ class Isolate : private HiddenFactory {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
- Address promise_hook_or_debug_is_active_address() {
- return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
+ Address promise_hook_address() {
+ return reinterpret_cast<Address>(&promise_hook_);
+ }
+
+ Address async_event_delegate_address() {
+ return reinterpret_cast<Address>(&async_event_delegate_);
+ }
+
+ Address promise_hook_or_async_event_delegate_address() {
+ return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
}
Address pending_microtask_count_address() {
@@ -1252,11 +1306,13 @@ class Isolate : private HiddenFactory {
return reinterpret_cast<Address>(&handle_scope_implementer_);
}
- Address debug_execution_mode_address() {
- return reinterpret_cast<Address>(&debug_execution_mode_);
- }
-
- void DebugStateUpdated();
+ void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
+ void* data);
+ void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t offset_in_bytes, int32_t value,
+ double timeout_in_ms,
+ AtomicsWaitWakeHandle* stop_handle);
void SetPromiseHook(PromiseHook hook);
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
@@ -1272,15 +1328,14 @@ class Isolate : private HiddenFactory {
// Off-heap builtins cannot embed constants within the code object itself,
// and thus need to load them from the root list.
bool ShouldLoadConstantsFromRootList() const {
-#ifdef V8_EMBEDDED_BUILTINS
- return (serializer_enabled() &&
- builtins_constants_table_builder() != nullptr);
-#else
- return false;
-#endif // V8_EMBEDDED_BUILTINS
+ if (FLAG_embedded_builtins) {
+ return (serializer_enabled() &&
+ builtins_constants_table_builder() != nullptr);
+ } else {
+ return false;
+ }
}
-#ifdef V8_EMBEDDED_BUILTINS
// Called only prior to serialization.
// This function copies off-heap-safe builtins off the heap, creates off-heap
// trampolines, and sets up this isolate's embedded blob.
@@ -1293,10 +1348,10 @@ class Isolate : private HiddenFactory {
static const uint8_t* CurrentEmbeddedBlob();
static uint32_t CurrentEmbeddedBlobSize();
- // TODO(jgruber): Remove these in favor of the static methods above.
+ // These always return the same result as static methods above, but don't
+ // access the global atomic variable (and thus *might be* slightly faster).
const uint8_t* embedded_blob() const;
uint32_t embedded_blob_size() const;
-#endif
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
@@ -1347,6 +1402,12 @@ class Isolate : private HiddenFactory {
bool IsIsolateInBackground() { return is_isolate_in_background_; }
+ void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }
+
+ void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }
+
+ bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }
+
PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
@@ -1363,6 +1424,12 @@ class Isolate : private HiddenFactory {
elements_deletion_counter_ = value;
}
+ wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
+ void set_wasm_engine(std::shared_ptr<wasm::WasmEngine> engine) {
+ DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
+ wasm_engine_ = std::move(engine);
+ }
+
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
return top_backup_incumbent_scope_;
}
@@ -1479,6 +1546,10 @@ class Isolate : private HiddenFactory {
void SetTerminationOnExternalTryCatch();
+ void PromiseHookStateUpdated();
+ void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
+ Handle<JSPromise> promise);
+
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1493,10 +1564,6 @@ class Isolate : private HiddenFactory {
return "";
}
- // TODO(alph): Remove along with the deprecated GetCpuProfiler().
- friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
- CpuProfiler* EnsureCpuProfiler();
-
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
@@ -1538,11 +1605,11 @@ class Isolate : private HiddenFactory {
RegExpStack* regexp_stack_;
std::vector<int> regexp_indices_;
DateCache* date_cache_;
- CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
base::RandomNumberGenerator* fuzzer_rng_;
base::AtomicValue<RAILMode> rail_mode_;
- bool promise_hook_or_debug_is_active_;
+ v8::Isolate::AtomicsWaitCallback atomics_wait_callback_;
+ void* atomics_wait_callback_data_;
PromiseHook promise_hook_;
HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
HostInitializeImportMetaObjectCallback
@@ -1550,6 +1617,12 @@ class Isolate : private HiddenFactory {
base::Mutex rail_mutex_;
double load_start_time_ms_;
+#ifdef V8_INTL_SUPPORT
+ icu::RegexMatcher* language_singleton_regexp_matcher_;
+ icu::RegexMatcher* language_tag_regexp_matcher_;
+ icu::RegexMatcher* language_variant_regexp_matcher_;
+#endif // V8_INTL_SUPPORT
+
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
@@ -1566,17 +1639,20 @@ class Isolate : private HiddenFactory {
// to prioritize between memory usage and latency.
bool is_isolate_in_background_;
+ // True if the isolate is in memory savings mode. This flag is used to
+ // favor memory over runtime performance.
+ bool memory_savings_mode_active_;
+
// Time stamp at initialization.
double time_millis_at_init_;
#ifdef DEBUG
- static base::AtomicNumber<size_t> non_disposed_isolates_;
+ static std::atomic<size_t> non_disposed_isolates_;
JSObject::SpillInformation js_spill_information_;
#endif
Debug* debug_;
- CpuProfiler* cpu_profiler_;
HeapProfiler* heap_profiler_;
std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
FunctionEntryHook function_entry_hook_;
@@ -1640,7 +1716,6 @@ class Isolate : private HiddenFactory {
std::vector<Object*> partial_snapshot_cache_;
-#ifdef V8_EMBEDDED_BUILTINS
// Used during builtins compilation to build the builtins constants table,
// which is stored on the root list prior to serialization.
BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
@@ -1649,7 +1724,6 @@ class Isolate : private HiddenFactory {
const uint8_t* embedded_blob_ = nullptr;
uint32_t embedded_blob_size_ = 0;
-#endif
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
@@ -1659,6 +1733,10 @@ class Isolate : private HiddenFactory {
debug::ConsoleDelegate* console_delegate_ = nullptr;
+ debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
+ bool promise_hook_or_async_event_delegate_ = false;
+ int async_task_count_ = 0;
+
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
@@ -1670,7 +1748,7 @@ class Isolate : private HiddenFactory {
size_t elements_deletion_counter_ = 0;
- std::unique_ptr<wasm::WasmEngine> wasm_engine_;
+ std::shared_ptr<wasm::WasmEngine> wasm_engine_;
std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
@@ -1873,72 +1951,6 @@ class SafeForInterruptsScope : public InterruptsScope {
virtual ~SafeForInterruptsScope() = default;
};
-class CodeTracer final : public Malloced {
- public:
- explicit CodeTracer(int isolate_id) : file_(nullptr), scope_depth_(0) {
- if (!ShouldRedirect()) {
- file_ = stdout;
- return;
- }
-
- if (FLAG_redirect_code_traces_to == nullptr) {
- SNPrintF(filename_,
- "code-%d-%d.asm",
- base::OS::GetCurrentProcessId(),
- isolate_id);
- } else {
- StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
- }
-
- WriteChars(filename_.start(), "", 0, false);
- }
-
- class Scope {
- public:
- explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
- ~Scope() { tracer_->CloseFile(); }
-
- FILE* file() const { return tracer_->file(); }
-
- private:
- CodeTracer* tracer_;
- };
-
- void OpenFile() {
- if (!ShouldRedirect()) {
- return;
- }
-
- if (file_ == nullptr) {
- file_ = base::OS::FOpen(filename_.start(), "ab");
- }
-
- scope_depth_++;
- }
-
- void CloseFile() {
- if (!ShouldRedirect()) {
- return;
- }
-
- if (--scope_depth_ == 0) {
- fclose(file_);
- file_ = nullptr;
- }
- }
-
- FILE* file() const { return file_; }
-
- private:
- static bool ShouldRedirect() {
- return FLAG_redirect_code_traces;
- }
-
- EmbeddedVector<char, 128> filename_;
- FILE* file_;
- int scope_depth_;
-};
-
class StackTraceFailureMessage {
public:
explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
index de2152c056..2ca285742b 100644
--- a/deps/v8/src/js/OWNERS
+++ b/deps/v8/src/js/OWNERS
@@ -3,6 +3,7 @@ set noparent
adamk@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
+gsathya@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 3589a055b1..35db963d18 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -58,24 +58,26 @@ function GetSortedArrayKeys(array, indices) {
}
-function SparseJoinWithSeparatorJS(array, keys, length, use_locale, separator) {
+function SparseJoinWithSeparatorJS(
+ array, keys, length, use_locale, separator, locales, options) {
var keys_length = keys.length;
var elements = new InternalArray(keys_length * 2);
for (var i = 0; i < keys_length; i++) {
var key = keys[i];
elements[i * 2] = key;
- elements[i * 2 + 1] = ConvertToString(use_locale, array[key]);
+ elements[i * 2 + 1] = ConvertToString(
+ use_locale, array[key], locales, options);
}
return %SparseJoinWithSeparator(elements, length, separator);
}
// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, keys, use_locale) {
+function SparseJoin(array, keys, use_locale, locales, options) {
var keys_length = keys.length;
var elements = new InternalArray(keys_length);
for (var i = 0; i < keys_length; i++) {
- elements[i] = ConvertToString(use_locale, array[keys[i]]);
+ elements[i] = ConvertToString(use_locale, array[keys[i]], locales, options);
}
return %StringBuilderConcat(elements, keys_length, '');
}
@@ -128,28 +130,29 @@ function StackHas(stack, v) {
// join invocations.
var visited_arrays = new Stack();
-function DoJoin(array, length, is_array, separator, use_locale) {
+function DoJoin(
+ array, length, is_array, separator, use_locale, locales, options) {
if (UseSparseVariant(array, length, is_array, length)) {
%NormalizeElements(array);
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, length));
if (separator === '') {
if (keys.length === 0) return '';
- return SparseJoin(array, keys, use_locale);
+ return SparseJoin(array, keys, use_locale, locales, options);
} else {
return SparseJoinWithSeparatorJS(
- array, keys, length, use_locale, separator);
+ array, keys, length, use_locale, separator, locales, options);
}
}
// Fast case for one-element arrays.
if (length === 1) {
- return ConvertToString(use_locale, array[0]);
+ return ConvertToString(use_locale, array[0], locales, options);
}
// Construct an array for the elements.
var elements = new InternalArray(length);
for (var i = 0; i < length; i++) {
- elements[i] = ConvertToString(use_locale, array[i]);
+ elements[i] = ConvertToString(use_locale, array[i], locales, options);
}
if (separator === '') {
@@ -159,7 +162,7 @@ function DoJoin(array, length, is_array, separator, use_locale) {
}
}
-function Join(array, length, separator, use_locale) {
+function Join(array, length, separator, use_locale, locales, options) {
if (length === 0) return '';
var is_array = IS_ARRAY(array);
@@ -173,7 +176,8 @@ function Join(array, length, separator, use_locale) {
// Attempt to convert the elements.
try {
- return DoJoin(array, length, is_array, separator, use_locale);
+ return DoJoin(
+ array, length, is_array, separator, use_locale, locales, options);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.
@@ -182,9 +186,18 @@ function Join(array, length, separator, use_locale) {
}
-function ConvertToString(use_locale, x) {
+function ConvertToString(use_locale, x, locales, options) {
if (IS_NULL_OR_UNDEFINED(x)) return '';
- return TO_STRING(use_locale ? x.toLocaleString() : x);
+ if (use_locale) {
+ if (IS_NULL_OR_UNDEFINED(locales)) {
+ return TO_STRING(x.toLocaleString());
+ } else if (IS_NULL_OR_UNDEFINED(options)) {
+ return TO_STRING(x.toLocaleString(locales));
+ }
+ return TO_STRING(x.toLocaleString(locales, options));
+ }
+
+ return TO_STRING(x);
}
@@ -347,17 +360,21 @@ DEFINE_METHOD(
}
);
-function InnerArrayToLocaleString(array, length) {
- return Join(array, TO_LENGTH(length), ',', true);
+// ecma402 #sup-array.prototype.tolocalestring
+function InnerArrayToLocaleString(array, length, locales, options) {
+ return Join(array, TO_LENGTH(length), ',', true, locales, options);
}
DEFINE_METHOD(
GlobalArray.prototype,
+ // ecma402 #sup-array.prototype.tolocalestring
toLocaleString() {
var array = TO_OBJECT(this);
var arrayLen = array.length;
- return InnerArrayToLocaleString(array, arrayLen);
+ var locales = arguments[0];
+ var options = arguments[1];
+ return InnerArrayToLocaleString(array, arrayLen, locales, options);
}
);
@@ -391,46 +408,6 @@ DEFINE_METHOD(
);
-// Removes the last element from the array and returns it. See
-// ECMA-262, section 15.4.4.6.
-function ArrayPopFallback() {
- var array = TO_OBJECT(this);
- var n = TO_LENGTH(array.length);
- if (n == 0) {
- array.length = n;
- return;
- }
-
- n--;
- var value = array[n];
- delete array[n];
- array.length = n;
- return value;
-}
-
-
-// Appends the arguments to the end of the array and returns the new
-// length of the array. See ECMA-262, section 15.4.4.7.
-function ArrayPushFallback() {
- var array = TO_OBJECT(this);
- var n = TO_LENGTH(array.length);
- var m = arguments.length;
-
- // Subtract n from kMaxSafeInteger rather than testing m + n >
- // kMaxSafeInteger. n may already be kMaxSafeInteger. In that case adding
- // e.g., 1 would not be safe.
- if (m > kMaxSafeInteger - n) throw %make_type_error(kPushPastSafeLength, m, n);
-
- for (var i = 0; i < m; i++) {
- array[i+n] = arguments[i];
- }
-
- var new_length = n + m;
- array.length = new_length;
- return new_length;
-}
-
-
// For implementing reverse() on large, sparse arrays.
function SparseReverse(array, len) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
@@ -822,7 +799,6 @@ function InnerArraySort(array, length, comparefn) {
return array;
}
-
DEFINE_METHOD(
GlobalArray.prototype,
sort(comparefn) {
@@ -836,6 +812,7 @@ DEFINE_METHOD(
}
);
+
DEFINE_METHOD_LEN(
GlobalArray.prototype,
lastIndexOf(element, index) {
@@ -1050,7 +1027,6 @@ utils.Export(function(to) {
to.ArrayToString = ArrayToString;
to.ArrayValues = ArrayValues;
to.InnerArrayJoin = InnerArrayJoin;
- to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
});
@@ -1060,8 +1036,6 @@ utils.Export(function(to) {
"array_keys_iterator", ArrayKeys,
"array_values_iterator", ArrayValues,
// Fallback implementations of Array builtins.
- "array_pop", ArrayPopFallback,
- "array_push", ArrayPushFallback,
"array_shift", ArrayShiftFallback,
"array_splice", ArraySpliceFallback,
"array_unshift", ArrayUnshiftFallback,
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index 2e51459883..82c28f79e0 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -29,6 +29,7 @@ var GlobalIntlv8BreakIterator = GlobalIntl.v8BreakIterator;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var GlobalArray = global.Array;
var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
var InternalArray = utils.InternalArray;
var MathMax = global.Math.max;
@@ -38,6 +39,7 @@ var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var StringSubstr = GlobalString.prototype.substr;
var StringSubstring = GlobalString.prototype.substring;
+var ArraySlice = GlobalArray.prototype.slice;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
@@ -67,7 +69,7 @@ endmacro
/**
* Adds bound method to the prototype of the given object.
*/
-function AddBoundMethod(obj, methodName, implementation, length, typename,
+function AddBoundMethod(obj, methodName, implementation, length, type,
compat) {
%CheckIsBootstrapping();
var internalName = %CreatePrivateSymbol(methodName);
@@ -75,7 +77,10 @@ function AddBoundMethod(obj, methodName, implementation, length, typename,
DEFINE_METHOD(
obj.prototype,
get [methodName]() {
- var receiver = Unwrap(this, typename, obj, methodName, compat);
+ if(!IS_RECEIVER(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
+ }
+ var receiver = %IntlUnwrapReceiver(this, type, obj, methodName, compat);
if (IS_UNDEFINED(receiver[internalName])) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
@@ -120,31 +125,19 @@ function IntlConstruct(receiver, constructor, create, newTarget, args,
-function Unwrap(receiver, typename, constructor, method, compat) {
- if (!%IsInitializedIntlObjectOfType(receiver, typename)) {
- if (compat && receiver instanceof constructor) {
- let fallback = receiver[IntlFallbackSymbol];
- if (%IsInitializedIntlObjectOfType(fallback, typename)) {
- return fallback;
- }
- }
- throw %make_type_error(kIncompatibleMethodReceiver, method, receiver);
- }
- return receiver;
-}
-
-
// -------------------------------------------------------------------
/**
* Caches available locales for each service.
*/
var AVAILABLE_LOCALES = {
+ __proto__ : null,
'collator': UNDEFINED,
'numberformat': UNDEFINED,
'dateformat': UNDEFINED,
'breakiterator': UNDEFINED,
'pluralrules': UNDEFINED,
+ 'relativetimeformat': UNDEFINED,
};
/**
@@ -363,23 +356,29 @@ function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
function getGetOption(options, caller) {
if (IS_UNDEFINED(options)) throw %make_error(kDefaultOptionsMissing, caller);
- var getOption = function getOption(property, type, values, defaultValue) {
- if (!IS_UNDEFINED(options[property])) {
- var value = options[property];
+ // Ecma 402 #sec-getoption
+ var getOption = function (property, type, values, fallback) {
+ // 1. Let value be ? Get(options, property).
+ var value = options[property];
+ // 2. If value is not undefined, then
+ if (!IS_UNDEFINED(value)) {
switch (type) {
+ // If type is "boolean", then let value be ToBoolean(value).
case 'boolean':
value = TO_BOOLEAN(value);
break;
+ // If type is "string", then let value be ToString(value).
case 'string':
value = TO_STRING(value);
break;
- case 'number':
- value = TO_NUMBER(value);
- break;
+ // Assert: type is "boolean" or "string".
default:
throw %make_error(kWrongValueType);
}
+ // d. If values is not undefined, then
+ // If values does not contain an element equal to value, throw a
+ // RangeError exception.
if (!IS_UNDEFINED(values) && %ArrayIndexOf(values, value, 0) === -1) {
throw %make_range_error(kValueOutOfRange, value, caller, property);
}
@@ -387,7 +386,7 @@ function getGetOption(options, caller) {
return value;
}
- return defaultValue;
+ return fallback;
}
return getOption;
@@ -426,6 +425,9 @@ function resolveLocale(service, requestedLocales, options) {
return resolved;
}
+%InstallToContext([
+ "resolve_locale", resolveLocale
+]);
/**
* Look up the longest non-empty prefix of |locale| that is an element of
@@ -463,7 +465,7 @@ function attemptSingleLookup(availableLocales, requestedLocale) {
var extensionMatch = %regexp_internal_match(
GetUnicodeExtensionRE(), requestedLocale);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
- return {locale: availableLocale, extension: extension};
+ return {__proto__: null, locale: availableLocale, extension: extension};
}
return UNDEFINED;
}
@@ -498,6 +500,7 @@ function lookupMatcher(service, requestedLocales) {
// Didn't find a match, return default.
return {
+ __proto__: null,
locale: 'und',
extension: ''
};
@@ -1002,8 +1005,9 @@ function CreateCollator(locales, options) {
* for a collator.
*/
var COLLATOR_KEY_MAP = {
- 'kn': {'property': 'numeric', 'type': 'boolean'},
- 'kf': {'property': 'caseFirst', 'type': 'string',
+ __proto__: null,
+ 'kn': { __proto__: null, 'property': 'numeric', 'type': 'boolean'},
+ 'kf': { __proto__: null, 'property': 'caseFirst', 'type': 'string',
'values': ['false', 'lower', 'upper']}
};
@@ -1053,7 +1057,7 @@ function CreateCollator(locales, options) {
var collator = %CreateCollator(requestedLocale, internalOptions, resolved);
- %MarkAsInitializedIntlObjectOfType(collator, 'collator');
+ %MarkAsInitializedIntlObjectOfType(collator, COLLATOR_TYPE);
collator[resolvedSymbol] = resolved;
return collator;
@@ -1079,8 +1083,12 @@ function CollatorConstructor() {
DEFINE_METHOD(
GlobalIntlCollator.prototype,
resolvedOptions() {
- var coll = Unwrap(this, 'collator', GlobalIntlCollator, 'resolvedOptions',
- false);
+ var methodName = 'resolvedOptions';
+ if(!IS_RECEIVER(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
+ }
+ var coll = %IntlUnwrapReceiver(this, COLLATOR_TYPE, GlobalIntlCollator,
+ methodName, false);
return {
locale: coll[resolvedSymbol].locale,
usage: coll[resolvedSymbol].usage,
@@ -1123,7 +1131,7 @@ function compare(collator, x, y) {
};
-AddBoundMethod(GlobalIntlCollator, 'compare', compare, 2, 'collator', false);
+AddBoundMethod(GlobalIntlCollator, 'compare', compare, 2, COLLATOR_TYPE, false);
function PluralRulesConstructor() {
if (IS_UNDEFINED(new.target)) {
@@ -1166,7 +1174,7 @@ function PluralRulesConstructor() {
var pluralRules = %CreatePluralRules(requestedLocale, internalOptions,
resolved);
- %MarkAsInitializedIntlObjectOfType(pluralRules, 'pluralrules');
+ %MarkAsInitializedIntlObjectOfType(pluralRules, PLURAL_RULES_TYPE);
pluralRules[resolvedSymbol] = resolved;
return pluralRules;
@@ -1176,7 +1184,7 @@ function PluralRulesConstructor() {
DEFINE_METHOD(
GlobalIntlPluralRules.prototype,
resolvedOptions() {
- if (!%IsInitializedIntlObjectOfType(this, 'pluralrules')) {
+ if (!%IsInitializedIntlObjectOfType(this, PLURAL_RULES_TYPE)) {
throw %make_type_error(kIncompatibleMethodReceiver,
'Intl.PluralRules.prototype.resolvedOptions',
this);
@@ -1201,8 +1209,7 @@ DEFINE_METHOD(
}
defineWECProperty(result, 'pluralCategories',
- this[resolvedSymbol].pluralCategories);
-
+ %_Call(ArraySlice, this[resolvedSymbol].pluralCategories));
return result;
}
);
@@ -1217,7 +1224,7 @@ DEFINE_METHOD(
DEFINE_METHOD(
GlobalIntlPluralRules.prototype,
select(value) {
- if (!%IsInitializedIntlObjectOfType(this, 'pluralrules')) {
+ if (!%IsInitializedIntlObjectOfType(this, PLURAL_RULES_TYPE)) {
throw %make_type_error(kIncompatibleMethodReceiver,
'Intl.PluralRules.prototype.select',
this);
@@ -1345,7 +1352,8 @@ function CreateNumberFormat(locales, options) {
* for a number format.
*/
var NUMBER_FORMAT_KEY_MAP = {
- 'nu': {'property': UNDEFINED, 'type': 'string'}
+ __proto__: null,
+ 'nu': {__proto__: null, 'property': UNDEFINED, 'type': 'string'}
};
var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
@@ -1378,7 +1386,7 @@ function CreateNumberFormat(locales, options) {
{value: currencyDisplay, writable: true});
}
- %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat');
+ %MarkAsInitializedIntlObjectOfType(numberFormat, NUMBER_FORMAT_TYPE);
numberFormat[resolvedSymbol] = resolved;
return numberFormat;
@@ -1404,8 +1412,13 @@ function NumberFormatConstructor() {
DEFINE_METHOD(
GlobalIntlNumberFormat.prototype,
resolvedOptions() {
- var format = Unwrap(this, 'numberformat', GlobalIntlNumberFormat,
- 'resolvedOptions', true);
+ var methodName = 'resolvedOptions';
+ if(!IS_RECEIVER(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
+ }
+ var format = %IntlUnwrapReceiver(this, NUMBER_FORMAT_TYPE,
+ GlobalIntlNumberFormat,
+ methodName, true);
var result = {
locale: format[resolvedSymbol].locale,
numberingSystem: format[resolvedSymbol].numberingSystem,
@@ -1463,10 +1476,6 @@ function formatNumber(formatter, value) {
return %InternalNumberFormat(formatter, number);
}
-
-AddBoundMethod(GlobalIntlNumberFormat, 'format', formatNumber, 1,
- 'numberformat', true);
-
/**
* Returns a string that matches LDML representation of the options object.
*/
@@ -1705,8 +1714,9 @@ function CreateDateTimeFormat(locales, options) {
* for a date/time format.
*/
var DATETIME_FORMAT_KEY_MAP = {
- 'ca': {'property': UNDEFINED, 'type': 'string'},
- 'nu': {'property': UNDEFINED, 'type': 'string'}
+ __proto__: null,
+ 'ca': {__proto__: null, 'property': UNDEFINED, 'type': 'string'},
+ 'nu': {__proto__: null, 'property': UNDEFINED, 'type': 'string'}
};
var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
@@ -1734,13 +1744,14 @@ function CreateDateTimeFormat(locales, options) {
});
var dateFormat = %CreateDateTimeFormat(
- requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
+ requestedLocale,
+ {__proto__: null, skeleton: ldmlString, timeZone: tz}, resolved);
if (resolved.timeZone === "Etc/Unknown") {
throw %make_range_error(kUnsupportedTimeZone, tz);
}
- %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat');
+ %MarkAsInitializedIntlObjectOfType(dateFormat, DATE_TIME_FORMAT_TYPE);
dateFormat[resolvedSymbol] = resolved;
return dateFormat;
@@ -1766,8 +1777,13 @@ function DateTimeFormatConstructor() {
DEFINE_METHOD(
GlobalIntlDateTimeFormat.prototype,
resolvedOptions() {
- var format = Unwrap(this, 'dateformat', GlobalIntlDateTimeFormat,
- 'resolvedOptions', true);
+ var methodName = 'resolvedOptions';
+ if(!IS_RECEIVER(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
+ }
+ var format = %IntlUnwrapReceiver(this, DATE_TIME_FORMAT_TYPE,
+ GlobalIntlDateTimeFormat,
+ methodName, true);
/**
* Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
@@ -1776,6 +1792,7 @@ DEFINE_METHOD(
* http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
*/
var ICU_CALENDAR_MAP = {
+ __proto__: null,
'gregorian': 'gregory',
'ethiopic-amete-alem': 'ethioaa'
};
@@ -1841,7 +1858,7 @@ function formatDate(formatter, dateValue) {
}
// Length is 1 as specified in ECMA 402 v2+
-AddBoundMethod(GlobalIntlDateTimeFormat, 'format', formatDate, 1, 'dateformat',
+AddBoundMethod(GlobalIntlDateTimeFormat, 'format', formatDate, 1, DATE_TIME_FORMAT_TYPE,
true);
@@ -1911,7 +1928,7 @@ function CreateBreakIterator(locales, options) {
var iterator = %CreateBreakIterator(locale.locale, internalOptions, resolved);
- %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator');
+ %MarkAsInitializedIntlObjectOfType(iterator, BREAK_ITERATOR_TYPE);
iterator[resolvedSymbol] = resolved;
return iterator;
@@ -1941,8 +1958,13 @@ DEFINE_METHOD(
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
- var segmenter = Unwrap(this, 'breakiterator', GlobalIntlv8BreakIterator,
- 'resolvedOptions', false);
+ var methodName = 'resolvedOptions';
+ if(!IS_RECEIVER(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
+ }
+ var segmenter = %IntlUnwrapReceiver(this, BREAK_ITERATOR_TYPE,
+ GlobalIntlv8BreakIterator, methodName,
+ false);
return {
locale: segmenter[resolvedSymbol].locale,
@@ -2012,16 +2034,19 @@ function breakType(iterator) {
AddBoundMethod(GlobalIntlv8BreakIterator, 'adoptText', adoptText, 1,
- 'breakiterator');
-AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0, 'breakiterator');
-AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0, 'breakiterator');
+ BREAK_ITERATOR_TYPE, false);
+AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0,
+ BREAK_ITERATOR_TYPE, false);
+AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0,
+ BREAK_ITERATOR_TYPE, false);
AddBoundMethod(GlobalIntlv8BreakIterator, 'current', current, 0,
- 'breakiterator');
+ BREAK_ITERATOR_TYPE, false);
AddBoundMethod(GlobalIntlv8BreakIterator, 'breakType', breakType, 0,
- 'breakiterator');
+ BREAK_ITERATOR_TYPE, false);
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
+ __proto__: null,
'collator': GlobalIntlCollator,
'numberformat': GlobalIntlNumberFormat,
'dateformatall': GlobalIntlDateTimeFormat,
@@ -2033,6 +2058,7 @@ var savedObjects = {
// Default (created with undefined locales and options parameters) collator,
// number and date format instances. They'll be created as needed.
var defaultObjects = {
+ __proto__: null,
'collator': UNDEFINED,
'numberformat': UNDEFINED,
'dateformatall': UNDEFINED,
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 75f01e38c9..9e5c6d72d6 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -83,3 +83,11 @@ macro DEFINE_METHOD(obj, method_def) = DEFINE_METHOD_LEN(obj, method_def, -1);
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
+
+# This should be kept consistent with Intl::Type.
+define NUMBER_FORMAT_TYPE = 0;
+define COLLATOR_TYPE = 1;
+define DATE_TIME_FORMAT_TYPE = 2;
+define PLURAL_RULES_TYPE = 3;
+define BREAK_ITERATOR_TYPE = 4;
+define LOCALE_TYPE = 5;
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 3d42498b03..2c25b6c58a 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -59,14 +59,16 @@ function ValidateTypedArray(array, methodName) {
// ES6 section 22.2.3.27
+// ecma402 #sup-array.prototype.tolocalestring
DEFINE_METHOD(
GlobalTypedArray.prototype,
toLocaleString() {
ValidateTypedArray(this, "%TypedArray%.prototype.toLocaleString");
+ var locales = arguments[0];
+ var options = arguments[1];
var length = %_TypedArrayGetLength(this);
-
- return InnerArrayToLocaleString(this, length);
+ return InnerArrayToLocaleString(this, length, locales, options);
}
);
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 7da11db9cf..28819c4cd0 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -60,7 +60,7 @@ MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
Handle<JSObject> holder =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<String> name = isolate->factory()->empty_string();
- JSObject::AddProperty(holder, name, object, NONE);
+ JSObject::AddProperty(isolate, holder, name, object, NONE);
return internalizer.InternalizeJsonProperty(holder, name);
}
@@ -69,7 +69,8 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
HandleScope outer_scope(isolate_);
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, value, Object::GetPropertyOrElement(holder, name), Object);
+ isolate_, value, Object::GetPropertyOrElement(isolate_, holder, name),
+ Object);
if (value->IsJSReceiver()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
Maybe<bool> is_array = Object::IsArray(object);
@@ -143,7 +144,7 @@ JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
isolate_),
position_(-1),
properties_(&zone_) {
- source_ = String::Flatten(source_);
+ source_ = String::Flatten(isolate, source_);
pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
// Optimized fast case where we only have Latin1 characters.
@@ -365,7 +366,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
HandleScope scope(isolate());
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
- Handle<Map> map(json_object->map());
+ Handle<Map> map(json_object->map(), isolate());
int descriptor = 0;
VectorSegment<ZoneVector<Handle<Object>>> properties(&properties_);
DCHECK_EQ(c0_, '{');
@@ -404,7 +405,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
Handle<Map> target;
if (seq_one_byte) {
DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(*map, &no_gc);
+ TransitionsAccessor transitions(isolate(), *map, &no_gc);
key = transitions.ExpectedTransitionKey();
follow_expected = !key.is_null() && ParseJsonString(key);
// If the expected transition hits, follow it.
@@ -419,9 +420,9 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
if (key.is_null()) return ReportUnexpectedCharacter();
// If a transition was found, follow it and continue.
- transitioning =
- TransitionsAccessor(map).FindTransitionToField(key).ToHandle(
- &target);
+ transitioning = TransitionsAccessor(isolate(), map)
+ .FindTransitionToField(key)
+ .ToHandle(&target);
}
if (c0_ != ':') return ReportUnexpectedCharacter();
@@ -441,8 +442,9 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
->NowContains(value)) {
Handle<FieldType> value_type(
value->OptimalType(isolate(), expected_representation));
- Map::GeneralizeField(target, descriptor, details.constness(),
- expected_representation, value_type);
+ Map::GeneralizeField(isolate(), target, descriptor,
+ details.constness(), expected_representation,
+ value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
@@ -831,7 +833,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
int position = position_;
uc32 c0 = c0_;
- uint32_t running_hash = isolate()->heap()->HashSeed();
+ uint32_t running_hash =
+ static_cast<uint32_t>(isolate()->heap()->HashSeed());
uint32_t index = 0;
bool is_array_index = true;
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index fc60e29ec9..ac1442b414 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -40,7 +40,6 @@ class JsonParser BASE_EMBEDDED {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
- PostponeInterruptsScope no_debug_breaks(isolate, StackGuard::DEBUGBREAK);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
JsonParser(isolate, source).ParseJson(), Object);
@@ -58,19 +57,19 @@ class JsonParser BASE_EMBEDDED {
// Parse a string containing a single JSON value.
MaybeHandle<Object> ParseJson();
- INLINE(void Advance());
+ V8_INLINE void Advance();
// The JSON lexical grammar is specified in the ECMAScript 5 standard,
// section 15.12.1.1. The only allowed whitespace characters between tokens
// are tab, carriage-return, newline and space.
- INLINE(void AdvanceSkipWhitespace());
- INLINE(void SkipWhitespace());
- INLINE(uc32 AdvanceGetChar());
+ V8_INLINE void AdvanceSkipWhitespace();
+ V8_INLINE void SkipWhitespace();
+ V8_INLINE uc32 AdvanceGetChar();
// Checks that current charater is c.
// If so, then consume c and skip whitespace.
- INLINE(bool MatchSkipWhiteSpace(uc32 c));
+ V8_INLINE bool MatchSkipWhiteSpace(uc32 c);
// A JSON string (production JSONString) is subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index 3a04930dde..b1d95422b0 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -94,7 +94,6 @@ MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
if (!gap->IsUndefined(isolate_) && !InitializeGap(gap)) {
return MaybeHandle<Object>();
}
- PostponeInterruptsScope no_debug_breaks(isolate_, StackGuard::DEBUGBREAK);
Result result = SerializeObject(object);
if (result == UNCHANGED) return factory()->undefined_value();
if (result == SUCCESS) return builder_.Finish();
@@ -134,10 +133,10 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
if (key.is_null()) continue;
// Object keys are internalized, so do it here.
key = factory()->InternalizeString(key);
- set = OrderedHashSet::Add(set, key);
+ set = OrderedHashSet::Add(isolate_, set, key);
}
property_list_ = OrderedHashSet::ConvertToKeysArray(
- set, GetKeysConversion::kKeepNumbers);
+ isolate_, set, GetKeysConversion::kKeepNumbers);
property_list_ = handle_scope.CloseAndEscape(property_list_);
} else if (replacer->IsCallable()) {
replacer_function_ = Handle<JSReceiver>::cast(replacer);
@@ -154,8 +153,8 @@ bool JsonStringifier::InitializeGap(Handle<Object> gap) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
Object::ToString(isolate_, gap), false);
} else if (value->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap, Object::ToNumber(gap),
- false);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
+ Object::ToNumber(isolate_, gap), false);
}
}
@@ -199,7 +198,7 @@ MaybeHandle<Object> JsonStringifier::ApplyToJsonFunction(Handle<Object> object,
// Retrieve toJSON function.
Handle<Object> fun;
{
- LookupIterator it(object_for_lookup, tojson_string_,
+ LookupIterator it(isolate_, object_for_lookup, tojson_string_,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
if (!fun->IsCallable()) return object;
@@ -232,8 +231,8 @@ Handle<JSReceiver> JsonStringifier::CurrentHolder(
if (length == 0) {
Handle<JSObject> holder =
factory()->NewJSObject(isolate_->object_function());
- JSObject::AddProperty(holder, factory()->empty_string(), initial_holder,
- NONE);
+ JSObject::AddProperty(isolate_, holder, factory()->empty_string(),
+ initial_holder, NONE);
return holder;
} else {
FixedArray* elements = FixedArray::cast(stack_->elements());
@@ -362,8 +361,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSValue(
SerializeString(Handle<String>::cast(value));
} else if (raw->IsNumber()) {
Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
- EXCEPTION);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, value, Object::ToNumber(isolate_, object), EXCEPTION);
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
} else if (raw->IsBigInt()) {
@@ -529,7 +528,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
DCHECK(!js_obj->HasIndexedInterceptor());
DCHECK(!js_obj->HasNamedInterceptor());
- Handle<Map> map(js_obj->map());
+ Handle<Map> map(js_obj->map(), isolate_);
builder_.AppendCharacter('{');
Indent();
bool comma = false;
@@ -548,8 +547,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
field_index);
} else {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, property, Object::GetPropertyOrElement(js_obj, key),
- EXCEPTION);
+ isolate_, property,
+ Object::GetPropertyOrElement(isolate_, js_obj, key), EXCEPTION);
}
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
@@ -583,9 +582,9 @@ JsonStringifier::Result JsonStringifier::SerializeJSReceiverSlow(
for (int i = 0; i < contents->length(); i++) {
Handle<String> key(String::cast(contents->get(i)), isolate_);
Handle<Object> property;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, property,
- Object::GetPropertyOrElement(object, key),
- EXCEPTION);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, property, Object::GetPropertyOrElement(isolate_, object, key),
+ EXCEPTION);
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
if (result == EXCEPTION) return result;
@@ -705,7 +704,7 @@ void JsonStringifier::SerializeDeferredKey(bool deferred_comma,
}
void JsonStringifier::SerializeString(Handle<String> object) {
- object = String::Flatten(object);
+ object = String::Flatten(isolate_, object);
if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
if (object->IsOneByteRepresentationUnderneath()) {
SerializeString_<uint8_t, uint8_t>(object);
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 67cc8ea843..e9b97c7d1f 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -33,15 +33,14 @@ class JsonStringifier BASE_EMBEDDED {
Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
// Entry point to serialize the object.
- INLINE(Result SerializeObject(Handle<Object> obj)) {
+ V8_INLINE Result SerializeObject(Handle<Object> obj) {
return Serialize_<false>(obj, false, factory()->empty_string());
}
// Serialize an array element.
// The index may serve as argument for the toJSON function.
- INLINE(Result SerializeElement(Isolate* isolate,
- Handle<Object> object,
- int i)) {
+ V8_INLINE Result SerializeElement(Isolate* isolate, Handle<Object> object,
+ int i) {
return Serialize_<false>(object,
false,
Handle<Object>(Smi::FromInt(i), isolate));
@@ -50,9 +49,8 @@ class JsonStringifier BASE_EMBEDDED {
// Serialize a object property.
// The key may or may not be serialized depending on the property.
// The key may also serve as argument for the toJSON function.
- INLINE(Result SerializeProperty(Handle<Object> object,
- bool deferred_comma,
- Handle<String> deferred_key)) {
+ V8_INLINE Result SerializeProperty(Handle<Object> object, bool deferred_comma,
+ Handle<String> deferred_key) {
DCHECK(!deferred_key.is_null());
return Serialize_<true>(object, deferred_comma, deferred_key);
}
@@ -60,20 +58,20 @@ class JsonStringifier BASE_EMBEDDED {
template <bool deferred_string_key>
Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
- INLINE(void SerializeDeferredKey(bool deferred_comma,
- Handle<Object> deferred_key));
+ V8_INLINE void SerializeDeferredKey(bool deferred_comma,
+ Handle<Object> deferred_key);
Result SerializeSmi(Smi* object);
Result SerializeDouble(double number);
- INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) {
+ V8_INLINE Result SerializeHeapNumber(Handle<HeapNumber> object) {
return SerializeDouble(object->value());
}
Result SerializeJSValue(Handle<JSValue> object);
- INLINE(Result SerializeJSArray(Handle<JSArray> object));
- INLINE(Result SerializeJSObject(Handle<JSObject> object));
+ V8_INLINE Result SerializeJSArray(Handle<JSArray> object);
+ V8_INLINE Result SerializeJSObject(Handle<JSObject> object);
Result SerializeJSProxy(Handle<JSProxy> object);
Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
@@ -83,20 +81,20 @@ class JsonStringifier BASE_EMBEDDED {
void SerializeString(Handle<String> object);
template <typename SrcChar, typename DestChar>
- INLINE(static void SerializeStringUnchecked_(
+ V8_INLINE static void SerializeStringUnchecked_(
Vector<const SrcChar> src,
- IncrementalStringBuilder::NoExtend<DestChar>* dest));
+ IncrementalStringBuilder::NoExtend<DestChar>* dest);
template <typename SrcChar, typename DestChar>
- INLINE(void SerializeString_(Handle<String> string));
+ V8_INLINE void SerializeString_(Handle<String> string);
template <typename Char>
- INLINE(static bool DoNotEscape(Char c));
+ V8_INLINE static bool DoNotEscape(Char c);
- INLINE(void NewLine());
- INLINE(void Indent() { indent_++; });
- INLINE(void Unindent() { indent_--; });
- INLINE(void Separator(bool first));
+ V8_INLINE void NewLine();
+ V8_INLINE void Indent() { indent_++; }
+ V8_INLINE void Unindent() { indent_--; }
+ V8_INLINE void Separator(bool first);
Handle<JSReceiver> CurrentHolder(Handle<Object> value,
Handle<Object> inital_holder);
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 0213ae0619..8ecbe0a1d7 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -12,6 +12,7 @@
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/module-inl.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
@@ -37,10 +38,10 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
// static
MaybeHandle<FixedArray> KeyAccumulator::GetKeys(
Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
- GetKeysConversion keys_conversion, bool is_for_in, bool skip_indices) {
+ GetKeysConversion keys_conversion, bool is_for_in) {
Isolate* isolate = object->GetIsolate();
- FastKeyAccumulator accumulator(isolate, object, mode, filter, is_for_in,
- skip_indices);
+ FastKeyAccumulator accumulator(isolate, object, mode, filter);
+ accumulator.set_is_for_in(is_for_in);
return accumulator.GetKeys(keys_conversion);
}
@@ -49,12 +50,12 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
return isolate_->factory()->empty_fixed_array();
}
if (mode_ == KeyCollectionMode::kOwnOnly &&
- keys_->map() == isolate_->heap()->fixed_array_map()) {
+ keys_->map() == ReadOnlyRoots(isolate_).fixed_array_map()) {
return Handle<FixedArray>::cast(keys_);
}
USE(ContainsOnlyValidKeys);
Handle<FixedArray> result =
- OrderedHashSet::ConvertToKeysArray(keys(), convert);
+ OrderedHashSet::ConvertToKeysArray(isolate(), keys(), convert);
DCHECK(ContainsOnlyValidKeys(result));
return result;
}
@@ -79,7 +80,7 @@ void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
Handle<String>::cast(key)->AsArrayIndex(&index)) {
key = isolate_->factory()->NewNumberFromUint(index);
}
- Handle<OrderedHashSet> new_set = OrderedHashSet::Add(keys(), key);
+ Handle<OrderedHashSet> new_set = OrderedHashSet::Add(isolate(), keys(), key);
if (*new_set != *keys_) {
// The keys_ Set is converted directly to a FixedArray in GetKeys which can
// be left-trimmer. Hence the previous Set should not keep a pointer to the
@@ -135,9 +136,7 @@ MaybeHandle<FixedArray> FilterProxyKeys(KeyAccumulator* accumulator,
}
store_position++;
}
- if (store_position == 0) return isolate->factory()->empty_fixed_array();
- keys->Shrink(store_position);
- return keys;
+ return FixedArray::ShrinkOrEmpty(isolate, keys, store_position);
}
// Returns "nothing" in case of exception, "true" on success.
@@ -217,7 +216,7 @@ void KeyAccumulator::AddShadowingKey(Handle<Object> key) {
if (shadowing_keys_.is_null()) {
shadowing_keys_ = ObjectHashSet::New(isolate_, 16);
}
- shadowing_keys_ = ObjectHashSet::Add(shadowing_keys_, key);
+ shadowing_keys_ = ObjectHashSet::Add(isolate(), shadowing_keys_, key);
}
namespace {
@@ -356,8 +355,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
template <bool fast_properties>
MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
Handle<JSObject> object,
- GetKeysConversion convert,
- bool skip_indices) {
+ GetKeysConversion convert) {
Handle<FixedArray> keys;
ElementsAccessor* accessor = object->GetElementsAccessor();
if (fast_properties) {
@@ -366,13 +364,8 @@ MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
// TODO(cbruni): preallocate big enough array to also hold elements.
keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate, object);
}
- MaybeHandle<FixedArray> result;
- if (skip_indices) {
- result = keys;
- } else {
- result =
- accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
- }
+ MaybeHandle<FixedArray> result =
+ accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
if (FLAG_trace_for_in_enumerate) {
PrintF("| strings=%d symbols=0 elements=%u || prototypes>=1 ||\n",
@@ -410,8 +403,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
// Do not try to use the enum-cache for dict-mode objects.
if (map->is_dictionary_map()) {
- return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion,
- skip_indices_);
+ return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion);
}
int enum_length = receiver_->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) {
@@ -429,8 +421,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
}
// The properties-only case failed because there were probably elements on the
// receiver.
- return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion,
- skip_indices_);
+ return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion);
}
MaybeHandle<FixedArray>
@@ -459,7 +450,6 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
GetKeysConversion keys_conversion) {
KeyAccumulator accumulator(isolate_, mode_, filter_);
accumulator.set_is_for_in(is_for_in_);
- accumulator.set_skip_indices(skip_indices_);
accumulator.set_last_non_empty_prototype(last_non_empty_prototype_);
MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_),
@@ -601,7 +591,7 @@ int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
- Object* accessors = descs->GetValue(i);
+ Object* accessors = descs->GetStrongValue(i);
if (!accessors->IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
}
@@ -634,7 +624,7 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
return isolate->factory()->empty_fixed_array();
}
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- T::CopyEnumKeysTo(dictionary, storage, mode, accumulator);
+ T::CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
return storage;
}
} // namespace
@@ -672,7 +662,9 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
// throws for uninitialized exports.
for (int i = 0, n = enum_keys->length(); i < n; ++i) {
Handle<String> key(String::cast(enum_keys->get(i)), isolate_);
- if (Handle<JSModuleNamespace>::cast(object)->GetExport(key).is_null()) {
+ if (Handle<JSModuleNamespace>::cast(object)
+ ->GetExport(isolate(), key)
+ .is_null()) {
return Nothing<bool>();
}
}
@@ -707,15 +699,13 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
Handle<JSObject> object) {
- if (!skip_indices_) {
- MAYBE_RETURN((CollectInterceptorKeysInternal(
- receiver, object,
- handle(InterceptorInfo::cast(
- access_check_info->indexed_interceptor()),
- isolate_),
- this, kIndexed)),
- Nothing<bool>());
- }
+ MAYBE_RETURN((CollectInterceptorKeysInternal(
+ receiver, object,
+ handle(InterceptorInfo::cast(
+ access_check_info->indexed_interceptor()),
+ isolate_),
+ this, kIndexed)),
+ Nothing<bool>());
MAYBE_RETURN(
(CollectInterceptorKeysInternal(
receiver, object,
@@ -732,7 +722,7 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
// Check access rights if required.
if (object->IsAccessCheckNeeded() &&
- !isolate_->MayAccess(handle(isolate_->context()), object)) {
+ !isolate_->MayAccess(handle(isolate_->context(), isolate_), object)) {
// The cross-origin spec says that [[Enumerate]] shall return an empty
// iterator when it doesn't have access...
if (mode_ == KeyCollectionMode::kIncludePrototypes) {
@@ -779,11 +769,17 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
namespace {
-struct NameComparator {
+class NameComparator {
+ public:
+ explicit NameComparator(Isolate* isolate) : isolate_(isolate) {}
+
bool operator()(uint32_t hash1, uint32_t hash2, const Handle<Name>& key1,
const Handle<Name>& key2) const {
- return Name::Equals(key1, key2);
+ return Name::Equals(isolate_, key1, key2);
}
+
+ private:
+ Isolate* isolate_;
};
} // namespace
@@ -882,7 +878,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
base::TemplateHashMapImpl<Handle<Name>, int, NameComparator,
ZoneAllocationPolicy>
unchecked_result_keys(ZoneHashMap::kDefaultHashMapCapacity,
- NameComparator(), alloc);
+ NameComparator(isolate_), alloc);
int unchecked_result_keys_size = 0;
for (int i = 0; i < trap_result->length(); ++i) {
Handle<Name> key(Name::cast(trap_result->get(i)), isolate_);
@@ -946,9 +942,9 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyTargetKeys(
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, keys,
- KeyAccumulator::GetKeys(
- target, KeyCollectionMode::kOwnOnly, filter_,
- GetKeysConversion::kConvertToString, is_for_in_, skip_indices_),
+ KeyAccumulator::GetKeys(target, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString, is_for_in_),
Nothing<bool>());
Maybe<bool> result = AddKeysFromJSProxy(proxy, keys);
return result;
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 5abbaac5cd..649d6a9599 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -40,7 +40,7 @@ class KeyAccumulator final BASE_EMBEDDED {
static MaybeHandle<FixedArray> GetKeys(
Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
GetKeysConversion keys_conversion = GetKeysConversion::kKeepNumbers,
- bool is_for_in = false, bool skip_indices = false);
+ bool is_for_in = false);
Handle<FixedArray> GetKeys(
GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
@@ -128,19 +128,14 @@ class KeyAccumulator final BASE_EMBEDDED {
class FastKeyAccumulator {
public:
FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
- KeyCollectionMode mode, PropertyFilter filter,
- bool is_for_in = false, bool skip_indices = false)
- : isolate_(isolate),
- receiver_(receiver),
- mode_(mode),
- filter_(filter),
- is_for_in_(is_for_in),
- skip_indices_(skip_indices) {
+ KeyCollectionMode mode, PropertyFilter filter)
+ : isolate_(isolate), receiver_(receiver), mode_(mode), filter_(filter) {
Prepare();
}
bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
bool has_empty_prototype() { return has_empty_prototype_; }
+ void set_is_for_in(bool value) { is_for_in_ = value; }
MaybeHandle<FixedArray> GetKeys(
GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
@@ -158,7 +153,6 @@ class FastKeyAccumulator {
KeyCollectionMode mode_;
PropertyFilter filter_;
bool is_for_in_ = false;
- bool skip_indices_ = false;
bool is_receiver_simple_enum_ = false;
bool has_empty_prototype_ = false;
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index eb93397518..cf81e6c303 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -46,18 +46,18 @@ class Label {
#endif
#endif
- INLINE(~Label()) {
+ V8_INLINE ~Label() {
DCHECK(!is_linked());
DCHECK(!is_near_linked());
}
- INLINE(void Unuse()) { pos_ = 0; }
- INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+ V8_INLINE void Unuse() { pos_ = 0; }
+ V8_INLINE void UnuseNear() { near_link_pos_ = 0; }
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
- INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
+ V8_INLINE bool is_bound() const { return pos_ < 0; }
+ V8_INLINE bool is_unused() const { return pos_ == 0 && near_link_pos_ == 0; }
+ V8_INLINE bool is_linked() const { return pos_ > 0; }
+ V8_INLINE bool is_near_linked() const { return near_link_pos_ > 0; }
// Returns the position of bound or linked labels. Cannot be used
// for unused labels.
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 40d3975786..e37b4a2edf 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -18,7 +18,7 @@ LayoutDescriptor* LayoutDescriptor::FromSmi(Smi* smi) {
Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
- if (length <= kSmiValueSize) {
+ if (length <= kBitsInSmiLayout) {
// The whole bit vector fits into a smi.
return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
}
@@ -130,7 +130,7 @@ bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
int LayoutDescriptor::capacity() {
- return IsSlowLayout() ? (length() * kBitsPerByte) : kSmiValueSize;
+ return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
}
@@ -161,10 +161,10 @@ int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
int layout_descriptor_length;
const int kMaxWordsPerField = kDoubleSize / kPointerSize;
- if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
+ if (num_descriptors <= kBitsInSmiLayout / kMaxWordsPerField) {
// Even in the "worst" case (all fields are doubles) it would fit into
// a Smi, so no need to calculate length.
- layout_descriptor_length = kSmiValueSize;
+ layout_descriptor_length = kBitsInSmiLayout;
} else {
layout_descriptor_length = 0;
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 41882cedaa..684b7d5cd6 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -14,8 +14,8 @@ namespace v8 {
namespace internal {
Handle<LayoutDescriptor> LayoutDescriptor::New(
- Handle<Map> map, Handle<DescriptorArray> descriptors, int num_descriptors) {
- Isolate* isolate = descriptors->GetIsolate();
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
+ int num_descriptors) {
if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
int layout_descriptor_length =
@@ -37,11 +37,9 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(
return handle(layout_descriptor, isolate);
}
-
Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
- Handle<Map> map, PropertyDetails details) {
+ Isolate* isolate, Handle<Map> map, PropertyDetails details) {
DCHECK(map->owns_descriptors());
- Isolate* isolate = map->GetIsolate();
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
isolate);
@@ -63,9 +61,8 @@ Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
return handle(layout_desc, isolate);
}
-
Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
- Handle<Map> map, PropertyDetails details,
+ Isolate* isolate, Handle<Map> map, PropertyDetails details,
Handle<LayoutDescriptor> full_layout_descriptor) {
DisallowHeapAllocation no_allocation;
LayoutDescriptor* layout_descriptor = map->layout_descriptor();
@@ -75,7 +72,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
DCHECK(details.location() != kField ||
layout_descriptor->IsTagged(details.field_index()));
- return handle(layout_descriptor, map->GetIsolate());
+ return handle(layout_descriptor, isolate);
}
int field_index = details.field_index();
int new_capacity = field_index + details.field_width_in_words();
@@ -89,7 +86,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
if (details.field_width_in_words() > 1) {
layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
}
- return handle(layout_descriptor, map->GetIsolate());
+ return handle(layout_descriptor, isolate);
}
@@ -142,13 +139,13 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
bool is_tagged = (value & layout_mask) == 0;
if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
- int sequence_length =
- base::bits::CountTrailingZeros(value) - layout_bit_index;
+ int sequence_length;
+ if (IsSlowLayout()) {
+ sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;
- if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
- // This is a contiguous sequence till the end of current word, proceed
- // counting in the subsequent words.
- if (IsSlowLayout()) {
+ if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
+ // This is a contiguous sequence till the end of current word, proceed
+ // counting in the subsequent words.
++layout_word_index;
int num_words = number_of_layout_words();
for (; layout_word_index < num_words; layout_word_index++) {
@@ -161,7 +158,17 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
if (sequence_length >= max_sequence_length) break;
if (cur_sequence_length != kBitsPerLayoutWord) break;
}
+ if (is_tagged && (field_index + sequence_length == capacity())) {
+ // The contiguous sequence of tagged fields lasts till the end of the
+ // layout descriptor which means that all the fields starting from
+ // field_index are tagged.
+ sequence_length = std::numeric_limits<int>::max();
+ }
}
+ } else { // Fast layout.
+ sequence_length = Min(base::bits::CountTrailingZeros(value),
+ static_cast<unsigned>(kBitsInSmiLayout)) -
+ layout_bit_index;
if (is_tagged && (field_index + sequence_length == capacity())) {
// The contiguous sequence of tagged fields lasts till the end of the
// layout descriptor which means that all the fields starting from
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 1332a5efaa..0e75096197 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -51,20 +51,20 @@ class LayoutDescriptor : public ByteArray {
// Builds layout descriptor optimized for given |map| by |num_descriptors|
// elements of given descriptors array. The |map|'s descriptors could be
// different.
- static Handle<LayoutDescriptor> New(Handle<Map> map,
+ static Handle<LayoutDescriptor> New(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
int num_descriptors);
// Modifies |map|'s layout descriptor or creates a new one if necessary by
// appending property with |details| to it.
- static Handle<LayoutDescriptor> ShareAppend(Handle<Map> map,
+ static Handle<LayoutDescriptor> ShareAppend(Isolate* isolate, Handle<Map> map,
PropertyDetails details);
// Creates new layout descriptor by appending property with |details| to
// |map|'s layout descriptor and if it is still fast then returns it.
// Otherwise the |full_layout_descriptor| is returned.
static Handle<LayoutDescriptor> AppendIfFastOrUseFull(
- Handle<Map> map, PropertyDetails details,
+ Isolate* isolate, Handle<Map> map, PropertyDetails details,
Handle<LayoutDescriptor> full_layout_descriptor);
// Layout descriptor that corresponds to an object all fields of which are
@@ -96,6 +96,10 @@ class LayoutDescriptor : public ByteArray {
LayoutDescriptor* SetTaggedForTesting(int field_index, bool tagged);
private:
+ // Exclude sign-bit to simplify encoding.
+ static constexpr int kBitsInSmiLayout =
+ SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
+
static const int kBitsPerLayoutWord = 32;
int number_of_layout_words() { return length() / kUInt32Size; }
uint32_t get_layout_word(int index) const { return get_uint32(index); }
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index b4aa7baf72..647306d627 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -24,17 +24,18 @@ namespace tracing {
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
+ "toplevel", "tracing already shutdown",
"tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
"__metadata"};
// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
// Indexes here have to match the g_category_groups array indexes above.
-const int g_category_categories_exhausted = 1;
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
// Metadata category not used in V8.
-// const int g_category_metadata = 2;
-const int g_num_builtin_categories = 3;
+// const int g_category_metadata = 3;
+const int g_num_builtin_categories = 4;
// Skip default categories.
v8::base::AtomicWord g_category_index = g_num_builtin_categories;
@@ -102,6 +103,10 @@ void TracingController::UpdateTraceEventDuration(
const uint8_t* TracingController::GetCategoryGroupEnabled(
const char* category_group) {
+ if (!trace_buffer_) {
+ DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+ return &g_category_group_enabled[g_category_already_shutdown];
+ }
return GetCategoryGroupEnabledInternal(category_group);
}
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 938a84bffd..c5551fcac1 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -109,7 +109,7 @@ void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
DCHECK(symbol);
OFStream& os = log_->os_;
os << "symbol(";
- if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
+ if (!symbol->name()->IsUndefined()) {
os << "\"";
AppendDetailed(String::cast(symbol->name()), false);
os << "\" ";
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index da9e126879..77d68ef94e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -114,7 +114,7 @@ class CodeEventLogger::NameBuffer {
} else {
Symbol* symbol = Symbol::cast(name);
AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
+ if (!symbol->name()->IsUndefined()) {
AppendBytes("\"");
AppendString(String::cast(symbol->name()));
AppendBytes("\" ");
@@ -191,8 +191,8 @@ class CodeEventLogger::NameBuffer {
uc16 utf16_buffer[kUtf16BufferSize];
};
-
-CodeEventLogger::CodeEventLogger() : name_buffer_(new NameBuffer) { }
+CodeEventLogger::CodeEventLogger(Isolate* isolate)
+ : isolate_(isolate), name_buffer_(new NameBuffer) {}
CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
@@ -267,10 +267,10 @@ void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
// Linux perf tool logging support
class PerfBasicLogger : public CodeEventLogger {
public:
- PerfBasicLogger();
+ explicit PerfBasicLogger(Isolate* isolate);
~PerfBasicLogger() override;
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {}
+ void CodeMoveEvent(AbstractCode* from, Address to) override {}
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override {}
@@ -293,7 +293,8 @@ const char PerfBasicLogger::kFilenameFormatString[] = "/tmp/perf-%d.map";
// Extra space for the PID in the filename
const int PerfBasicLogger::kFilenameBufferPadding = 16;
-PerfBasicLogger::PerfBasicLogger() : perf_output_handle_(nullptr) {
+PerfBasicLogger::PerfBasicLogger(Isolate* isolate)
+ : CodeEventLogger(isolate), perf_output_handle_(nullptr) {
// Open the perf JIT dump file.
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
ScopedVector<char> perf_dump_name(bufferSize);
@@ -405,7 +406,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
void ExternalCodeEventListener::CodeCreateEvent(
CodeEventListener::LogEventsAndTags tag, AbstractCode* code, Name* name) {
Handle<String> name_string =
- Name::ToFunctionName(Handle<Name>(name, isolate_)).ToHandleChecked();
+ Name::ToFunctionName(isolate_, Handle<Name>(name, isolate_))
+ .ToHandleChecked();
CodeEvent code_event;
code_event.code_start_address =
@@ -425,7 +427,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEventListener::LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* name) {
Handle<String> name_string =
- Name::ToFunctionName(Handle<Name>(name, isolate_)).ToHandleChecked();
+ Name::ToFunctionName(isolate_, Handle<Name>(name, isolate_))
+ .ToHandleChecked();
CodeEvent code_event;
code_event.code_start_address =
@@ -445,10 +448,11 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEventListener::LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source, int line, int column) {
Handle<String> name_string =
- Name::ToFunctionName(Handle<Name>(shared->Name(), isolate_))
+ Name::ToFunctionName(isolate_, Handle<Name>(shared->Name(), isolate_))
.ToHandleChecked();
Handle<String> source_string =
- Name::ToFunctionName(Handle<Name>(source, isolate_)).ToHandleChecked();
+ Name::ToFunctionName(isolate_, Handle<Name>(source, isolate_))
+ .ToHandleChecked();
CodeEvent code_event;
code_event.code_start_address =
@@ -489,10 +493,10 @@ void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode* code,
// Low-level logging support.
class LowLevelLogger : public CodeEventLogger {
public:
- explicit LowLevelLogger(const char* file_name);
+ LowLevelLogger(Isolate* isolate, const char* file_name);
~LowLevelLogger() override;
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override {}
void SnapshotPositionEvent(HeapObject* obj, int pos);
@@ -543,7 +547,8 @@ class LowLevelLogger : public CodeEventLogger {
const char LowLevelLogger::kLogExt[] = ".ll";
-LowLevelLogger::LowLevelLogger(const char* name) : ll_output_handle_(nullptr) {
+LowLevelLogger::LowLevelLogger(Isolate* isolate, const char* name)
+ : CodeEventLogger(isolate), ll_output_handle_(nullptr) {
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
@@ -610,10 +615,11 @@ void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
code->instructions().length());
}
-void LowLevelLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
CodeMoveStruct event;
event.from_address = from->InstructionStart();
- event.to_address = to->InstructionStart();
+ size_t header_size = from->InstructionStart() - from->address();
+ event.to_address = to + header_size;
LogWriteStruct(event);
}
@@ -633,9 +639,9 @@ void LowLevelLogger::CodeMovingGCEvent() {
class JitLogger : public CodeEventLogger {
public:
- explicit JitLogger(JitCodeEventHandler code_event_handler);
+ JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler);
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override {}
void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
@@ -655,10 +661,8 @@ class JitLogger : public CodeEventLogger {
base::Mutex logger_mutex_;
};
-
-JitLogger::JitLogger(JitCodeEventHandler code_event_handler)
- : code_event_handler_(code_event_handler) {
-}
+JitLogger::JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler)
+ : CodeEventLogger(isolate), code_event_handler_(code_event_handler) {}
void JitLogger::LogRecordedBuffer(AbstractCode* code,
SharedFunctionInfo* shared, const char* name,
@@ -672,11 +676,13 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
event.code_len = code->InstructionSize();
Handle<SharedFunctionInfo> shared_function_handle;
if (shared && shared->script()->IsScript()) {
- shared_function_handle = Handle<SharedFunctionInfo>(shared);
+ shared_function_handle =
+ Handle<SharedFunctionInfo>(shared, shared->GetIsolate());
}
event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
event.name.str = name;
event.name.len = length;
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
@@ -690,10 +696,11 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
event.code_len = code->instructions().length();
event.name.str = name;
event.name.len = length;
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
-void JitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
base::LockGuard<base::Mutex> guard(&logger_mutex_);
JitCodeEvent event;
@@ -702,7 +709,13 @@ void JitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
from->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
event.code_start = reinterpret_cast<void*>(from->InstructionStart());
event.code_len = from->InstructionSize();
- event.new_code_start = reinterpret_cast<void*>(to->InstructionStart());
+
+ // Calculate the header size.
+ const size_t header_size = from->InstructionStart() - from->address();
+
+ // Calculate the new start address of the instructions.
+ event.new_code_start = reinterpret_cast<void*>(to + header_size);
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
@@ -719,6 +732,7 @@ void JitLogger::AddCodeLinePosInfoEvent(
event.line_info.offset = pc_offset;
event.line_info.pos = position;
event.line_info.position_type = position_type;
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
@@ -728,6 +742,7 @@ void* JitLogger::StartCodePosInfoEvent() {
JitCodeEvent event;
memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
return event.user_data;
@@ -740,6 +755,7 @@ void JitLogger::EndCodePosInfoEvent(Address start_address,
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
event.code_start = reinterpret_cast<void*>(start_address);
event.user_data = jit_handler_data;
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
@@ -1044,7 +1060,7 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
msg.WriteToLogFile();
}
-void Logger::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+void Logger::CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) {
if (!log_->IsEnabled()) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
@@ -1065,17 +1081,7 @@ void Logger::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
deopt_location << "<unknown>";
}
msg << kNext << inlining_id << kNext << script_offset << kNext;
- switch (kind) {
- case kLazy:
- msg << "lazy" << kNext;
- break;
- case kSoft:
- msg << "soft" << kNext;
- break;
- case kEager:
- msg << "eager" << kNext;
- break;
- }
+ msg << Deoptimizer::MessageFor(kind) << kNext;
msg << deopt_location.str().c_str() << kNext
<< DeoptimizeReasonToString(info.deopt_reason);
msg.WriteToLogFile();
@@ -1304,40 +1310,20 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
Name* source, int line, int column) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
-
- Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(msg, tag, code, &timer_);
- msg << shared->DebugName() << " " << source << ":" << line << ":" << column
- << kNext << reinterpret_cast<void*>(shared->address()) << kNext
- << ComputeMarker(shared, code);
- msg.WriteToLogFile();
+ {
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, code, &timer_);
+ msg << shared->DebugName() << " " << source << ":" << line << ":" << column
+ << kNext << reinterpret_cast<void*>(shared->address()) << kNext
+ << ComputeMarker(shared, code);
+ msg.WriteToLogFile();
+ }
if (!FLAG_log_source_code) return;
Object* script_object = shared->script();
if (!script_object->IsScript()) return;
- // Make sure the script is written to the log file.
Script* script = Script::cast(script_object);
- int script_id = script->id();
- if (logged_source_code_.find(script_id) == logged_source_code_.end()) {
- // This script has not been logged yet.
- logged_source_code_.insert(script_id);
- Object* source_object = script->source();
- if (source_object->IsString()) {
- String* source_code = String::cast(source_object);
- msg << "script" << kNext << script_id << kNext;
-
- // Log the script name.
- if (script->name()->IsString()) {
- msg << String::cast(script->name()) << kNext;
- } else {
- msg << "<unknown>" << kNext;
- }
-
- // Log the source code.
- msg << source_code;
- msg.WriteToLogFile();
- }
- }
+ if (!EnsureLogScriptSource(script)) return;
// We log source code information in the form:
//
@@ -1360,10 +1346,11 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// <function-id> is an index into the <fns> function table
// <fns> is the function table encoded as a sequence of strings
// S<shared-function-info-address>
+ Log::MessageBuilder msg(log_);
msg << "code-source-info" << kNext
- << reinterpret_cast<void*>(code->InstructionStart()) << kNext << script_id
- << kNext << shared->StartPosition() << kNext << shared->EndPosition()
- << kNext;
+ << reinterpret_cast<void*>(code->InstructionStart()) << kNext
+ << script->id() << kNext << shared->StartPosition() << kNext
+ << shared->EndPosition() << kNext;
SourcePositionTableIterator iterator(code->source_position_table());
bool is_first = true;
@@ -1444,10 +1431,9 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
msg.WriteToLogFile();
}
-void Logger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
if (!is_listening_to_code_events()) return;
- MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(),
- to->address());
+ MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
}
namespace {
@@ -1527,57 +1513,135 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
Log::MessageBuilder msg(log_);
String* class_name = obj->IsJSObject()
- ? JSObject::cast(obj)->class_name()
- : isolate_->heap()->empty_string();
+ ? JSObject::cast(obj)->class_name()
+ : ReadOnlyRoots(isolate_).empty_string();
msg << "suspect-read" << kNext << class_name << kNext << name;
msg.WriteToLogFile();
}
namespace {
void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
- Script* script, int script_id, double time_delta,
- int start_position, int end_position,
- base::ElapsedTimer* timer) {
- msg << "function" << Logger::kNext << reason << Logger::kNext;
- if (script) {
- if (script->name()->IsString()) {
- msg << String::cast(script->name());
- }
- msg << Logger::kNext << script->id();
- } else {
- msg << Logger::kNext << script_id;
- }
- msg << Logger::kNext << start_position << Logger::kNext << end_position
+ int script_id, double time_delta, int start_position,
+ int end_position, base::ElapsedTimer* timer) {
+ msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
+ << Logger::kNext << start_position << Logger::kNext << end_position
<< Logger::kNext << time_delta << Logger::kNext
<< timer->Elapsed().InMicroseconds() << Logger::kNext;
}
} // namespace
-void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
- double time_delta, int start_position,
- int end_position, String* function_name) {
+void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
+ int start_position, int end_position,
+ String* function_name) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
Log::MessageBuilder msg(log_);
- AppendFunctionMessage(msg, reason, script, script_id, time_delta,
- start_position, end_position, &timer_);
+ AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
+ end_position, &timer_);
if (function_name) msg << function_name;
msg.WriteToLogFile();
}
-void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
- double time_delta, int start_position,
- int end_position, const char* function_name,
+void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
+ int start_position, int end_position,
+ const char* function_name,
size_t function_name_length) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
Log::MessageBuilder msg(log_);
- AppendFunctionMessage(msg, reason, script, script_id, time_delta,
- start_position, end_position, &timer_);
+ AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
+ end_position, &timer_);
if (function_name_length > 0) {
msg.AppendStringPart(function_name, function_name_length);
}
msg.WriteToLogFile();
}
+void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
+ SharedFunctionInfo* sfi) {
+ if (!log_->IsEnabled() || !FLAG_log_function_events) return;
+ Log::MessageBuilder msg(log_);
+ int script_id = -1;
+ if (sfi->script()->IsScript()) {
+ script_id = Script::cast(sfi->script())->id();
+ }
+ msg << "compilation-cache" << Logger::kNext << action << Logger::kNext
+ << cache_type << Logger::kNext << script_id << Logger::kNext
+ << sfi->StartPosition() << Logger::kNext << sfi->EndPosition()
+ << Logger::kNext << timer_.Elapsed().InMicroseconds();
+ msg.WriteToLogFile();
+}
+
+void Logger::ScriptEvent(ScriptEventType type, int script_id) {
+ if (!log_->IsEnabled() || !FLAG_log_function_events) return;
+ Log::MessageBuilder msg(log_);
+ msg << "script" << Logger::kNext;
+ switch (type) {
+ case ScriptEventType::kReserveId:
+ msg << "reserve-id";
+ break;
+ case ScriptEventType::kCreate:
+ msg << "create";
+ break;
+ case ScriptEventType::kDeserialize:
+ msg << "deserialize";
+ break;
+ case ScriptEventType::kBackgroundCompile:
+ msg << "background-compile";
+ break;
+ case ScriptEventType::kStreamingCompile:
+ msg << "streaming-compile";
+ break;
+ }
+ msg << Logger::kNext << script_id << Logger::kNext
+ << timer_.Elapsed().InMicroseconds();
+ msg.WriteToLogFile();
+}
+
+void Logger::ScriptDetails(Script* script) {
+ if (!log_->IsEnabled() || !FLAG_log_function_events) return;
+ {
+ Log::MessageBuilder msg(log_);
+ msg << "script-details" << Logger::kNext << script->id() << Logger::kNext;
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name());
+ }
+ msg << Logger::kNext << script->line_offset() << Logger::kNext
+ << script->column_offset() << Logger::kNext;
+ if (script->source_mapping_url()->IsString()) {
+ msg << String::cast(script->source_mapping_url());
+ }
+ msg.WriteToLogFile();
+ }
+ EnsureLogScriptSource(script);
+}
+
+bool Logger::EnsureLogScriptSource(Script* script) {
+ if (!log_->IsEnabled()) return false;
+ Log::MessageBuilder msg(log_);
+ // Make sure the script is written to the log file.
+ int script_id = script->id();
+ if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
+ return false;
+ }
+ // This script has not been logged yet.
+ logged_source_code_.insert(script_id);
+ Object* source_object = script->source();
+ if (!source_object->IsString()) return false;
+ String* source_code = String::cast(source_object);
+ msg << "script-source" << kNext << script_id << kNext;
+
+ // Log the script name.
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name()) << kNext;
+ } else {
+ msg << "<unknown>" << kNext;
+ }
+
+ // Log the source code.
+ msg << source_code;
+ msg.WriteToLogFile();
+ return true;
+}
+
void Logger::RuntimeCallTimerEvent() {
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallCounter* counter = stats->current_counter();
@@ -1715,10 +1779,10 @@ static void AddFunctionAndCode(SharedFunctionInfo* sfi,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects, int offset) {
if (sfis != nullptr) {
- sfis[offset] = Handle<SharedFunctionInfo>(sfi);
+ sfis[offset] = Handle<SharedFunctionInfo>(sfi, sfi->GetIsolate());
}
if (code_objects != nullptr) {
- code_objects[offset] = Handle<AbstractCode>(code_object);
+ code_objects[offset] = Handle<AbstractCode>(code_object, sfi->GetIsolate());
}
}
@@ -1765,23 +1829,23 @@ static int EnumerateCompiledFunctions(Heap* heap,
return compiled_funcs_count;
}
-static int EnumerateWasmModules(Heap* heap,
- Handle<WasmCompiledModule>* modules) {
+static int EnumerateWasmModuleObjects(
+ Heap* heap, Handle<WasmModuleObject>* module_objects) {
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
- int wasm_modules_count = 0;
+ int module_objects_count = 0;
for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
- if (obj->IsWasmCompiledModule()) {
- WasmCompiledModule* module = WasmCompiledModule::cast(obj);
- if (modules != nullptr) {
- modules[wasm_modules_count] = Handle<WasmCompiledModule>(module);
+ if (obj->IsWasmModuleObject()) {
+ WasmModuleObject* module = WasmModuleObject::cast(obj);
+ if (module_objects != nullptr) {
+ module_objects[module_objects_count] = handle(module, heap->isolate());
}
- wasm_modules_count++;
+ module_objects_count++;
}
}
- return wasm_modules_count;
+ return module_objects_count;
}
void Logger::LogCodeObject(Object* object) {
@@ -1909,17 +1973,17 @@ bool Logger::SetUp(Isolate* isolate) {
log_ = new Log(this, log_file_name.str().c_str());
if (FLAG_perf_basic_prof) {
- perf_basic_logger_ = new PerfBasicLogger();
+ perf_basic_logger_ = new PerfBasicLogger(isolate);
AddCodeEventListener(perf_basic_logger_);
}
if (FLAG_perf_prof) {
- perf_jit_logger_ = new PerfJitLogger();
+ perf_jit_logger_ = new PerfJitLogger(isolate);
AddCodeEventListener(perf_jit_logger_);
}
if (FLAG_ll_prof) {
- ll_logger_ = new LowLevelLogger(log_file_name.str().c_str());
+ ll_logger_ = new LowLevelLogger(isolate, log_file_name.str().c_str());
AddCodeEventListener(ll_logger_);
}
@@ -1954,7 +2018,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
if (event_handler) {
- jit_logger_ = new JitLogger(event_handler);
+ jit_logger_ = new JitLogger(isolate_, event_handler);
AddCodeEventListener(jit_logger_);
if (options & kJitCodeEventEnumExisting) {
HandleScope scope(isolate_);
@@ -2090,21 +2154,24 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
if (sfis[i]->function_data()->IsInterpreterData()) {
- LogExistingFunction(sfis[i],
- Handle<AbstractCode>(AbstractCode::cast(
- sfis[i]->InterpreterTrampoline())),
- CodeEventListener::INTERPRETED_FUNCTION_TAG);
+ LogExistingFunction(
+ sfis[i],
+ Handle<AbstractCode>(
+ AbstractCode::cast(sfis[i]->InterpreterTrampoline()), isolate_),
+ CodeEventListener::INTERPRETED_FUNCTION_TAG);
}
if (code_objects[i].is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
- const int compiled_wasm_modules_count = EnumerateWasmModules(heap, nullptr);
- ScopedVector<Handle<WasmCompiledModule>> modules(compiled_wasm_modules_count);
- EnumerateWasmModules(heap, modules.start());
- for (int i = 0; i < compiled_wasm_modules_count; ++i) {
- modules[i]->LogWasmCodes(isolate_);
+ const int wasm_module_objects_count =
+ EnumerateWasmModuleObjects(heap, nullptr);
+ std::unique_ptr<Handle<WasmModuleObject>[]> module_objects(
+ new Handle<WasmModuleObject>[wasm_module_objects_count]);
+ EnumerateWasmModuleObjects(heap, module_objects.get());
+ for (int i = 0; i < wasm_module_objects_count; ++i) {
+ module_objects[i]->native_module()->LogWasmCodes(isolate_);
}
}
@@ -2143,12 +2210,12 @@ void ExistingCodeLogger::LogExistingFunction(
Handle<SharedFunctionInfo> shared, Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag) {
if (shared->script()->IsScript()) {
- Handle<Script> script(Script::cast(shared->script()));
+ Handle<Script> script(Script::cast(shared->script()), isolate_);
int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
int column_num =
Script::GetColumnNumber(script, shared->StartPosition()) + 1;
if (script->name()->IsString()) {
- Handle<String> script_name(String::cast(script->name()));
+ Handle<String> script_name(String::cast(script->name()), isolate_);
if (line_num > 0) {
CALL_CODE_EVENT_HANDLER(
CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code,
@@ -2162,7 +2229,7 @@ void ExistingCodeLogger::LogExistingFunction(
} else {
CALL_CODE_EVENT_HANDLER(CodeCreateEvent(
Logger::ToNativeByScript(tag, *script), *code, *shared,
- isolate_->heap()->empty_string(), line_num, column_num))
+ ReadOnlyRoots(isolate_).empty_string(), line_num, column_num))
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -2178,8 +2245,8 @@ void ExistingCodeLogger::LogExistingFunction(
CALL_CODE_EVENT_HANDLER(CallbackEvent(shared->DebugName(), entry_point))
}
} else {
- CALL_CODE_EVENT_HANDLER(
- CodeCreateEvent(tag, *code, *shared, isolate_->heap()->empty_string()))
+ CALL_CODE_EVENT_HANDLER(CodeCreateEvent(
+ tag, *code, *shared, ReadOnlyRoots(isolate_).empty_string()))
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 35f6688559..485de0f4d1 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -74,7 +74,6 @@ class PerfJitLogger;
class Profiler;
class RuntimeCallTimer;
class Ticker;
-class WasmCompiledModule;
namespace interpreter {
enum class Bytecode : uint8_t;
@@ -125,6 +124,14 @@ class Logger : public CodeEventListener {
public:
enum StartEnd { START = 0, END = 1, STAMP = 2 };
+ enum class ScriptEventType {
+ kReserveId,
+ kCreate,
+ kDeserialize,
+ kBackgroundCompile,
+ kStreamingCompile
+ };
+
// The separator is used to write an unescaped "," into the log.
static const LogSeparator kNext = LogSeparator::kSeparator;
@@ -165,14 +172,20 @@ class Logger : public CodeEventListener {
// object.
void SuspectReadEvent(Name* name, Object* obj);
- void FunctionEvent(const char* reason, Script* script, int script_id,
- double time_delta_ms, int start_position = -1,
- int end_position = -1, String* function_name = nullptr);
- void FunctionEvent(const char* reason, Script* script, int script_id,
- double time_delta_ms, int start_position, int end_position,
+ // ==== Events logged by --log-function-events ====
+ void FunctionEvent(const char* reason, int script_id, double time_delta_ms,
+ int start_position = -1, int end_position = -1,
+ String* function_name = nullptr);
+ void FunctionEvent(const char* reason, int script_id, double time_delta_ms,
+ int start_position, int end_position,
const char* function_name = nullptr,
size_t function_name_length = 0);
+ void CompilationCacheEvent(const char* action, const char* cache_type,
+ SharedFunctionInfo* sfi);
+ void ScriptEvent(ScriptEventType type, int script_id);
+ void ScriptDetails(Script* script);
+
// ==== Events logged by --log-api. ====
void ApiSecurityCheck();
void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
@@ -209,7 +222,7 @@ class Logger : public CodeEventListener {
// Emits a code create event for a RegExp.
void RegExpCodeCreateEvent(AbstractCode* code, String* source);
// Emits a code move event.
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to);
+ void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info record event.
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray* source_position_table);
@@ -220,7 +233,7 @@ class Logger : public CodeEventListener {
void CodeNameEvent(Address addr, int pos, const char* code_name);
- void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+ void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta);
void ICEvent(const char* type, bool keyed, Map* map, Object* key,
@@ -246,8 +259,8 @@ class Logger : public CodeEventListener {
static void DefaultEventLoggerSentinel(const char* name, int event) {}
- INLINE(static void CallEventLogger(Isolate* isolate, const char* name,
- StartEnd se, bool expose_to_api));
+ V8_INLINE static void CallEventLogger(Isolate* isolate, const char* name,
+ StartEnd se, bool expose_to_api);
bool is_logging() {
return is_logging_;
@@ -263,7 +276,6 @@ class Logger : public CodeEventListener {
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code);
- void LogCompiledModule(Handle<WasmCompiledModule> module);
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
@@ -278,8 +290,8 @@ class Logger : public CodeEventListener {
void LogMaps();
// Converts tag to a corresponding NATIVE_... if the script is native.
- INLINE(static CodeEventListener::LogEventsAndTags ToNativeByScript(
- CodeEventListener::LogEventsAndTags, Script*));
+ V8_INLINE static CodeEventListener::LogEventsAndTags ToNativeByScript(
+ CodeEventListener::LogEventsAndTags, Script*);
// Callback from Log, stops profiling in case of insufficient resources.
void LogFailure();
@@ -316,6 +328,10 @@ class Logger : public CodeEventListener {
// Logs an IntPtrTEvent regardless of whether FLAG_log is true.
void UncheckedIntPtrTEvent(const char* name, intptr_t value);
+ // Logs a scripts sources. Keeps track of all logged scripts to ensure that
+ // each script is logged only once.
+ bool EnsureLogScriptSource(Script* script);
+
Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
@@ -398,7 +414,7 @@ class TimerEventScope {
class CodeEventLogger : public CodeEventListener {
public:
- CodeEventLogger();
+ explicit CodeEventLogger(Isolate* isolate);
~CodeEventLogger() override;
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
@@ -419,9 +435,12 @@ class CodeEventLogger : public CodeEventListener {
void SetterCallbackEvent(Name* name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
- void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+ void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override {}
+ protected:
+ Isolate* isolate_;
+
private:
class NameBuffer;
@@ -434,6 +453,7 @@ class CodeEventLogger : public CodeEventListener {
};
struct CodeEvent {
+ Isolate* isolate_;
uintptr_t code_start_address;
size_t code_size;
Handle<String> function_name;
@@ -466,11 +486,11 @@ class ExternalCodeEventListener : public CodeEventListener {
void GetterCallbackEvent(Name* name, Address entry_point) override {}
void SetterCallbackEvent(Name* name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {}
+ void CodeMoveEvent(AbstractCode* from, Address to) override {}
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override {}
void CodeMovingGCEvent() override {}
- void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+ void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override {}
void StartListening(CodeEventHandler* code_event_handler);
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 517b31e561..32261c91d1 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -29,7 +29,8 @@ LookupIterator LookupIterator::PropertyOrElement(
if (!*success) {
DCHECK(isolate->has_pending_exception());
// Return an unusable dummy.
- return LookupIterator(receiver, isolate->factory()->empty_string());
+ return LookupIterator(isolate, receiver,
+ isolate->factory()->empty_string());
}
if (name->AsArrayIndex(&index)) {
@@ -61,7 +62,8 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
if (!*success) {
DCHECK(isolate->has_pending_exception());
// Return an unusable dummy.
- return LookupIterator(receiver, isolate->factory()->empty_string());
+ return LookupIterator(isolate, receiver,
+ isolate->factory()->empty_string());
}
if (name->AsArrayIndex(&index)) {
@@ -72,7 +74,7 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
return it;
}
- return LookupIterator(receiver, name, configuration);
+ return LookupIterator(isolate, receiver, name, configuration);
}
// TODO(ishell): Consider removing this way of LookupIterator creation.
@@ -84,7 +86,7 @@ LookupIterator LookupIterator::ForTransitionHandler(
if (!maybe_transition_map.ToHandle(&transition_map) ||
!transition_map->IsPrototypeValidityCellValid()) {
// This map is not a valid transition handler, so full lookup is required.
- return LookupIterator(receiver, name);
+ return LookupIterator(isolate, receiver, name);
}
PropertyDetails details = PropertyDetails::Empty();
@@ -108,8 +110,9 @@ LookupIterator LookupIterator::ForTransitionHandler(
if (!transition_map->is_dictionary_map()) {
int descriptor_number = transition_map->LastAdded();
- Handle<Map> new_map = Map::PrepareForDataProperty(
- transition_map, descriptor_number, kConst, value);
+ Handle<Map> new_map =
+ Map::PrepareForDataProperty(isolate, transition_map, descriptor_number,
+ PropertyConstness::kConst, value);
// Reload information; this is no-op if nothing changed.
it.property_details_ =
new_map->instance_descriptors()->GetDetails(descriptor_number);
@@ -237,7 +240,7 @@ Handle<Map> LookupIterator::GetReceiverMap() const {
bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
- return isolate_->MayAccess(handle(isolate_->context()),
+ return isolate_->MayAccess(handle(isolate_->context(), isolate_),
GetHolder<JSObject>());
}
@@ -272,7 +275,8 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
- if (*name_ == heap()->constructor_string()) {
+ ReadOnlyRoots roots(heap());
+ if (*name_ == roots.constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsTypedArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact())
@@ -318,7 +322,7 @@ void LookupIterator::InternalUpdateProtector() {
isolate_->InvalidateTypedArraySpeciesProtector();
}
}
- } else if (*name_ == heap()->next_string()) {
+ } else if (*name_ == roots.next_string()) {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
// Setting the next property of %ArrayIteratorPrototype% also needs to
// invalidate the array iterator protector.
@@ -326,7 +330,7 @@ void LookupIterator::InternalUpdateProtector() {
*holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
isolate_->InvalidateArrayIteratorProtector();
}
- } else if (*name_ == heap()->species_symbol()) {
+ } else if (*name_ == roots.species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsTypedArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact())
@@ -346,26 +350,33 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
}
- } else if (*name_ == heap()->is_concat_spreadable_symbol()) {
+ } else if (*name_ == roots.is_concat_spreadable_symbol()) {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
isolate_->InvalidateIsConcatSpreadableProtector();
- } else if (*name_ == heap()->iterator_symbol()) {
+ } else if (*name_ == roots.iterator_symbol()) {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
if (holder_->IsJSArray()) {
isolate_->InvalidateArrayIteratorProtector();
}
- } else if (*name_ == heap()->resolve_string()) {
+ } else if (*name_ == roots.resolve_string()) {
if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
// Setting the "resolve" property on any %Promise% intrinsic object
// invalidates the Promise.resolve protector.
if (isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX)) {
isolate_->InvalidatePromiseResolveProtector();
}
- } else if (*name_ == heap()->then_string()) {
+ } else if (*name_ == roots.then_string()) {
if (!isolate_->IsPromiseThenLookupChainIntact()) return;
// Setting the "then" property on any JSPromise instance or on the
// initial %PromisePrototype% invalidates the Promise#then protector.
+ // Also setting the "then" property on the initial %ObjectPrototype%
+ // invalidates the Promise#then protector, since we use this protector
+ // to guard the fast-path in AsyncGeneratorResolve, where we can skip
+ // the ResolvePromise step and go directly to FulfillPromise if we
+ // know that the Object.prototype doesn't contain a "then" method.
if (holder_->IsJSPromise() ||
+ isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
isolate_->InvalidatePromiseThenProtector();
}
@@ -403,30 +414,32 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj)->global_dictionary());
- Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()));
+ JSGlobalObject::cast(*holder_obj)->global_dictionary(), isolate());
+ Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()),
+ isolate());
property_details_ = cell->property_details();
- PropertyCell::PrepareForValue(dictionary, dictionary_entry(), value,
- property_details_);
+ PropertyCell::PrepareForValue(isolate(), dictionary, dictionary_entry(),
+ value, property_details_);
return;
}
if (!holder_obj->HasFastProperties()) return;
- PropertyConstness new_constness = kConst;
+ PropertyConstness new_constness = PropertyConstness::kConst;
if (FLAG_track_constant_fields) {
- if (constness() == kConst) {
+ if (constness() == PropertyConstness::kConst) {
DCHECK_EQ(kData, property_details_.kind());
// Check that current value matches new value otherwise we should make
// the property mutable.
- if (!IsConstFieldValueEqualTo(*value)) new_constness = kMutable;
+ if (!IsConstFieldValueEqualTo(*value))
+ new_constness = PropertyConstness::kMutable;
}
} else {
- new_constness = kMutable;
+ new_constness = PropertyConstness::kMutable;
}
Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::PrepareForDataProperty(
- old_map, descriptor_number(), new_constness, value);
+ isolate(), old_map, descriptor_number(), new_constness, value);
if (old_map.is_identical_to(new_map)) {
// Update the property details if the representation was None.
@@ -459,18 +472,19 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
if (IsElement()) {
DCHECK(!holder_obj->HasFixedTypedArrayElements());
DCHECK(attributes != NONE || !holder_obj->HasFastElements());
- Handle<FixedArrayBase> elements(holder_obj->elements());
+ Handle<FixedArrayBase> elements(holder_obj->elements(), isolate());
holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
number_, value, attributes);
ReloadPropertyInformation<true>();
} else if (holder_obj->HasFastProperties()) {
Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::ReconfigureExistingProperty(
- old_map, descriptor_number(), i::kData, attributes);
+ isolate_, old_map, descriptor_number(), i::kData, attributes);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
- new_map = Map::PrepareForDataProperty(new_map, descriptor_number(),
- kMutable, value);
+ new_map =
+ Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
+ PropertyConstness::kMutable, value);
JSObject::MigrateToMap(holder_obj, new_map);
ReloadPropertyInformation<false>();
}
@@ -487,20 +501,22 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
}
if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj)->global_dictionary());
+ JSGlobalObject::cast(*holder_obj)->global_dictionary(), isolate());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
- dictionary, dictionary_entry(), value, details);
+ isolate(), dictionary, dictionary_entry(), value, details);
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(holder_obj->property_dictionary());
+ Handle<NameDictionary> dictionary(holder_obj->property_dictionary(),
+ isolate());
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ dictionary->SetEntry(isolate(), dictionary_entry(), *name(), *value,
+ details);
property_details_ = details;
}
state_ = DATA;
@@ -510,7 +526,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- holder->HeapObjectVerify();
+ holder->HeapObjectVerify(isolate());
}
#endif
}
@@ -557,7 +573,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
property_details_ = PropertyDetails(
kData, attributes, PropertyCellType::kUninitialized, index);
PropertyCellType new_type =
- PropertyCell::UpdatedType(cell, value, property_details_);
+ PropertyCell::UpdatedType(isolate(), cell, value, property_details_);
property_details_ = property_details_.set_cell_type(new_type);
cell->set_property_details(property_details_);
number_ = entry;
@@ -571,8 +587,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
return;
}
- Handle<Map> transition = Map::TransitionToDataProperty(
- map, name_, value, attributes, kDefaultFieldConstness, store_mode);
+ Handle<Map> transition =
+ Map::TransitionToDataProperty(isolate_, map, name_, value, attributes,
+ kDefaultFieldConstness, store_mode);
state_ = TRANSITION;
transition_ = transition;
@@ -625,7 +642,7 @@ void LookupIterator::ApplyTransitionToDataProperty(
if (receiver->map()->is_prototype_map() && receiver->IsJSObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
}
- dictionary = NameDictionary::Add(dictionary, name(),
+ dictionary = NameDictionary::Add(isolate(), dictionary, name(),
isolate_->factory()->uninitialized_value(),
property_details_, &entry);
receiver->SetProperties(*dictionary);
@@ -722,7 +739,7 @@ void LookupIterator::TransitionToAccessorProperty(
return;
}
} else {
- pair = AccessorPair::Copy(pair);
+ pair = AccessorPair::Copy(isolate(), pair);
pair->SetComponents(*getter, *setter);
}
} else {
@@ -734,7 +751,7 @@ void LookupIterator::TransitionToAccessorProperty(
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->JSObjectVerify();
+ receiver->JSObjectVerify(isolate());
}
#endif
}
@@ -752,15 +769,15 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
isolate_->CountUsage(v8::Isolate::kIndexAccessor);
Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(receiver);
- dictionary =
- NumberDictionary::Set(dictionary, index_, pair, receiver, details);
+ dictionary = NumberDictionary::Set(isolate_, dictionary, index_, pair,
+ receiver, details);
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements()) {
FixedArray* parameter_map = FixedArray::cast(receiver->elements());
uint32_t length = parameter_map->length() - 2;
if (number_ < length) {
- parameter_map->set(number_ + 2, heap()->the_hole_value());
+ parameter_map->set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
}
FixedArray::cast(receiver->elements())->set(1, *dictionary);
} else {
@@ -833,7 +850,7 @@ Handle<Object> LookupIterator::FetchValue() const {
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_->map()->instance_descriptors()->GetValue(number_);
+ result = holder_->map()->instance_descriptors()->GetStrongValue(number_);
}
return handle(result, isolate_);
}
@@ -842,7 +859,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
DCHECK(!IsElement());
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
- DCHECK_EQ(kConst, property_details_.constness());
+ DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
if (property_details_.representation().IsDouble()) {
@@ -853,7 +870,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
} else {
Object* current_value = holder->RawFastPropertyAt(field_index);
DCHECK(current_value->IsMutableHeapNumber());
- bits = HeapNumber::cast(current_value)->value_as_bits();
+ bits = MutableHeapNumber::cast(current_value)->value_as_bits();
}
// Use bit representation of double to to check for hole double, since
// manipulating the signaling NaN used for the hole in C++, e.g. with
@@ -904,7 +921,8 @@ Handle<Map> LookupIterator::GetFieldOwnerMap() const {
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
Map* holder_map = holder_->map();
- return handle(holder_map->FindFieldOwner(descriptor_number()), isolate_);
+ return handle(holder_map->FindFieldOwner(isolate(), descriptor_number()),
+ isolate_);
}
FieldIndex LookupIterator::GetFieldIndex() const {
@@ -955,16 +973,16 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
accessor->Set(object, number_, *value);
} else if (holder->HasFastProperties()) {
if (property_details_.location() == kField) {
- // Check that in case of kConst field the existing value is equal to
- // |value|.
- DCHECK_IMPLIES(
- !initializing_store && property_details_.constness() == kConst,
- IsConstFieldValueEqualTo(*value));
+ // Check that in case of VariableMode::kConst field the existing value is
+ // equal to |value|.
+ DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
+ PropertyConstness::kConst,
+ IsConstFieldValueEqualTo(*value));
JSObject::cast(*holder)->WriteToField(descriptor_number(),
property_details_, *value);
} else {
DCHECK_EQ(kDescriptor, property_details_.location());
- DCHECK_EQ(kConst, property_details_.constness());
+ DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject()) {
GlobalDictionary* dictionary =
@@ -999,7 +1017,7 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
- if (map->prototype() == heap()->null_value()) return nullptr;
+ if (map->prototype() == ReadOnlyRoots(heap()).null_value()) return nullptr;
if (!check_prototype_chain() && !map->has_hidden_prototype()) return nullptr;
return JSReceiver::cast(map->prototype());
}
@@ -1049,7 +1067,7 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
if (!is_element && map->IsJSGlobalObjectMap()) {
GlobalDictionary* dict =
JSGlobalObject::cast(holder)->global_dictionary();
- int number = dict->FindEntry(name_);
+ int number = dict->FindEntry(isolate(), name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
PropertyCell* cell = dict->CellAt(number_);
@@ -1102,7 +1120,7 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
} else {
DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary* dict = holder->property_dictionary();
- int number = dict->FindEntry(name_);
+ int number = dict->FindEntry(isolate(), name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = dict->DetailsAt(number_);
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 5a4135d8e0..36ba78be27 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -44,10 +44,6 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
BEFORE_PROPERTY = INTERCEPTOR
};
- LookupIterator(Handle<Object> receiver, Handle<Name> name,
- Configuration configuration = DEFAULT)
- : LookupIterator(name->GetIsolate(), receiver, name, configuration) {}
-
LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
Configuration configuration = DEFAULT)
: LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
@@ -56,7 +52,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
Configuration configuration = DEFAULT)
- : LookupIterator(name->GetIsolate(), receiver, name, holder,
+ : LookupIterator(holder->GetIsolate(), receiver, name, holder,
configuration) {}
LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
@@ -110,7 +106,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
it.name_ = name;
return it;
}
- return LookupIterator(receiver, name, configuration);
+ return LookupIterator(isolate, receiver, name, configuration);
}
static LookupIterator PropertyOrElement(
@@ -273,11 +269,11 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
if (IsElement()) return;
// This list must be kept in sync with
// CodeStubAssembler::CheckForAssociatedProtector!
- if (*name_ == heap()->is_concat_spreadable_symbol() ||
- *name_ == heap()->constructor_string() ||
- *name_ == heap()->next_string() || *name_ == heap()->species_symbol() ||
- *name_ == heap()->iterator_symbol() ||
- *name_ == heap()->resolve_string() || *name_ == heap()->then_string()) {
+ ReadOnlyRoots roots(heap());
+ if (*name_ == roots.is_concat_spreadable_symbol() ||
+ *name_ == roots.constructor_string() || *name_ == roots.next_string() ||
+ *name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
+ *name_ == roots.resolve_string() || *name_ == roots.then_string()) {
InternalUpdateProtector();
}
}
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index d420ab52d4..af5c60536b 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -212,7 +212,7 @@ class NoRootArrayScope {
class ParameterCount BASE_EMBEDDED {
public:
explicit ParameterCount(Register reg) : reg_(reg), immediate_(0) {}
- explicit ParameterCount(int imm) : reg_(no_reg), immediate_(imm) {}
+ explicit ParameterCount(uint16_t imm) : reg_(no_reg), immediate_(imm) {}
bool is_reg() const { return reg_.is_valid(); }
bool is_immediate() const { return !is_reg(); }
@@ -221,14 +221,14 @@ class ParameterCount BASE_EMBEDDED {
DCHECK(is_reg());
return reg_;
}
- int immediate() const {
+ uint16_t immediate() const {
DCHECK(is_immediate());
return immediate_;
}
private:
const Register reg_;
- const int immediate_;
+ const uint16_t immediate_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
};
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index 1b8bdfdf12..a0ac5d3cd0 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -44,7 +44,7 @@ Object* MapUpdater::GetValue(int descriptor) const {
return *new_value_;
}
DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
- return old_descriptors_->GetValue(descriptor);
+ return old_descriptors_->GetStrongValue(descriptor);
}
FieldType* MapUpdater::GetFieldType(int descriptor) const {
@@ -78,7 +78,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
if (location == kField) {
return handle(descriptors->GetFieldType(descriptor), isolate_);
} else {
- return descriptors->GetValue(descriptor)
+ return descriptors->GetStrongValue(descriptor)
->OptimalType(isolate_, representation);
}
}
@@ -118,7 +118,7 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
// We don't know if this is a first property kind reconfiguration
// and we don't know which value was in this property previously
// therefore we can't treat such a property as constant.
- new_constness_ = kMutable;
+ new_constness_ = PropertyConstness::kMutable;
new_representation_ = representation;
new_field_type_ = field_type;
}
@@ -163,16 +163,16 @@ void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
- Map::GeneralizeField(map, modify_index, new_constness, new_representation,
- new_field_type);
+ Map::GeneralizeField(isolate_, map, modify_index, new_constness,
+ new_representation, new_field_type);
DCHECK_EQ(*old_descriptors_, old_map_->instance_descriptors());
}
MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
- result_map_ = Map::CopyGeneralizeAllFields(old_map_, new_elements_kind_,
- modified_descriptor_, new_kind_,
- new_attributes_, reason);
+ result_map_ = Map::CopyGeneralizeAllFields(
+ isolate_, old_map_, new_elements_kind_, modified_descriptor_, new_kind_,
+ new_attributes_, reason);
state_ = kEnd;
return state_; // Done.
}
@@ -199,13 +199,13 @@ MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
DCHECK_EQ(kField, old_details.location());
if (FLAG_trace_generalization) {
old_map_->PrintGeneralization(
- stdout, "uninitialized field", modified_descriptor_, old_nof_, old_nof_,
- false, old_representation, new_representation_,
+ isolate_, stdout, "uninitialized field", modified_descriptor_, old_nof_,
+ old_nof_, false, old_representation, new_representation_,
handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
}
- Handle<Map> field_owner(old_map_->FindFieldOwner(modified_descriptor_),
- isolate_);
+ Handle<Map> field_owner(
+ old_map_->FindFieldOwner(isolate_, modified_descriptor_), isolate_);
GeneralizeField(field_owner, modified_descriptor_, new_constness_,
new_representation_, new_field_type_);
@@ -224,7 +224,7 @@ MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
MapUpdater::State MapUpdater::FindRootMap() {
DCHECK_EQ(kInitialized, state_);
// Check the state of the root map.
- root_map_ = handle(old_map_->FindRootMap(), isolate_);
+ root_map_ = handle(old_map_->FindRootMap(isolate_), isolate_);
ElementsKind from_kind = root_map_->elements_kind();
ElementsKind to_kind = new_elements_kind_;
if (root_map_->is_deprecated()) {
@@ -232,7 +232,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
result_map_ = handle(
JSFunction::cast(root_map_->GetConstructor())->initial_map(), isolate_);
if (from_kind != to_kind) {
- result_map_ = Map::AsElementsKind(result_map_, to_kind);
+ result_map_ = Map::AsElementsKind(isolate_, result_map_, to_kind);
}
DCHECK(result_map_->is_dictionary_map());
return state_;
@@ -294,7 +294,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
// From here on, use the map with correct elements kind as root map.
if (from_kind != to_kind) {
- root_map_ = Map::AsElementsKind(root_map_, to_kind);
+ root_map_ = Map::AsElementsKind(isolate_, root_map_, to_kind);
}
state_ = kAtRootMap;
return state_; // Not done yet.
@@ -307,7 +307,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
int root_nof = root_map_->NumberOfOwnDescriptors();
for (int i = root_nof; i < old_nof_; ++i) {
PropertyDetails old_details = GetDetails(i);
- Map* transition = TransitionsAccessor(target_map_)
+ Map* transition = TransitionsAccessor(isolate_, target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
old_details.attributes());
if (transition == nullptr) break;
@@ -321,7 +321,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
DCHECK_EQ(old_details.kind(), tmp_details.kind());
DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
if (old_details.kind() == kAccessor &&
- !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ !EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
// TODO(ishell): mutable accessors are not implemented yet.
return CopyGeneralizeAllFields("GenAll_Incompatible");
}
@@ -347,7 +348,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
old_field_type);
} else {
// kDescriptor: Check that the value matches.
- if (!EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ if (!EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
break;
}
}
@@ -374,13 +376,14 @@ MapUpdater::State MapUpdater::FindTargetMap() {
target_descriptors->GetFieldType(modified_descriptor_)));
} else {
DCHECK(details.location() == kField ||
- EqualImmutableValues(*new_value_, target_descriptors->GetValue(
- modified_descriptor_)));
+ EqualImmutableValues(
+ *new_value_,
+ target_descriptors->GetStrongValue(modified_descriptor_)));
}
}
#endif
if (*target_map_ != *old_map_) {
- old_map_->NotifyLeafMapLayoutChange();
+ old_map_->NotifyLeafMapLayoutChange(isolate_);
}
result_map_ = target_map_;
state_ = kEnd;
@@ -390,7 +393,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
// Find the last compatible target map in the transition tree.
for (int i = target_nof; i < old_nof_; ++i) {
PropertyDetails old_details = GetDetails(i);
- Map* transition = TransitionsAccessor(target_map_)
+ Map* transition = TransitionsAccessor(isolate_, target_map_)
.SearchTransition(GetKey(i), old_details.kind(),
old_details.attributes());
if (transition == nullptr) break;
@@ -404,7 +407,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
#endif
if (old_details.kind() == kAccessor &&
- !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ !EqualImmutableValues(GetValue(i),
+ tmp_descriptors->GetStrongValue(i))) {
return CopyGeneralizeAllFields("GenAll_Incompatible");
}
DCHECK(!tmp_map->is_deprecated());
@@ -447,7 +451,8 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
current_offset += old_details.field_width_in_words();
}
Descriptor d(handle(GetKey(i), isolate_),
- handle(old_descriptors_->GetValue(i), isolate_), old_details);
+ MaybeObjectHandle(old_descriptors_->GetValue(i), isolate_),
+ old_details);
new_descriptors->Set(i, &d);
}
@@ -471,16 +476,17 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
PropertyLocation next_location =
old_details.location() == kField ||
target_details.location() == kField ||
- !EqualImmutableValues(target_descriptors->GetValue(i),
+ !EqualImmutableValues(target_descriptors->GetStrongValue(i),
GetValue(i))
? kField
: kDescriptor;
if (!FLAG_track_constant_fields && next_location == kField) {
- next_constness = kMutable;
+ next_constness = PropertyConstness::kMutable;
}
// Ensure that mutable values are stored in fields.
- DCHECK_IMPLIES(next_constness == kMutable, next_location == kField);
+ DCHECK_IMPLIES(next_constness == PropertyConstness::kMutable,
+ next_location == kField);
Representation next_representation =
old_details.representation().generalize(
@@ -502,7 +508,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
isolate_, instance_type, &next_constness, &next_representation,
&next_field_type);
- Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
+ MaybeObjectHandle wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
if (next_kind == kData) {
d = Descriptor::DataField(key, current_offset, next_attributes,
@@ -516,7 +522,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
new_descriptors->Set(i, &d);
} else {
DCHECK_EQ(kDescriptor, next_location);
- DCHECK_EQ(kConst, next_constness);
+ DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
Descriptor d;
@@ -555,10 +561,11 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
!Map::IsInplaceGeneralizableField(
next_constness, next_representation, *next_field_type));
- Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
+ MaybeObjectHandle wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
if (next_kind == kData) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields, next_constness == kMutable);
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ next_constness == PropertyConstness::kMutable);
d = Descriptor::DataField(key, current_offset, next_attributes,
next_constness, next_representation,
wrapped_type);
@@ -570,7 +577,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
new_descriptors->Set(i, &d);
} else {
DCHECK_EQ(kDescriptor, next_location);
- DCHECK_EQ(kConst, next_constness);
+ DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
if (next_kind == kData) {
@@ -596,7 +603,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
Name* name = descriptors->GetKey(i);
PropertyDetails details = descriptors->GetDetails(i);
Map* next =
- TransitionsAccessor(current, &no_allocation)
+ TransitionsAccessor(isolate_, current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
if (next == nullptr) break;
DescriptorArray* next_descriptors = next->instance_descriptors();
@@ -614,8 +621,8 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
break;
}
} else {
- if (!EqualImmutableValues(descriptors->GetValue(i),
- next_descriptors->GetValue(i))) {
+ if (!EqualImmutableValues(descriptors->GetStrongValue(i),
+ next_descriptors->GetStrongValue(i))) {
break;
}
}
@@ -632,13 +639,13 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
DCHECK_NE(old_nof_, split_nof);
PropertyDetails split_details = GetDetails(split_nof);
- TransitionsAccessor transitions(split_map);
+ TransitionsAccessor transitions(isolate_, split_map);
// Invalidate a transition target at |key|.
Map* maybe_transition = transitions.SearchTransition(
GetKey(split_nof), split_details.kind(), split_details.attributes());
if (maybe_transition != nullptr) {
- maybe_transition->DeprecateTransitionTree();
+ maybe_transition->DeprecateTransitionTree(isolate_);
}
// If |maybe_transition| is not nullptr then the transition array already
@@ -648,7 +655,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
}
- old_map_->NotifyLeafMapLayoutChange();
+ old_map_->NotifyLeafMapLayoutChange(isolate_);
if (FLAG_trace_generalization && modified_descriptor_ >= 0) {
PropertyDetails old_details =
@@ -663,34 +670,35 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
old_field_type = handle(
old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
} else {
- old_value =
- handle(old_descriptors_->GetValue(modified_descriptor_), isolate_);
+ old_value = handle(old_descriptors_->GetStrongValue(modified_descriptor_),
+ isolate_);
}
if (new_details.location() == kField) {
new_field_type =
handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
} else {
- new_value =
- handle(new_descriptors->GetValue(modified_descriptor_), isolate_);
+ new_value = handle(new_descriptors->GetStrongValue(modified_descriptor_),
+ isolate_);
}
old_map_->PrintGeneralization(
- stdout, "", modified_descriptor_, split_nof, old_nof_,
+ isolate_, stdout, "", modified_descriptor_, split_nof, old_nof_,
old_details.location() == kDescriptor && new_location_ == kField,
old_details.representation(), new_details.representation(),
old_field_type, old_value, new_field_type, new_value);
}
Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(split_map, new_descriptors, old_nof_);
+ LayoutDescriptor::New(isolate_, split_map, new_descriptors, old_nof_);
- Handle<Map> new_map = Map::AddMissingTransitions(split_map, new_descriptors,
- new_layout_descriptor);
+ Handle<Map> new_map = Map::AddMissingTransitions(
+ isolate_, split_map, new_descriptors, new_layout_descriptor);
// Deprecated part of the transition tree is no longer reachable, so replace
// current instance descriptors in the "survived" part of the tree with
// the new descriptors to maintain descriptors sharing invariant.
- split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
+ split_map->ReplaceDescriptors(isolate_, *new_descriptors,
+ *new_layout_descriptor);
result_map_ = new_map;
state_ = kEnd;
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 7c5e92f2bf..5dcb018373 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -53,7 +53,9 @@ class MapUpdater {
is_transitionable_fast_elements_kind_(
IsTransitionableFastElementsKind(new_elements_kind_)) {
// We shouldn't try to update remote objects.
- DCHECK(!old_map->FindRootMap()->GetConstructor()->IsFunctionTemplateInfo());
+ DCHECK(!old_map->FindRootMap(isolate)
+ ->GetConstructor()
+ ->IsFunctionTemplateInfo());
}
// Prepares for reconfiguring of a property at |descriptor| to data field
@@ -170,7 +172,7 @@ class MapUpdater {
int modified_descriptor_ = -1;
PropertyKind new_kind_ = kData;
PropertyAttributes new_attributes_ = NONE;
- PropertyConstness new_constness_ = kMutable;
+ PropertyConstness new_constness_ = PropertyConstness::kMutable;
PropertyLocation new_location_ = kField;
Representation new_representation_ = Representation::None();
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 3bd45b2e50..4fd27f9807 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -56,13 +56,11 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
int start = -1;
int end = -1;
- Handle<Object> script_handle = factory->undefined_value();
+ Handle<Script> script_handle = isolate->factory()->empty_script();
if (location != nullptr) {
start = location->start_pos();
end = location->end_pos();
- script_handle = Script::GetWrapper(location->script());
- } else {
- script_handle = Script::GetWrapper(isolate->factory()->empty_script());
+ script_handle = location->script();
}
Handle<Object> stack_frames_handle = stack_frames.is_null()
@@ -85,7 +83,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
// and ignore scheduled exceptions callbacks can throw.
// We pass the exception object into the message handler callback though.
- Object* exception_object = isolate->heap()->undefined_value();
+ Object* exception_object = ReadOnlyRoots(isolate).undefined_value();
if (isolate->has_pending_exception()) {
exception_object = isolate->pending_exception();
}
@@ -190,11 +188,11 @@ namespace {
Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
if (!script->has_eval_from_shared())
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
- Handle<SharedFunctionInfo> shared(script->eval_from_shared());
+ Handle<SharedFunctionInfo> shared(script->eval_from_shared(), isolate);
// Find the name of the function calling eval.
- if (shared->Name()->BooleanValue()) {
+ if (shared->Name()->BooleanValue(isolate)) {
return shared->Name();
}
@@ -203,12 +201,13 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
if (!script->has_eval_from_shared())
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
- Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
+ Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared(),
+ isolate);
return eval_from_shared->script()->IsScript()
? eval_from_shared->script()
- : isolate->heap()->undefined_value();
+ : ReadOnlyRoots(isolate).undefined_value();
}
MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
@@ -223,7 +222,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
Handle<Object> eval_from_function_name =
handle(EvalFromFunctionName(isolate, script), isolate);
- if (eval_from_function_name->BooleanValue()) {
+ if (eval_from_function_name->BooleanValue(isolate)) {
Handle<String> str;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, str, Object::ToString(isolate, eval_from_function_name),
@@ -525,10 +524,10 @@ int StringIndexOf(Isolate* isolate, Handle<String> subject,
// 2. subject == pattern.
bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
Handle<String> pattern) {
- if (String::Equals(subject, pattern)) return true;
+ if (String::Equals(isolate, subject, pattern)) return true;
- FlatStringReader subject_reader(isolate, String::Flatten(subject));
- FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
+ FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
+ FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
int pattern_index = pattern_reader.length() - 1;
int subject_index = subject_reader.length() - 1;
@@ -650,7 +649,7 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
if (array->IsWasmInterpretedFrame(frame_ix)) {
code_ = nullptr;
} else {
- code_ = wasm_instance_->compiled_module()->GetNativeModule()->code(
+ code_ = wasm_instance_->module_object()->native_module()->code(
wasm_func_index_);
}
offset_ = array->Offset(frame_ix)->value();
@@ -664,10 +663,10 @@ Handle<Object> WasmStackFrame::GetFunction() const {
Handle<Object> WasmStackFrame::GetFunctionName() {
Handle<Object> name;
- Handle<WasmSharedModuleData> shared(wasm_instance_->module_object()->shared(),
- isolate_);
- if (!WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
- wasm_func_index_)
+ Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
+ isolate_);
+ if (!WasmModuleObject::GetFunctionNameOrNull(isolate_, module_object,
+ wasm_func_index_)
.ToHandle(&name)) {
name = isolate_->factory()->null_value();
}
@@ -677,13 +676,12 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
MaybeHandle<String> WasmStackFrame::ToString() {
IncrementalStringBuilder builder(isolate_);
- Handle<WasmSharedModuleData> shared(wasm_instance_->module_object()->shared(),
- isolate_);
+ Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
+ isolate_);
MaybeHandle<String> module_name =
- WasmSharedModuleData::GetModuleNameOrNull(isolate_, shared);
- MaybeHandle<String> function_name =
- WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
- wasm_func_index_);
+ WasmModuleObject::GetModuleNameOrNull(isolate_, module_object);
+ MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
+ isolate_, module_object, wasm_func_index_);
bool has_name = !module_name.is_null() || !function_name.is_null();
if (has_name) {
if (module_name.is_null()) {
@@ -726,7 +724,7 @@ Handle<Object> WasmStackFrame::Null() const {
bool WasmStackFrame::HasScript() const { return true; }
Handle<Script> WasmStackFrame::GetScript() const {
- return handle(wasm_instance_->module_object()->shared()->script(), isolate_);
+ return handle(wasm_instance_->module_object()->script(), isolate_);
}
AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
@@ -750,15 +748,13 @@ Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script(wasm_instance_->module_object()->shared()->script(),
- isolate_);
+ Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return handle(script->name(), isolate_);
}
Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script(wasm_instance_->module_object()->shared()->script(),
- isolate_);
+ Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
DCHECK_EQ(Script::TYPE_NORMAL, script->type());
return ScriptNameOrSourceUrl(script, isolate_);
}
@@ -768,26 +764,24 @@ int AsmJsWasmStackFrame::GetPosition() const {
int byte_offset =
FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code_,
offset_);
- Handle<WasmSharedModuleData> shared(wasm_instance_->module_object()->shared(),
- isolate_);
+ Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
+ isolate_);
DCHECK_LE(0, byte_offset);
- return WasmSharedModuleData::GetSourcePosition(
- shared, wasm_func_index_, static_cast<uint32_t>(byte_offset),
- is_at_number_conversion_);
+ return WasmModuleObject::GetSourcePosition(module_object, wasm_func_index_,
+ static_cast<uint32_t>(byte_offset),
+ is_at_number_conversion_);
}
int AsmJsWasmStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object()->shared()->script(),
- isolate_);
+ Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetLineNumber(script, GetPosition()) + 1;
}
int AsmJsWasmStackFrame::GetColumnNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object()->shared()->script(),
- isolate_);
+ Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
@@ -949,7 +943,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
DCHECK(raw_stack_array->elements()->IsFixedArray());
- Handle<FrameArray> elems(FrameArray::cast(raw_stack_array->elements()));
+ Handle<FrameArray> elems(FrameArray::cast(raw_stack_array->elements()),
+ isolate);
// If there's a user-specified "prepareStackFrames" function, call it on the
// frames and use its result.
@@ -1046,7 +1041,7 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
// here to improve the efficiency of converting it to a C string and
// other operations that are likely to take place (see GetLocalizedMessage
// for example).
- return String::Flatten(result_string);
+ return String::Flatten(isolate, result_string);
}
@@ -1153,8 +1148,8 @@ MaybeHandle<String> GetStringPropertyOrDefault(Isolate* isolate,
Handle<String> key,
Handle<String> default_str) {
Handle<Object> obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::GetProperty(recv, key),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj,
+ JSObject::GetProperty(isolate, recv, key), String);
Handle<String> str;
if (obj->IsUndefined(isolate)) {
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index c4877ddf17..1d1a07d7b6 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -337,6 +337,7 @@ class ErrorUtils : public AllStatic {
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
+ T(InvalidUnit, "Invalid unit argument for %() '%'") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
@@ -489,6 +490,8 @@ class ErrorUtils : public AllStatic {
"Cannot supply flags when constructing one RegExp from another") \
T(RegExpNonObject, "% getter called on non-object %") \
T(RegExpNonRegExp, "% getter called on non-RegExp object") \
+ T(RelativeDateTimeFormatterBadParameters, \
+ "Incorrect RelativeDateTimeFormatter provided") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
T(SharedArrayBufferTooShort, \
@@ -754,14 +757,7 @@ class ErrorUtils : public AllStatic {
T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
T(DataCloneDeserializationVersionError, \
"Unable to deserialize cloned data due to invalid or unsupported " \
- "version.") \
- /* Builtins-Trace Errors */ \
- T(TraceEventCategoryError, "Trace event category must be a string.") \
- T(TraceEventNameError, "Trace event name must be a string.") \
- T(TraceEventNameLengthError, \
- "Trace event name must not be an empty string.") \
- T(TraceEventPhaseError, "Trace event phase must be a number.") \
- T(TraceEventIDError, "Trace event id must be a number.")
+ "version.")
class MessageTemplate {
public:
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index c4bb55c3d9..e9f84b6100 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -196,7 +196,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -204,9 +204,9 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target));
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this,
+ HeapObject::cast(target));
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -250,13 +250,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -293,7 +286,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
@@ -318,13 +311,6 @@ void Assembler::CheckBuffer() {
}
-void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
- if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
- CheckTrampolinePool();
- }
-}
-
-
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index eb95d7e985..bc7dd6bdc1 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -39,6 +39,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/mips/assembler-mips-inl.h"
namespace v8 {
@@ -183,8 +184,9 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -198,34 +200,27 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- Assembler::target_address_at(pc_, constant_pool_));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_,
- static_cast<Address>(size), flush_mode);
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@@ -268,8 +263,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@@ -318,8 +313,9 @@ const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size),
scratch_register_list_(at.bit()) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -668,6 +664,19 @@ bool Assembler::IsOri(Instr instr) {
return opcode == ORI;
}
+bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rd_field = GetRd(instr);
+ uint32_t rs_field = GetRs(instr);
+ uint32_t rt_field = GetRt(instr);
+ uint32_t rd_reg = static_cast<uint32_t>(rd.code());
+ uint32_t rs_reg = static_cast<uint32_t>(rs.code());
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
+ bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
+ rs_field == rs_reg && rt_field == 0;
+ return res;
+}
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
@@ -902,10 +911,38 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
return;
}
- DCHECK(IsBranch(instr) || IsLui(instr));
+ DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
if (IsBranch(instr)) {
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
+ } else if (IsMov(instr, t8, ra)) {
+ Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+
+ int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);
+
+ if (is_int16(imm_short)) {
+ // Optimize by converting to regular branch with 16-bit
+ // offset
+ Instr instr_b = BEQ;
+ instr_b = SetBranchOffset(pos, target_pos, instr_b);
+
+ instr_at_put(pos, instr_b);
+ instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
+ } else {
+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+ DCHECK_EQ(imm & 3, 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 4 * Assembler::kInstrSize,
+ instr_lui | ((imm >> 16) & kImm16Mask));
+ instr_at_put(pos + 5 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ }
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -3746,9 +3783,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
+ if (options().disable_reloc_info_for_patching) return;
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
@@ -3756,7 +3793,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
}
-
void Assembler::BlockTrampolinePoolFor(int instructions) {
CheckTrampolinePoolQuick(instructions);
BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
@@ -3791,49 +3827,37 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
} else {
b(&after_pool);
- nop();
}
+ nop();
int pool_start = pc_offset();
- if (IsMipsArchVariant(kMips32r6)) {
- for (int i = 0; i < unbound_labels_count_; i++) {
- uint32_t imm32;
- imm32 = jump_address(&after_pool);
- uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- lui(scratch, lui_offset);
- jic(scratch, jic_offset);
- }
- CheckBuffer();
- }
- } else {
- for (int i = 0; i < unbound_labels_count_; i++) {
- uint32_t imm32;
- imm32 = jump_address(&after_pool);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(scratch, (imm32 & kHiMask) >> kLuiShift);
- ori(scratch, scratch, (imm32 & kImm16Mask));
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ {
+ // printf("Generate trampoline %d\n", i);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and
+ // available to be patched.
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc(&after_pool);
+ nop();
+ } else {
+ Label find_pc;
+ or_(t8, ra, zero_reg);
+ bal(&find_pc);
+ or_(t9, ra, zero_reg);
+ bind(&find_pc);
+ or_(ra, t8, zero_reg);
+ lui(t8, 0);
+ ori(t8, t8, 0);
+ addu(t9, t9, t8);
+ // Instruction jr will take or_ from the next trampoline.
+ // in its branch delay slot. This is the expected behavior
+ // in order to decrease size of trampoline pool.
+ jr(t9);
}
- CheckBuffer();
- jr(scratch);
- nop();
}
}
+ nop();
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 1be3704ae2..ea34e7a440 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -387,20 +387,21 @@ constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
class Operand BASE_EMBEDDED {
public:
// Immediate.
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE))
+ V8_INLINE explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
- INLINE(explicit Operand(const ExternalReference& f))
+ V8_INLINE explicit Operand(const ExternalReference& f)
: rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = static_cast<int32_t>(f.address());
}
- INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
+ V8_INLINE explicit Operand(const char* s);
+ V8_INLINE explicit Operand(Object** opp);
+ V8_INLINE explicit Operand(Context** cpp);
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi* value)
+ : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -408,10 +409,10 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedCode(CodeStub* stub);
// Register.
- INLINE(explicit Operand(Register rm)) : rm_(rm) {}
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
// Return true if this is a register operand.
- INLINE(bool is_reg() const);
+ V8_INLINE bool is_reg() const;
inline int32_t immediate() const;
@@ -491,9 +492,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -566,18 +565,19 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
- INLINE(static void set_target_address_at)
- (Address pc, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
set_target_value_at(pc, static_cast<uint32_t>(target), icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
- INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
+ V8_INLINE static Address target_address_at(Address pc,
+ Address constant_pool) {
return target_address_at(pc);
}
- INLINE(static void set_target_address_at(
+ V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
set_target_address_at(pc, target, icache_flush_mode);
}
@@ -612,6 +612,11 @@ class Assembler : public AssemblerBase {
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = 4;
+ // Difference between address of current opcode and target address offset,
+ // when we are generatinga sequence of instructions for long relative PC
+ // branches
+ static constexpr int kLongBranchPCOffset = 12;
+
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
// MIPS platform, as Code, Embedded Object or External-reference pointers
@@ -644,11 +649,8 @@ class Assembler : public AssemblerBase {
// Max offset for compact branch instructions with 26-bit offset field
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
-#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
-#else
- static constexpr int kTrampolineSlotsSize = 4 * kInstrSize;
-#endif
+ static constexpr int kTrampolineSlotsSize =
+ IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 8 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1765,6 +1767,7 @@ class Assembler : public AssemblerBase {
static bool IsBeqc(Instr instr);
static bool IsBnec(Instr instr);
static bool IsJicOrJialc(Instr instr);
+ static bool IsMov(Instr instr, Register rd, Register rs);
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1881,6 +1884,9 @@ class Assembler : public AssemblerBase {
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
}
bool is_trampoline_pool_blocked() const {
@@ -1916,7 +1922,11 @@ class Assembler : public AssemblerBase {
}
}
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
inline void CheckBuffer();
@@ -2186,23 +2196,9 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- protected:
- // TODO(neis): Make private if its use can be moved out of TurboAssembler.
- void RequestHeapObject(HeapObjectRequest request);
-
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
- std::forward_list<HeapObjectRequest> heap_object_requests_;
-
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e545f88178..8fc1c35cc7 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -26,21 +26,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ sll(t9, a0, kPointerSizeLog2);
- __ Addu(t9, sp, t9);
- __ sw(a1, MemOperand(t9, 0));
- __ Push(a1);
- __ Push(a2);
- __ Addu(a0, a0, Operand(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
@@ -223,6 +208,17 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
+ if (FLAG_embedded_builtins) {
+ if (masm->root_array_available() &&
+ isolate()->ShouldLoadConstantsFromRootList()) {
+ // This is basically an inlined version of Call(Handle<Code>) that loads
+ // the code object into kScratchReg instead of t9.
+ __ Move(t9, target);
+ __ IndirectLoadConstant(kScratchReg, GetCode());
+ __ Call(kScratchReg, Code::kHeaderSize - kHeapObjectTag);
+ return;
+ }
+ }
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
@@ -311,280 +307,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq, a3, Operand(kind));
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // a0 - number of arguments
- // a1 - constructor?
- // sp[0] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ And(kScratchReg, a3, Operand(1));
- __ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ Addu(a3, a3, Operand(1));
-
- if (FLAG_debug_code) {
- __ lw(t1, FieldMemOperand(a2, 0));
- __ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, AbortReason::kExpectedAllocationSite, t1,
- Operand(kScratchReg));
- }
-
- // Save the resulting elements kind in type info. We can't just store a3
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ lw(t0, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sw(t0, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq, a3, Operand(kind));
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ And(kScratchReg, a0, a0);
- __ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
- // -- a1 : constructor
- // -- a2 : AllocationSite or undefined
- // -- a3 : Original constructor
- // -- sp[0] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ SmiTst(t0, kScratchReg);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
- kScratchReg, Operand(zero_reg));
- __ GetObjectType(t0, t0, t1);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
- Operand(MAP_TYPE));
-
- // We should either have undefined in a2 or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(a2, t0);
- }
-
- // Enter the context of the Array function.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- Label subclassing;
- __ Branch(&subclassing, ne, a1, Operand(a3));
-
- Label no_info;
- // Get the elements kind and case on that.
- __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
- __ Branch(&no_info, eq, a2, Operand(kScratchReg));
-
- __ lw(a3, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(a3);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing.
- __ bind(&subclassing);
- __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
- __ sw(a1, MemOperand(kScratchReg));
- __ li(kScratchReg, Operand(3));
- __ addu(a0, a0, kScratchReg);
- __ Push(a3, a2);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
-
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0, lo, a0, Operand(1));
-
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN, hi, a0, Operand(1));
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument.
- __ lw(kScratchReg, MemOperand(sp, 0));
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
- }
-
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc
- // -- a1 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ SmiTst(a3, kScratchReg);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
- kScratchReg, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
- Operand(MAP_TYPE));
- }
-
- // Figure out the right elements kind.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into a3. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(a3);
-
- if (FLAG_debug_code) {
- Label done;
- __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(HOLEY_ELEMENTS));
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -618,7 +340,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
// Additional parameter is the address of the actual callback.
- __ li(t9, Operand(thunk_ref));
+ __ li(t9, thunk_ref);
__ jmp(&end_profiler_check);
__ bind(&profiler_disabled);
@@ -626,7 +348,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
- __ li(s5, Operand(next_address));
+ __ li(s5, next_address);
__ lw(s0, MemOperand(s5, kNextOffset));
__ lw(s1, MemOperand(s5, kLimitOffset));
__ lw(s2, MemOperand(s5, kLevelOffset));
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index ae00e70785..6478b4e7c4 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -145,6 +145,9 @@ const uint32_t kLeastSignificantByteInInt32Offset = 3;
namespace v8 {
namespace internal {
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -219,6 +222,11 @@ const int32_t kPrefHintStoreRetained = 7;
const int32_t kPrefHintWritebackInvalidate = 25;
const int32_t kPrefHintPrepareForStore = 30;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 34d748a26b..f64953de12 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -84,7 +84,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(a1, &context_check);
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(type())); // Bailout type.
+ __ li(a1, Operand(static_cast<int>(deopt_kind())));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/mips/frame-constants-mips.h
index e90c7d957f..243ad6cdc2 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/mips/frame-constants-mips.h
@@ -5,6 +5,9 @@
#ifndef V8_MIPS_FRAME_CONSTANTS_MIPS_H_
#define V8_MIPS_FRAME_CONSTANTS_MIPS_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -34,6 +37,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used.
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 4;
+ static constexpr int kNumberOfSavedFpParamRegs = 7;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 5bdcc754df..e0d122fc49 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -57,13 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return t1; }
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-const Register MathPowTaggedDescriptor::exponent() { return a2; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -74,13 +67,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -177,24 +170,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- Register registers[] = {a1, a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -203,54 +179,19 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -305,7 +246,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (argc)
@@ -315,6 +258,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 1e83fbe7f6..c254e4a78e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -9,7 +9,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -21,14 +20,15 @@
#include "src/mips/macro-assembler-mips.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -40,17 +40,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
- has_double_zero_reg_set_(false) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
@@ -138,14 +127,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- lw(destination, MemOperand(kRootRegister, index << kPointerSizeLog2));
+ lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- lw(destination, MemOperand(kRootRegister, index << kPointerSizeLog2));
+ lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
@@ -223,6 +212,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Addu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
@@ -810,6 +800,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
slti(rd, rs, rt.immediate());
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = rd == at ? t8 : temps.Acquire();
DCHECK(rs != scratch);
@@ -833,6 +824,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = rd == at ? t8 : temps.Acquire();
DCHECK(rs != scratch);
@@ -847,6 +839,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
slt(rd, rt.rm(), rs);
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
@@ -861,6 +854,7 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
sltu(rd, rt.rm(), rs);
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
@@ -885,6 +879,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
slt(rd, rt.rm(), rs);
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
@@ -898,6 +893,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
sltu(rd, rt.rm(), rs);
} else {
// li handles the relocation.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
@@ -915,6 +911,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
} else {
if (rt.is_reg()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
subu(scratch, zero_reg, rt.rm());
@@ -984,71 +981,67 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
// Word Swap Byte
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
-
- Register input = src;
- if (operand_size == 2) {
- input = dest;
- Seh(dest, src);
- } else if (operand_size == 1) {
- input = dest;
- Seb(dest, src);
- }
- // No need to do any preparation if operand_size is 4
+ DCHECK(operand_size == 2 || operand_size == 4);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- wsbh(dest, input);
- rotr(dest, dest, 16);
+ if (operand_size == 2) {
+ wsbh(dest, src);
+ seh(dest, dest);
+ } else {
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
+ }
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
- Register tmp = at;
- Register tmp2 = t8;
- DCHECK(dest != tmp && dest != tmp2);
- DCHECK(src != tmp && src != tmp2);
+ if (operand_size == 2) {
+ DCHECK(src != at && dest != at);
+ srl(at, src, 8);
+ andi(at, at, 0xFF);
+ sll(dest, src, 8);
+ or_(dest, dest, at);
+
+ // Sign-extension
+ sll(dest, dest, 16);
+ sra(dest, dest, 16);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register tmp = at;
+ Register tmp2 = t8;
+ DCHECK(dest != tmp && dest != tmp2);
+ DCHECK(src != tmp && src != tmp2);
- andi(tmp2, input, 0xFF);
- sll(tmp, tmp2, 24);
+ andi(tmp2, src, 0xFF);
+ sll(tmp, tmp2, 24);
- andi(tmp2, input, 0xFF00);
- sll(tmp2, tmp2, 8);
- or_(tmp, tmp, tmp2);
+ andi(tmp2, src, 0xFF00);
+ sll(tmp2, tmp2, 8);
+ or_(tmp, tmp, tmp2);
- srl(tmp2, input, 8);
- andi(tmp2, tmp2, 0xFF00);
- or_(tmp, tmp, tmp2);
+ srl(tmp2, src, 8);
+ andi(tmp2, tmp2, 0xFF00);
+ or_(tmp, tmp, tmp2);
- srl(tmp2, input, 24);
- or_(dest, tmp, tmp2);
+ srl(tmp2, src, 24);
+ or_(dest, tmp, tmp2);
+ }
}
}
void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2);
+ DCHECK_EQ(operand_size, 2);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- Register input = src;
- if (operand_size == 1) {
- input = dest;
- andi(dest, src, 0xFF);
- } else {
- input = dest;
- andi(dest, src, 0xFFFF);
- }
- // No need to do any preparation if operand_size is 4
-
- wsbh(dest, input);
- rotr(dest, dest, 16);
+ wsbh(dest, src);
+ andi(dest, dest, 0xFFFF);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
- if (operand_size == 1) {
- sll(dest, src, 24);
- } else {
- Register tmp = at;
+ DCHECK(src != at && dest != at);
+ srl(at, src, 8);
+ andi(at, at, 0xFF);
+ sll(dest, src, 8);
+ or_(dest, dest, at);
- andi(tmp, src, 0xFF00);
- sll(dest, src, 24);
- sll(tmp, tmp, 8);
- or_(dest, tmp, dest);
- }
+ // Zero-extension
+ andi(dest, dest, 0xFFFF);
}
}
@@ -1291,6 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
swc1(nextfpreg,
MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
} else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(IsFp64Mode() || IsFpxxMode());
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
@@ -1333,22 +1327,22 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
}
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
@@ -1669,6 +1663,7 @@ void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
ins_(rt, rs, pos, size);
} else {
DCHECK(rt != t8 && rs != t8);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Subu(scratch, zero_reg, Operand(1));
@@ -1840,6 +1835,7 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd == fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Mfhc1(t8, fs);
trunc_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1850,6 +1846,7 @@ void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd == fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Mfhc1(t8, fs);
round_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1860,6 +1857,7 @@ void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd == fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Mfhc1(t8, fs);
floor_w_d(fd, fs);
Mthc1(t8, fs);
@@ -1870,6 +1868,7 @@ void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd == fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Mfhc1(t8, fs);
ceil_w_d(fd, fs);
Mthc1(t8, fs);
@@ -2330,6 +2329,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
const Operand& rt, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
mov(rd, zero_reg);
@@ -2479,6 +2479,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK_EQ(cc, 0);
@@ -2504,6 +2505,7 @@ void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK_EQ(cc, 0);
@@ -2529,6 +2531,7 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void TurboAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9);
Register mask = t8;
Register scratch = t9;
@@ -2605,6 +2608,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
@@ -2693,6 +2697,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2717,7 +2722,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -2727,7 +2733,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
lw(result, MemOperand(sp, 0));
Addu(sp, sp, Operand(kDoubleSize));
@@ -3609,54 +3619,28 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
-
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- lw(destination, FieldMemOperand(destination, FixedArray::kHeaderSize +
- index * kPointerSize));
+ lw(destination,
+ FieldMemOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ lw(destination, MemOperand(kRootRegister, offset));
+}
- lw(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Addu(destination, kRootRegister, offset);
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
@@ -3764,8 +3748,9 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
uint32_t lui_offset, jic_offset;
UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
- DCHECK(MustUseReg(rmode));
- RecordRelocInfo(rmode, target);
+ if (MustUseReg(rmode)) {
+ RecordRelocInfo(rmode, target);
+ }
lui(t9, lui_offset);
Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
} else {
@@ -3785,13 +3770,26 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
- return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t9, 0, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
@@ -3943,13 +3941,25 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(t9, 0, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4464,9 +4474,9 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- lw(expected_reg,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ lhu(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(function, new_target, expected, actual, flag);
@@ -4538,6 +4548,7 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
void TurboAssembler::AddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4567,6 +4578,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left,
void TurboAssembler::SubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4596,6 +4608,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left,
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
Register scratch2 = t9;
@@ -4622,8 +4635,8 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
}
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4631,9 +4644,8 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
PrepareCEntryArgs(f->nargs);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, a0, a1));
+ Call(centry, Code::kHeaderSize - kHeapObjectTag);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -4731,18 +4743,17 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
Move(a0, Smi::FromInt(static_cast<int>(reason)));
@@ -4786,14 +4797,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
- int stack_offset, fp_offset;
- if (type == StackFrame::INTERNAL) {
- stack_offset = -4 * kPointerSize;
- fp_offset = 2 * kPointerSize;
- } else {
- stack_offset = -3 * kPointerSize;
- fp_offset = 1 * kPointerSize;
- }
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int stack_offset = -3 * kPointerSize;
+ const int fp_offset = 1 * kPointerSize;
addiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
sw(ra, MemOperand(sp, stack_offset));
@@ -4802,14 +4808,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
sw(t9, MemOperand(sp, stack_offset));
- if (type == StackFrame::INTERNAL) {
- DCHECK_EQ(stack_offset, kPointerSize);
- li(t9, CodeObject());
- sw(t9, MemOperand(sp, 0));
- } else {
- DCHECK_EQ(stack_offset, 0);
- }
// Adjust FP to point to saved FP.
+ DCHECK_EQ(stack_offset, 0);
Addu(fp, sp, Operand(fp_offset));
}
@@ -4834,6 +4834,7 @@ void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -4919,6 +4920,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -5060,20 +5062,9 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, t8);
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
- Operand(zero_reg));
- GetObjectType(object, t8, t8);
- Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
- Operand(FIXED_ARRAY_TYPE));
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
@@ -5088,6 +5079,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
@@ -5101,6 +5093,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
@@ -5113,6 +5106,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
@@ -5140,9 +5134,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
- LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
bind(&done_checking);
}
}
@@ -5170,9 +5164,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
- mfc1(t8, src1);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5216,9 +5213,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- mfc1(t8, src1);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5262,9 +5262,12 @@ void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
BranchTrueShortF(&return_left);
// Left equals right => check for -0.
- Mfhc1(t8, src1);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Mfhc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5309,9 +5312,12 @@ void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- Mfhc1(t8, src1);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Mfhc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5385,6 +5391,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
// Linux/MIPS convention demands that register t9 contains
// the address of the function being call in case of
// Position independent code
+ BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, function);
CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}
@@ -5438,17 +5445,20 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
// allow preemption, so the return address in the link register
// stays correct.
- if (function_base != t9) {
- mov(t9, function_base);
- function_base = t9;
- }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function_base != t9) {
+ mov(t9, function_base);
+ function_base = t9;
+ }
- if (function_offset != 0) {
- addiu(t9, t9, function_offset);
- function_offset = 0;
- }
+ if (function_offset != 0) {
+ addiu(t9, t9, function_offset);
+ function_offset = 0;
+ }
- Call(function_base, function_offset);
+ Call(function_base, function_offset);
+ }
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 8d7f20079b..248dd4f905 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -8,6 +8,7 @@
#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -24,9 +25,13 @@ constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
+
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -126,20 +131,13 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -157,6 +155,7 @@ class TurboAssembler : public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
li(kRootRegister, Operand(roots_array_start));
+ Addu(kRootRegister, kRootRegister, kRootRegisterBias);
}
// Jump unconditionally to given label.
@@ -250,11 +249,10 @@ class TurboAssembler : public Assembler {
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -279,7 +277,9 @@ class TurboAssembler : public Assembler {
COND_ARGS);
void Call(Label* target);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
Call(target, rmode);
}
@@ -551,9 +551,9 @@ class TurboAssembler : public Assembler {
void CallStubDelayed(CodeStub* stub, COND_ARGS);
#undef COND_ARGS
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
@@ -568,7 +568,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input);
+ DoubleRegister double_input, StubCallMode stub_mode);
// Conditional move.
void Movz(Register rd, Register rs, Register rt);
@@ -813,7 +813,7 @@ class TurboAssembler : public Assembler {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index) override;
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
Register src1, const Operand& src2);
@@ -834,6 +834,16 @@ class TurboAssembler : public Assembler {
void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ li(kScratchReg, Operand(b));
+ Branch(dest, eq, a, Operand(kScratchReg));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ li(kScratchReg, Operand(b));
+ Branch(dest, lt, a, Operand(kScratchReg));
+ }
+
// Push a standard frame, consisting of ra, fp, context and JS function.
void PushStandardFrame(Register function_reg);
@@ -846,9 +856,6 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
protected:
void BranchLong(Label* L, BranchDelaySlot bdslot);
@@ -856,14 +863,8 @@ class TurboAssembler : public Assembler {
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* const isolate_;
- bool has_double_zero_reg_set_;
+ bool has_double_zero_reg_set_ = false;
void CallCFunctionHelper(Register function_base, int16_t function_offset,
int num_reg_arguments, int num_double_arguments);
@@ -921,7 +922,11 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
@@ -1140,9 +1145,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void AssertNotSmi(Register object);
void AssertSmi(Register object);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 2bace20c7d..d58b899755 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -473,7 +473,7 @@ void MipsDebugger::Debug() {
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -751,7 +751,7 @@ void MipsDebugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" stop and give control to the Debugger.\n");
PrintF(" All stop codes are watched:\n");
PrintF(" - They can be enabled / disabled: the Simulator\n");
PrintF(" will / won't stop when hitting them.\n");
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 08f22d9980..d0a1688367 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -164,7 +164,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -173,9 +173,9 @@ void RelocInfo::set_target_object(HeapObject* target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target));
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this,
+ HeapObject::cast(target));
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -212,13 +212,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -255,7 +248,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
@@ -280,13 +273,6 @@ void Assembler::CheckBuffer() {
}
-void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
- if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
- CheckTrampolinePool();
- }
-}
-
-
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index b06516730e..edb17b7b22 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -38,6 +38,7 @@
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/mips64/assembler-mips64-inl.h"
namespace v8 {
@@ -161,8 +162,9 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -176,34 +178,27 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- (Assembler::target_address_at(pc_, constant_pool_)));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_,
- static_cast<Address>(size), flush_mode);
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@@ -247,8 +242,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@@ -296,8 +291,9 @@ const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size),
scratch_register_list_(at.bit()) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -589,6 +585,19 @@ bool Assembler::IsBnec(Instr instr) {
return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
}
+bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rd_field = GetRd(instr);
+ uint32_t rs_field = GetRs(instr);
+ uint32_t rt_field = GetRt(instr);
+ uint32_t rd_reg = static_cast<uint32_t>(rd.code());
+ uint32_t rs_reg = static_cast<uint32_t>(rs.code());
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
+ bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
+ rs_field == rs_reg && rt_field == 0;
+ return res;
+}
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -869,6 +878,34 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_ori | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
+ } else if (IsMov(instr, t8, ra)) {
+ Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+
+ int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);
+
+ if (is_int16(imm_short)) {
+ // Optimize by converting to regular branch with 16-bit
+ // offset
+ Instr instr_b = BEQ;
+ instr_b = SetBranchOffset(pos, target_pos, instr_b);
+
+ instr_at_put(pos, instr_b);
+ instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
+ } else {
+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+ DCHECK_EQ(imm & 3, 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 4 * Assembler::kInstrSize,
+ instr_lui | ((imm >> 16) & kImm16Mask));
+ instr_at_put(pos + 5 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ }
} else if (IsJ(instr) || IsJal(instr)) {
int32_t imm28 = target_pos - pos;
DCHECK_EQ(imm28 & 3, 0);
@@ -4097,9 +4134,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
if (!RelocInfo::IsNone(rinfo.rmode())) {
+ if (options().disable_reloc_info_for_patching) return;
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
@@ -4147,15 +4185,30 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
+ { // Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(&after_pool);
+ if (kArchVariant == kMips64r6) {
+ bc(&after_pool);
+ nop();
+ } else {
+ Label find_pc;
+ or_(t8, ra, zero_reg);
+ bal(&find_pc);
+ or_(t9, ra, zero_reg);
+ bind(&find_pc);
+ or_(ra, t8, zero_reg);
+ lui(t8, 0);
+ ori(t8, t8, 0);
+ daddu(t9, t9, t8);
+ // Instruction jr will take or_ from the next trampoline.
+ // in its branch delay slot. This is the expected behavior
+ // in order to decrease size of trampoline pool.
+ jr(t9);
+ }
}
- nop();
}
+ nop();
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 93323e3dd6..f94db35974 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -394,20 +394,21 @@ constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
class Operand BASE_EMBEDDED {
public:
// Immediate.
- INLINE(explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE))
+ V8_INLINE explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
- INLINE(explicit Operand(const ExternalReference& f))
+ V8_INLINE explicit Operand(const ExternalReference& f)
: rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = static_cast<int64_t>(f.address());
}
- INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
+ V8_INLINE explicit Operand(const char* s);
+ V8_INLINE explicit Operand(Object** opp);
+ V8_INLINE explicit Operand(Context** cpp);
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi* value)
+ : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -415,10 +416,10 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedCode(CodeStub* stub);
// Register.
- INLINE(explicit Operand(Register rm)) : rm_(rm) {}
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
// Return true if this is a register operand.
- INLINE(bool is_reg() const);
+ V8_INLINE bool is_reg() const;
inline int64_t immediate() const;
@@ -498,9 +499,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -574,18 +573,19 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
- INLINE(static void set_target_address_at(
+ V8_INLINE static void set_target_address_at(
Address pc, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
set_target_value_at(pc, target, icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
- INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
+ V8_INLINE static Address target_address_at(Address pc,
+ Address constant_pool) {
return target_address_at(pc);
}
- INLINE(static void set_target_address_at(
+ V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
set_target_address_at(pc, target, icache_flush_mode);
}
@@ -622,6 +622,11 @@ class Assembler : public AssemblerBase {
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = 4;
+ // Difference between address of current opcode and target address offset,
+ // when we are generatinga sequence of instructions for long relative PC
+ // branches
+ static constexpr int kLongBranchPCOffset = 12;
+
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
// MIPS platform, as Code, Embedded Object or External-reference pointers
@@ -655,7 +660,8 @@ class Assembler : public AssemblerBase {
// Max offset for compact branch instructions with 26-bit offset field
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
- static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+ static constexpr int kTrampolineSlotsSize =
+ kArchVariant == kMips64r6 ? 2 * kInstrSize : 8 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1845,6 +1851,7 @@ class Assembler : public AssemblerBase {
static bool IsJ(Instr instr);
static bool IsLui(Instr instr);
static bool IsOri(Instr instr);
+ static bool IsMov(Instr instr, Register rd, Register rs);
static bool IsJal(Instr instr);
static bool IsJr(Instr instr);
@@ -1950,6 +1957,9 @@ class Assembler : public AssemblerBase {
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
}
bool is_trampoline_pool_blocked() const {
@@ -1985,7 +1995,11 @@ class Assembler : public AssemblerBase {
}
}
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
private:
// Avoid overflows for displacements etc.
@@ -2250,23 +2264,9 @@ class Assembler : public AssemblerBase {
RegList scratch_register_list_;
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- protected:
- // TODO(neis): Make private if its use can be moved out of TurboAssembler.
- void RequestHeapObject(HeapObjectRequest request);
-
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
- std::forward_list<HeapObjectRequest> heap_object_requests_;
-
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 321fa44f55..5ed97cc004 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -25,21 +25,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ dsll(t9, a0, kPointerSizeLog2);
- __ Daddu(t9, sp, t9);
- __ Sd(a1, MemOperand(t9, 0));
- __ Push(a1);
- __ Push(a2);
- __ Daddu(a0, a0, 3);
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
@@ -222,6 +207,19 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
+ if (FLAG_embedded_builtins) {
+ if (masm->root_array_available() &&
+ isolate()->ShouldLoadConstantsFromRootList()) {
+ // This is basically an inlined version of Call(Handle<Code>) that loads
+ // the code object into kScratchReg instead of t9.
+ __ Move(t9, target);
+ __ IndirectLoadConstant(kScratchReg, GetCode());
+ __ Daddu(kScratchReg, kScratchReg,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(kScratchReg);
+ return;
+ }
+ }
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
@@ -311,280 +309,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq, a3, Operand(kind));
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // a0 - number of arguments
- // a1 - constructor?
- // sp[0] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ And(kScratchReg, a3, Operand(1));
- __ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ Daddu(a3, a3, Operand(1));
-
- if (FLAG_debug_code) {
- __ Ld(a5, FieldMemOperand(a2, 0));
- __ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, AbortReason::kExpectedAllocationSite, a5,
- Operand(kScratchReg));
- }
-
- // Save the resulting elements kind in type info. We can't just store a3
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ld(a4, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ Sd(a4, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq, a3, Operand(kind));
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ And(kScratchReg, a0, a0);
- __ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count() == ANY)
- // -- a1 : constructor
- // -- a2 : AllocationSite or undefined
- // -- a3 : new target
- // -- sp[0] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ SmiTst(a4, kScratchReg);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
- kScratchReg, Operand(zero_reg));
- __ GetObjectType(a4, a4, a5);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
- Operand(MAP_TYPE));
-
- // We should either have undefined in a2 or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(a2, a4);
- }
-
- // Enter the context of the Array function.
- __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- Label subclassing;
- __ Branch(&subclassing, ne, a1, Operand(a3));
-
- Label no_info;
- // Get the elements kind and case on that.
- __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
- __ Branch(&no_info, eq, a2, Operand(kScratchReg));
-
- __ Ld(a3, FieldMemOperand(
- a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(a3);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing.
- __ bind(&subclassing);
- __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
- __ Sd(a1, MemOperand(kScratchReg));
- __ li(kScratchReg, Operand(3));
- __ Daddu(a0, a0, kScratchReg);
- __ Push(a3, a2);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
-
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0, lo, a0, Operand(1));
-
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN, hi, a0, Operand(1));
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument.
- __ Ld(kScratchReg, MemOperand(sp, 0));
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
- }
-
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc
- // -- a1 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ SmiTst(a3, kScratchReg);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
- kScratchReg, Operand(zero_reg));
- __ GetObjectType(a3, a3, a4);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
- Operand(MAP_TYPE));
- }
-
- // Figure out the right elements kind.
- __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into a3. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(a3);
-
- if (FLAG_debug_code) {
- Label done;
- __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(HOLEY_ELEMENTS));
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
DCHECK(static_cast<int>(offset) == offset);
@@ -620,7 +344,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
// Additional parameter is the address of the actual callback.
- __ li(t9, Operand(thunk_ref));
+ __ li(t9, thunk_ref);
__ jmp(&end_profiler_check);
__ bind(&profiler_disabled);
@@ -628,7 +352,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
- __ li(s5, Operand(next_address));
+ __ li(s5, next_address);
__ Ld(s0, MemOperand(s5, kNextOffset));
__ Ld(s1, MemOperand(s5, kLimitOffset));
__ Lw(s2, MemOperand(s5, kLevelOffset));
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 32f306b9c5..9f3869dff2 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -84,8 +84,10 @@ const uint32_t kMipsSdlOffset = 0;
#if defined(V8_TARGET_LITTLE_ENDIAN)
const uint32_t kLeastSignificantByteInInt32Offset = 0;
+const uint32_t kLessSignificantWordInDoublewordOffset = 0;
#elif defined(V8_TARGET_BIG_ENDIAN)
const uint32_t kLeastSignificantByteInInt32Offset = 3;
+const uint32_t kLessSignificantWordInDoublewordOffset = 4;
#else
#error Unknown endianness
#endif
@@ -95,7 +97,6 @@ const uint32_t kLeastSignificantByteInInt32Offset = 3;
#endif
#include <inttypes.h>
-
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@@ -106,6 +107,9 @@ const uint32_t kLeastSignificantByteInInt32Offset = 3;
namespace v8 {
namespace internal {
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -180,6 +184,11 @@ const int32_t kPrefHintStoreRetained = 7;
const int32_t kPrefHintWritebackInvalidate = 25;
const int32_t kPrefHintPrepareForStore = 30;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index f6aa77d46c..1d3e88372e 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -84,7 +84,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(a1, &context_check);
__ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(type())); // Bailout type.
+ __ li(a1, Operand(static_cast<int>(deopt_kind())));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
// a4: already has fp-to-sp delta.
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/mips64/frame-constants-mips64.h
index 22f01002c7..e91ccf9480 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/mips64/frame-constants-mips64.h
@@ -5,6 +5,9 @@
#ifndef V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
#define V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -34,6 +37,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used.
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedFpParamRegs = 7;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(7);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 95b7bba51e..94058aa721 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -57,13 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return a5; }
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-const Register MathPowTaggedDescriptor::exponent() { return a2; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -74,13 +67,13 @@ const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
@@ -177,24 +170,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- Register registers[] = {a1, a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -203,54 +179,19 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- allocation site with elements kind
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
@@ -305,7 +246,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (argc)
@@ -315,6 +258,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 83ceae69d1..889d09f27e 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -9,7 +9,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -21,14 +20,15 @@
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -40,17 +40,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- isolate_(isolate),
- has_double_zero_reg_set_(false) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
@@ -138,14 +127,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
}
@@ -223,6 +212,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Daddu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
@@ -949,6 +939,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
@@ -972,6 +963,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
@@ -987,6 +979,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
@@ -1001,6 +994,7 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
@@ -1025,6 +1019,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
@@ -1038,6 +1033,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
@@ -1128,23 +1124,14 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
// Change endianness
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
- operand_size == 8);
+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
- if (operand_size == 1) {
- seb(src, src);
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
- } else if (operand_size == 2) {
- seh(src, src);
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ if (operand_size == 2) {
+ wsbh(dest, src);
+ seh(dest, dest);
} else if (operand_size == 4) {
- sll(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
} else {
dsbh(dest, src);
dshd(dest, dest);
@@ -1153,20 +1140,14 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
- DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
- if (operand_size == 1) {
- andi(src, src, 0xFF);
- dsbh(dest, src);
- dshd(dest, dest);
- } else if (operand_size == 2) {
- andi(src, src, 0xFFFF);
- dsbh(dest, src);
- dshd(dest, dest);
+ DCHECK(operand_size == 2 || operand_size == 4);
+ if (operand_size == 2) {
+ wsbh(dest, src);
+ andi(dest, dest, 0xFFFF);
} else {
- dsll32(src, src, 0);
- dsrl32(src, src, 0);
- dsbh(dest, src);
- dshd(dest, dest);
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
+ dinsu_(dest, zero_reg, 32, 32);
}
}
@@ -1572,22 +1553,22 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(dst, value);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
li(dst, Operand(value), mode);
}
@@ -2141,11 +2122,14 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
mfc1(t8, fs);
Cvt_d_uw(fd, t8);
}
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
@@ -2157,12 +2141,14 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_d_ul(fd, t8);
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
@@ -2190,12 +2176,14 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_s_uw(fd, t8);
}
void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
@@ -2207,12 +2195,14 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_s_ul(fd, t8);
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
@@ -2263,6 +2253,7 @@ void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
void MacroAssembler::Trunc_l_ud(FPURegister fd,
FPURegister fs,
FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Load to GPR.
dmfc1(t8, fs);
// Reset sign bit.
@@ -2868,6 +2859,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
const Operand& rt, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
mov(rd, zero_reg);
@@ -3085,6 +3077,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
srl(scratch, rs, 1);
@@ -3113,6 +3106,7 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
dsrl(scratch, rs, 1);
@@ -3202,6 +3196,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
Label* done) {
DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
@@ -3224,7 +3219,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -3234,7 +3230,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
Ld(result, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kDoubleSize));
@@ -3395,6 +3395,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
// Be careful to always use shifted_branch_offset only just before the
@@ -3609,6 +3610,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
if (!is_near(L, OffsetSize::kOffset16)) return false;
UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
int32_t offset32;
@@ -4118,54 +4120,28 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
-
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- Ld(destination, FieldMemOperand(destination, FixedArray::kHeaderSize +
- index * kPointerSize));
+ Ld(destination,
+ FieldMemOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld(destination, MemOperand(kRootRegister, offset));
+}
- Ld(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Daddu(destination, kRootRegister, Operand(offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
@@ -4200,9 +4176,12 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue, t9 is expected to be clobbered anyway.
- li(t9, Operand(target, rmode));
- Jump(t9, al, zero_reg, Operand(zero_reg), bd);
- bind(&skip);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t9, Operand(target, rmode));
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
+ }
}
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -4215,14 +4194,27 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(t9, cond, rs, rt, bd);
- return;
+ if (FLAG_embedded_builtins) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(t9, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t9, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
@@ -4306,14 +4298,26 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(t9, code);
- Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(t9, cond, rs, rt, bd);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(t9, code);
+ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(t9, cond, rs, rt, bd);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(t9, cond, rs, rt, bd);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4781,11 +4785,10 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register temp_reg = t0;
Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // The argument count is stored as int32_t on 64-bit platforms.
- // TODO(plind): Smi on 32-bit platforms.
- Lw(expected_reg,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(a1, new_target, expected, actual, flag);
}
@@ -4857,6 +4860,7 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
void TurboAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4886,6 +4890,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left,
void TurboAssembler::DsubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4915,6 +4920,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left,
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
@@ -4941,8 +4947,8 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
xor_(overflow, overflow, scratch);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4950,9 +4956,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
PrepareCEntryArgs(f->nargs);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, a0, a1));
+ Daddu(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -5050,18 +5056,17 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
Move(a0, Smi::FromInt(static_cast<int>(reason)));
@@ -5106,14 +5111,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
- int stack_offset, fp_offset;
- if (type == StackFrame::INTERNAL) {
- stack_offset = -4 * kPointerSize;
- fp_offset = 2 * kPointerSize;
- } else {
- stack_offset = -3 * kPointerSize;
- fp_offset = 1 * kPointerSize;
- }
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int stack_offset = -3 * kPointerSize;
+ const int fp_offset = 1 * kPointerSize;
daddiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
Sd(ra, MemOperand(sp, stack_offset));
@@ -5122,14 +5122,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
Sd(t9, MemOperand(sp, stack_offset));
- if (type == StackFrame::INTERNAL) {
- DCHECK_EQ(stack_offset, kPointerSize);
- li(t9, CodeObject());
- Sd(t9, MemOperand(sp, 0));
- } else {
- DCHECK_EQ(stack_offset, 0);
- }
// Adjust FP to point to saved FP.
+ DCHECK_EQ(stack_offset, 0);
Daddu(fp, sp, Operand(fp_offset));
}
@@ -5189,17 +5183,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- // Accessed from ExitFrame::code_slot.
- li(t8, CodeObject(), CONSTANT_SIZE);
- Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Accessed from ExitFrame::code_slot.
+ li(t8, CodeObject(), CONSTANT_SIZE);
+ Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
- // Save the frame pointer and the context in top.
- li(t8,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
- Sd(fp, MemOperand(t8));
- li(t8,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(cp, MemOperand(t8));
+ // Save the frame pointer and the context in top.
+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ Sd(fp, MemOperand(t8));
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Sd(cp, MemOperand(t8));
+ }
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
@@ -5235,6 +5232,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -5321,10 +5319,11 @@ void MacroAssembler::AssertStackIsAligned() {
}
}
-void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
- Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
+ DCHECK(SmiValuesAre31Bits());
Lw(dst, src);
SmiUntag(dst);
}
@@ -5395,20 +5394,9 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, t8);
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
- Operand(zero_reg));
- GetObjectType(object, t8, t8);
- Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
- Operand(FIXED_ARRAY_TYPE));
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
@@ -5423,6 +5411,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
@@ -5436,6 +5425,7 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
@@ -5448,6 +5438,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
@@ -5475,9 +5466,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
- LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
bind(&done_checking);
}
}
@@ -5505,10 +5496,13 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
- mfc1(t8, src1);
- dsll32(t8, t8, 0);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5552,10 +5546,13 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- mfc1(t8, src1);
- dsll32(t8, t8, 0);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5599,9 +5596,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_left);
// Left equals right => check for -0.
- dmfc1(t8, src1);
- Branch(&return_left, eq, t8, Operand(zero_reg));
- Branch(&return_right);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ dmfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5645,9 +5645,12 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
- dmfc1(t8, src1);
- Branch(&return_right, eq, t8, Operand(zero_reg));
- Branch(&return_left);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ dmfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+ }
bind(&return_right);
if (src2 != dst) {
@@ -5720,6 +5723,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, function);
CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
@@ -5773,14 +5777,16 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != t9) {
+ mov(t9, function);
+ function = t9;
+ }
- if (function != t9) {
- mov(t9, function);
- function = t9;
+ Call(function);
}
- Call(function);
-
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 1c59f5a341..3636568136 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -8,6 +8,7 @@
#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips64/assembler-mips64.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -24,9 +25,13 @@ constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
constexpr Register kInterpreterDispatchTableRegister = t2;
+
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -131,18 +136,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
}
-inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
- // Assumes that Smis are shifted by 32 bits.
- STATIC_ASSERT(kSmiShift == 32);
- return MemOperand(rm, SmiWordOffset(offset));
-}
-
-
-inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
- return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
-}
-
-
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
@@ -155,20 +148,13 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -186,6 +172,7 @@ class TurboAssembler : public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
li(kRootRegister, Operand(roots_array_start));
+ daddiu(kRootRegister, kRootRegister, kRootRegisterBias);
}
// Jump unconditionally to given label.
@@ -282,11 +269,10 @@ class TurboAssembler : public Assembler {
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -308,7 +294,9 @@ class TurboAssembler : public Assembler {
COND_ARGS);
void Call(Label* target);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
Call(target, rmode);
}
@@ -508,12 +496,13 @@ class TurboAssembler : public Assembler {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
- STATIC_ASSERT(kSmiShift == 32);
- dsra32(dst, src, 0);
+ dsra32(dst, src, kSmiShift - 32);
} else {
- sra(dst, src, kSmiTagSize);
+ DCHECK(SmiValuesAre31Bits());
+ sra(dst, src, kSmiShift);
}
}
@@ -582,9 +571,9 @@ class TurboAssembler : public Assembler {
void CallStubDelayed(CodeStub* stub, COND_ARGS);
#undef COND_ARGS
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
@@ -599,7 +588,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input);
+ DoubleRegister double_input, StubCallMode stub_mode);
// Conditional move.
void Movz(Register rd, Register rs, Register rt);
@@ -794,7 +783,7 @@ class TurboAssembler : public Assembler {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index) override;
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
Register src1, const Operand& src2);
@@ -852,6 +841,16 @@ class TurboAssembler : public Assembler {
void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ li(kScratchReg, Operand(b));
+ Branch(dest, eq, a, Operand(kScratchReg));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ li(kScratchReg, Operand(b));
+ Branch(dest, lt, a, Operand(kScratchReg));
+ }
+
// Push a standard frame, consisting of ra, fp, context and JS function.
void PushStandardFrame(Register function_reg);
@@ -872,21 +871,12 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* const isolate_;
- bool has_double_zero_reg_set_;
+ bool has_double_zero_reg_set_ = false;
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
@@ -945,7 +935,11 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
bool IsNear(Label* L, Condition cond, int rs_reg);
@@ -1177,9 +1171,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void SmiTag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (SmiValuesAre32Bits()) {
- STATIC_ASSERT(kSmiShift == 32);
dsll32(dst, src, 0);
} else {
+ DCHECK(SmiValuesAre31Bits());
Addu(dst, src, src);
}
}
@@ -1194,14 +1188,12 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// The int portion is upper 32-bits of 64-bit word.
dsra(dst, src, kSmiShift - scale);
} else {
+ DCHECK(SmiValuesAre31Bits());
DCHECK_GE(scale, kSmiTagSize);
sll(dst, src, scale - kSmiTagSize);
}
}
- // Combine load with untagging or scaling.
- void SmiLoadUntag(Register dst, MemOperand src);
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
@@ -1223,9 +1215,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void AssertNotSmi(Register object);
void AssertSmi(Register object);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 6e4a2ebd2a..f5231fe89f 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -418,7 +418,7 @@ void MipsDebugger::Debug() {
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int64_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -683,7 +683,7 @@ void MipsDebugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" stop and give control to the Debugger.\n");
PrintF(" All stop codes are watched:\n");
PrintF(" - They can be enabled / disabled: the Simulator\n");
PrintF(" will / won't stop when hitting them.\n");
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 0d25ad8886..d4ee72654f 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -9,6 +9,7 @@
#include "src/feedback-vector.h"
#include "src/objects-body-descriptors.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-collection.h"
#include "src/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -148,6 +149,49 @@ class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
}
};
+template <bool includeWeakNext>
+class AllocationSite::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(AllocationSite::kCommonPointerFieldEndOffset ==
+ AllocationSite::kPretenureDataOffset);
+ STATIC_ASSERT(AllocationSite::kPretenureDataOffset + kInt32Size ==
+ AllocationSite::kPretenureCreateCountOffset);
+ STATIC_ASSERT(AllocationSite::kPretenureCreateCountOffset + kInt32Size ==
+ AllocationSite::kWeakNextOffset);
+
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ if (offset >= AllocationSite::kStartOffset &&
+ offset < AllocationSite::kCommonPointerFieldEndOffset) {
+ return true;
+ }
+ // check for weak_next offset
+ if (includeWeakNext &&
+ map->instance_size() == AllocationSite::kSizeWithWeakNext &&
+ offset == AllocationSite::kWeakNextOffset) {
+ return true;
+ }
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ // Iterate over all the common pointer fields
+ IteratePointers(obj, AllocationSite::kStartOffset,
+ AllocationSite::kCommonPointerFieldEndOffset, v);
+ // Skip PretenureDataOffset and PretenureCreateCount which are Int32 fields
+ // Visit weak_next only for full body descriptor and if it has weak_next
+ // field
+ if (includeWeakNext && object_size == AllocationSite::kSizeWithWeakNext)
+ IteratePointers(obj, AllocationSite::kWeakNextOffset,
+ AllocationSite::kSizeWithWeakNext, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
@@ -191,7 +235,7 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
ObjectVisitor* v) {
Derived* table = reinterpret_cast<Derived*>(obj);
- int offset = kHeaderSize + kDataTableStartOffset;
+ int offset = kDataTableStartOffset;
int entry = 0;
for (int i = 0; i < table->Capacity(); i++) {
for (int j = 0; j < Derived::kEntrySize; j++) {
@@ -344,11 +388,46 @@ class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
}
};
-template <JSWeakCollection::BodyVisitingPolicy body_visiting_policy>
+class PreParsedScopeData::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset == kScopeDataOffset || offset >= kChildDataStartOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kScopeDataOffset, v);
+ IteratePointers(obj, kChildDataStartOffset, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return PreParsedScopeData::SizeFor(PreParsedScopeData::cast(obj)->length());
+ }
+};
+
+class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, HeapObject::kHeaderSize, kObjectCreateMapOffset, v);
+ IterateMaybeWeakPointer(obj, kObjectCreateMapOffset, v);
+ IteratePointers(obj, kObjectCreateMapOffset + kPointerSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return obj->SizeFromMap(map);
+ }
+};
+
class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kTableOffset + kPointerSize == kNextOffset);
- STATIC_ASSERT(kNextOffset + kPointerSize == kSize);
+ STATIC_ASSERT(kTableOffset + kPointerSize == kSize);
static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return IsValidSlotImpl(map, obj, offset);
@@ -357,12 +436,7 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- if (body_visiting_policy == kIgnoreWeakness) {
- IterateBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
- } else {
- IteratePointers(obj, kPropertiesOrHashOffset, kTableOffset, v);
- IterateBodyImpl(map, obj, kSize, object_size, v);
- }
+ IterateBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -430,7 +504,7 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
@@ -492,7 +566,7 @@ class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
if (offset < kMemoryStartOffset) return true;
- if (offset < kCompiledModuleOffset) return false;
+ if (offset < kModuleObjectOffset) return false;
return IsValidSlotImpl(map, obj, offset);
}
@@ -528,6 +602,31 @@ class Map::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map* map, HeapObject* obj) { return Map::kSize; }
};
+class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= HeapObject::kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ static_assert(kSmiHandlerOffset < kData1Offset,
+ "Field order must be in sync with this iteration code");
+ static_assert(kData1Offset < kSizeWithData1,
+ "Field order must be in sync with this iteration code");
+ IteratePointers(obj, kSmiHandlerOffset, kData1Offset, v);
+ if (object_size >= kSizeWithData1) {
+ IterateMaybeWeakPointer(obj, kData1Offset, v);
+ IteratePointers(obj, kData1Offset + kPointerSize, object_size, v);
+ }
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return object->SizeFromMap(map);
+ }
+};
+
template <typename Op, typename ReturnType, typename T1, typename T2,
typename T3, typename T4>
ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
@@ -555,9 +654,18 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
switch (type) {
case FIXED_ARRAY_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case EPHEMERON_HASH_TABLE_TYPE:
case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -622,6 +730,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_BOUND_FUNCTION_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
@@ -670,12 +779,19 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case CODE_DATA_CONTAINER_TYPE:
return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
p4);
+ case PRE_PARSED_SCOPE_DATA_TYPE:
+ return Op::template apply<PreParsedScopeData::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ return Op::template apply<
+ UncompiledDataWithPreParsedScope::BodyDescriptor>(p1, p2, p3, p4);
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
case BIGINT_TYPE:
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
return ReturnType();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -689,20 +805,23 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3,
p4);
}
+ case ALLOCATION_SITE_TYPE:
+ return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3, p4);
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- if (type == ALLOCATION_SITE_TYPE) {
- return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3,
- p4);
+ if (type == PROTOTYPE_INFO_TYPE) {
+ return Op::template apply<PrototypeInfo::BodyDescriptor>(p1, p2, p3,
+ p4);
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
case CALL_HANDLER_INFO_TYPE:
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
+ return Op::template apply<DataHandler::BodyDescriptor>(p1, p2, p3, p4);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index d4ed349da3..8f149c8788 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -13,17 +13,24 @@
#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/hash-table-inl.h"
-#include "src/objects/literal-objects.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/literal-objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-locale-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/maybe-object.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
@@ -57,80 +64,90 @@ namespace internal {
#ifdef VERIFY_HEAP
-void Object::ObjectVerify() {
+void Object::ObjectVerify(Isolate* isolate) {
if (IsSmi()) {
- Smi::cast(this)->SmiVerify();
+ Smi::cast(this)->SmiVerify(isolate);
} else {
- HeapObject::cast(this)->HeapObjectVerify();
+ HeapObject::cast(this)->HeapObjectVerify(isolate);
}
CHECK(!IsConstructor() || IsCallable());
}
-
-void Object::VerifyPointer(Object* p) {
+void Object::VerifyPointer(Isolate* isolate, Object* p) {
if (p->IsHeapObject()) {
- HeapObject::VerifyHeapPointer(p);
+ HeapObject::VerifyHeapPointer(isolate, p);
} else {
CHECK(p->IsSmi());
}
}
-void MaybeObject::VerifyMaybeObjectPointer(MaybeObject* p) {
+void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p) {
HeapObject* heap_object;
if (p->ToStrongOrWeakHeapObject(&heap_object)) {
- HeapObject::VerifyHeapPointer(heap_object);
+ HeapObject::VerifyHeapPointer(isolate, heap_object);
} else {
CHECK(p->IsSmi() || p->IsClearedWeakHeapObject());
}
}
namespace {
-void VerifyForeignPointer(HeapObject* host, Object* foreign) {
- host->VerifyPointer(foreign);
- CHECK(foreign->IsUndefined(host->GetIsolate()) ||
- Foreign::IsNormalized(foreign));
+void VerifyForeignPointer(Isolate* isolate, HeapObject* host, Object* foreign) {
+ host->VerifyPointer(isolate, foreign);
+ CHECK(foreign->IsUndefined(isolate) || Foreign::IsNormalized(foreign));
}
} // namespace
-void Smi::SmiVerify() {
+void Smi::SmiVerify(Isolate* isolate) {
CHECK(IsSmi());
CHECK(!IsCallable());
CHECK(!IsConstructor());
}
-
-void HeapObject::HeapObjectVerify() {
- VerifyHeapPointer(map());
+void HeapObject::HeapObjectVerify(Isolate* isolate) {
+ VerifyHeapPointer(isolate, map());
CHECK(map()->IsMap());
- InstanceType instance_type = map()->instance_type();
-
- switch (instance_type) {
+ switch (map()->instance_type()) {
#define STRING_TYPE_CASE(TYPE, size, name, camel_name) case TYPE:
STRING_TYPE_LIST(STRING_TYPE_CASE)
#undef STRING_TYPE_CASE
- String::cast(this)->StringVerify();
+ String::cast(this)->StringVerify(isolate);
break;
case SYMBOL_TYPE:
- Symbol::cast(this)->SymbolVerify();
+ Symbol::cast(this)->SymbolVerify(isolate);
break;
case MAP_TYPE:
- Map::cast(this)->MapVerify();
+ Map::cast(this)->MapVerify(isolate);
break;
case HEAP_NUMBER_TYPE:
+ CHECK(IsHeapNumber());
+ break;
case MUTABLE_HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberVerify();
+ CHECK(IsMutableHeapNumber());
break;
case BIGINT_TYPE:
- BigInt::cast(this)->BigIntVerify();
+ BigInt::cast(this)->BigIntVerify(isolate);
break;
case CALL_HANDLER_INFO_TYPE:
- CallHandlerInfo::cast(this)->CallHandlerInfoVerify();
+ CallHandlerInfo::cast(this)->CallHandlerInfoVerify(isolate);
+ break;
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ ObjectBoilerplateDescription::cast(this)
+ ->ObjectBoilerplateDescriptionVerify(isolate);
break;
+ // FixedArray types
case HASH_TABLE_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case EPHEMERON_HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -140,58 +157,58 @@ void HeapObject::HeapObjectVerify() {
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- FixedArray::cast(this)->FixedArrayVerify();
+ FixedArray::cast(this)->FixedArrayVerify(isolate);
break;
case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(this)->WeakFixedArrayVerify();
+ WeakFixedArray::cast(this)->WeakFixedArrayVerify(isolate);
break;
case WEAK_ARRAY_LIST_TYPE:
- WeakArrayList::cast(this)->WeakArrayListVerify();
+ WeakArrayList::cast(this)->WeakArrayListVerify(isolate);
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
+ FixedDoubleArray::cast(this)->FixedDoubleArrayVerify(isolate);
break;
case FEEDBACK_METADATA_TYPE:
- FeedbackMetadata::cast(this)->FeedbackMetadataVerify();
+ FeedbackMetadata::cast(this)->FeedbackMetadataVerify(isolate);
break;
case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayVerify();
+ ByteArray::cast(this)->ByteArrayVerify(isolate);
break;
case BYTECODE_ARRAY_TYPE:
- BytecodeArray::cast(this)->BytecodeArrayVerify();
+ BytecodeArray::cast(this)->BytecodeArrayVerify(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DescriptorArray::cast(this)->DescriptorArrayVerify();
+ DescriptorArray::cast(this)->DescriptorArrayVerify(isolate);
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(this)->TransitionArrayVerify();
+ TransitionArray::cast(this)->TransitionArrayVerify(isolate);
break;
case PROPERTY_ARRAY_TYPE:
- PropertyArray::cast(this)->PropertyArrayVerify();
+ PropertyArray::cast(this)->PropertyArrayVerify(isolate);
break;
case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpaceVerify();
+ FreeSpace::cast(this)->FreeSpaceVerify(isolate);
break;
case FEEDBACK_CELL_TYPE:
- FeedbackCell::cast(this)->FeedbackCellVerify();
+ FeedbackCell::cast(this)->FeedbackCellVerify(isolate);
break;
case FEEDBACK_VECTOR_TYPE:
- FeedbackVector::cast(this)->FeedbackVectorVerify();
+ FeedbackVector::cast(this)->FeedbackVectorVerify(isolate);
break;
-#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(); \
- break;
+#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(isolate); \
+ break;
- TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
+ TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
#undef VERIFY_TYPED_ARRAY
case CODE_TYPE:
- Code::cast(this)->CodeVerify();
+ Code::cast(this)->CodeVerify(isolate);
break;
case ODDBALL_TYPE:
- Oddball::cast(this)->OddballVerify();
+ Oddball::cast(this)->OddballVerify(isolate);
break;
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
@@ -202,174 +219,183 @@ void HeapObject::HeapObjectVerify() {
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
- JSObject::cast(this)->JSObjectVerify();
+ JSObject::cast(this)->JSObjectVerify(isolate);
break;
case WASM_INSTANCE_TYPE:
- WasmInstanceObject::cast(this)->WasmInstanceObjectVerify();
+ WasmInstanceObject::cast(this)->WasmInstanceObjectVerify(isolate);
break;
case JS_ARGUMENTS_TYPE:
- JSArgumentsObject::cast(this)->JSArgumentsObjectVerify();
+ JSArgumentsObject::cast(this)->JSArgumentsObjectVerify(isolate);
break;
case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(this)->JSGeneratorObjectVerify();
+ JSGeneratorObject::cast(this)->JSGeneratorObjectVerify(isolate);
break;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- JSAsyncGeneratorObject::cast(this)->JSAsyncGeneratorObjectVerify();
+ JSAsyncGeneratorObject::cast(this)->JSAsyncGeneratorObjectVerify(isolate);
break;
case JS_VALUE_TYPE:
- JSValue::cast(this)->JSValueVerify();
+ JSValue::cast(this)->JSValueVerify(isolate);
break;
case JS_DATE_TYPE:
- JSDate::cast(this)->JSDateVerify();
+ JSDate::cast(this)->JSDateVerify(isolate);
break;
case JS_BOUND_FUNCTION_TYPE:
- JSBoundFunction::cast(this)->JSBoundFunctionVerify();
+ JSBoundFunction::cast(this)->JSBoundFunctionVerify(isolate);
break;
case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionVerify();
+ JSFunction::cast(this)->JSFunctionVerify(isolate);
break;
case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyVerify();
+ JSGlobalProxy::cast(this)->JSGlobalProxyVerify(isolate);
break;
case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectVerify();
+ JSGlobalObject::cast(this)->JSGlobalObjectVerify(isolate);
break;
case CELL_TYPE:
- Cell::cast(this)->CellVerify();
+ Cell::cast(this)->CellVerify(isolate);
break;
case PROPERTY_CELL_TYPE:
- PropertyCell::cast(this)->PropertyCellVerify();
+ PropertyCell::cast(this)->PropertyCellVerify(isolate);
break;
case WEAK_CELL_TYPE:
- WeakCell::cast(this)->WeakCellVerify();
+ WeakCell::cast(this)->WeakCellVerify(isolate);
break;
case JS_ARRAY_TYPE:
- JSArray::cast(this)->JSArrayVerify();
+ JSArray::cast(this)->JSArrayVerify(isolate);
break;
case JS_MODULE_NAMESPACE_TYPE:
- JSModuleNamespace::cast(this)->JSModuleNamespaceVerify();
+ JSModuleNamespace::cast(this)->JSModuleNamespaceVerify(isolate);
break;
case JS_SET_TYPE:
- JSSet::cast(this)->JSSetVerify();
+ JSSet::cast(this)->JSSetVerify(isolate);
break;
case JS_MAP_TYPE:
- JSMap::cast(this)->JSMapVerify();
+ JSMap::cast(this)->JSMapVerify(isolate);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
- JSSetIterator::cast(this)->JSSetIteratorVerify();
+ JSSetIterator::cast(this)->JSSetIteratorVerify(isolate);
break;
case JS_MAP_KEY_ITERATOR_TYPE:
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
- JSMapIterator::cast(this)->JSMapIteratorVerify();
+ JSMapIterator::cast(this)->JSMapIteratorVerify(isolate);
break;
case JS_ARRAY_ITERATOR_TYPE:
- JSArrayIterator::cast(this)->JSArrayIteratorVerify();
+ JSArrayIterator::cast(this)->JSArrayIteratorVerify(isolate);
break;
case JS_STRING_ITERATOR_TYPE:
- JSStringIterator::cast(this)->JSStringIteratorVerify();
+ JSStringIterator::cast(this)->JSStringIteratorVerify(isolate);
break;
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- JSAsyncFromSyncIterator::cast(this)->JSAsyncFromSyncIteratorVerify();
+ JSAsyncFromSyncIterator::cast(this)->JSAsyncFromSyncIteratorVerify(
+ isolate);
break;
case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(this)->JSWeakMapVerify();
+ JSWeakMap::cast(this)->JSWeakMapVerify(isolate);
break;
case JS_WEAK_SET_TYPE:
- JSWeakSet::cast(this)->JSWeakSetVerify();
+ JSWeakSet::cast(this)->JSWeakSetVerify(isolate);
break;
case JS_PROMISE_TYPE:
- JSPromise::cast(this)->JSPromiseVerify();
+ JSPromise::cast(this)->JSPromiseVerify(isolate);
break;
case JS_REGEXP_TYPE:
- JSRegExp::cast(this)->JSRegExpVerify();
+ JSRegExp::cast(this)->JSRegExpVerify(isolate);
break;
case JS_REGEXP_STRING_ITERATOR_TYPE:
- JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorVerify();
+ JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorVerify(isolate);
break;
case FILLER_TYPE:
break;
case JS_PROXY_TYPE:
- JSProxy::cast(this)->JSProxyVerify();
+ JSProxy::cast(this)->JSProxyVerify(isolate);
break;
case FOREIGN_TYPE:
- Foreign::cast(this)->ForeignVerify();
+ Foreign::cast(this)->ForeignVerify(isolate);
+ break;
+ case PRE_PARSED_SCOPE_DATA_TYPE:
+ PreParsedScopeData::cast(this)->PreParsedScopeDataVerify(isolate);
+ break;
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
+ UncompiledDataWithoutPreParsedScope::cast(this)
+ ->UncompiledDataWithoutPreParsedScopeVerify(isolate);
+ break;
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ UncompiledDataWithPreParsedScope::cast(this)
+ ->UncompiledDataWithPreParsedScopeVerify(isolate);
break;
case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
+ SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify(isolate);
break;
case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectVerify();
+ JSMessageObject::cast(this)->JSMessageObjectVerify(isolate);
break;
case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::cast(this)->JSArrayBufferVerify();
+ JSArrayBuffer::cast(this)->JSArrayBufferVerify(isolate);
break;
case JS_TYPED_ARRAY_TYPE:
- JSTypedArray::cast(this)->JSTypedArrayVerify();
+ JSTypedArray::cast(this)->JSTypedArrayVerify(isolate);
break;
case JS_DATA_VIEW_TYPE:
- JSDataView::cast(this)->JSDataViewVerify();
+ JSDataView::cast(this)->JSDataViewVerify(isolate);
break;
case SMALL_ORDERED_HASH_SET_TYPE:
- SmallOrderedHashSet::cast(this)->SmallOrderedHashTableVerify();
+ SmallOrderedHashSet::cast(this)->SmallOrderedHashTableVerify(isolate);
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
- SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify();
+ SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify(isolate);
break;
case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(this)->CodeDataContainerVerify();
+ CodeDataContainer::cast(this)->CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
case JS_INTL_LOCALE_TYPE:
- JSLocale::cast(this)->JSLocaleVerify();
+ JSLocale::cast(this)->JSLocaleVerify(isolate);
+ break;
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatVerify(isolate);
break;
#endif // V8_INTL_SUPPORT
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- Name::cast(this)->Name##Verify(); \
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE: \
+ Name::cast(this)->Name##Verify(isolate); \
break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
+ STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case ALLOCATION_SITE_TYPE:
+ AllocationSite::cast(this)->AllocationSiteVerify(isolate);
+ break;
+
case LOAD_HANDLER_TYPE:
- LoadHandler::cast(this)->LoadHandlerVerify();
+ LoadHandler::cast(this)->LoadHandlerVerify(isolate);
break;
case STORE_HANDLER_TYPE:
- StoreHandler::cast(this)->StoreHandlerVerify();
+ StoreHandler::cast(this)->StoreHandlerVerify(isolate);
break;
}
}
-
-void HeapObject::VerifyHeapPointer(Object* p) {
+void HeapObject::VerifyHeapPointer(Isolate* isolate, Object* p) {
CHECK(p->IsHeapObject());
HeapObject* ho = HeapObject::cast(p);
- CHECK(ho->GetHeap()->Contains(ho));
+ CHECK(isolate->heap()->Contains(ho));
}
-
-void Symbol::SymbolVerify() {
+void Symbol::SymbolVerify(Isolate* isolate) {
CHECK(IsSymbol());
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
- CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
+ CHECK(name()->IsUndefined(isolate) || name()->IsString());
CHECK_IMPLIES(IsPrivateField(), IsPrivate());
}
+void ByteArray::ByteArrayVerify(Isolate* isolate) { CHECK(IsByteArray()); }
-void HeapNumber::HeapNumberVerify() {
- CHECK(IsHeapNumber() || IsMutableHeapNumber());
-}
-
-void ByteArray::ByteArrayVerify() {
- CHECK(IsByteArray());
-}
-
-
-void BytecodeArray::BytecodeArrayVerify() {
+void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
// TODO(oth): Walk bytecodes and immediate values to validate sanity.
// - All bytecodes are known and well formed.
// - Jumps must go to new instructions starts.
@@ -377,31 +403,28 @@ void BytecodeArray::BytecodeArrayVerify() {
// - No consecutive sequences of prefix Wide / ExtraWide.
CHECK(IsBytecodeArray());
CHECK(constant_pool()->IsFixedArray());
- VerifyHeapPointer(constant_pool());
+ VerifyHeapPointer(isolate, constant_pool());
}
+void FreeSpace::FreeSpaceVerify(Isolate* isolate) { CHECK(IsFreeSpace()); }
-void FreeSpace::FreeSpaceVerify() {
- CHECK(IsFreeSpace());
-}
-
-void FeedbackCell::FeedbackCellVerify() {
+void FeedbackCell::FeedbackCellVerify(Isolate* isolate) {
CHECK(IsFeedbackCell());
- Isolate* const isolate = GetIsolate();
- VerifyHeapPointer(value());
+
+ VerifyHeapPointer(isolate, value());
CHECK(value()->IsUndefined(isolate) || value()->IsFeedbackVector());
}
-void FeedbackVector::FeedbackVectorVerify() {
+void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
CHECK(IsFeedbackVector());
MaybeObject* code = optimized_code_weak_or_smi();
- MaybeObject::VerifyMaybeObjectPointer(code);
+ MaybeObject::VerifyMaybeObjectPointer(isolate, code);
CHECK(code->IsSmi() || code->IsClearedWeakHeapObject() ||
code->IsWeakHeapObject());
}
template <class Traits>
-void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
+void FixedTypedArray<Traits>::FixedTypedArrayVerify(Isolate* isolate) {
CHECK(IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
Traits::kInstanceType);
@@ -418,12 +441,11 @@ bool JSObject::ElementsAreSafeToExamine() {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
return reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map();
+ GetReadOnlyRoots().one_pointer_filler_map();
}
namespace {
-void VerifyJSObjectElements(JSObject* object) {
- Isolate* isolate = object->GetIsolate();
+void VerifyJSObjectElements(Isolate* isolate, JSObject* object) {
// Only TypedArrays can have these specialized elements.
if (object->IsJSTypedArray()) {
// TODO(cbruni): Fix CreateTypedArray to either not instantiate the object
@@ -459,9 +481,9 @@ void VerifyJSObjectElements(JSObject* object) {
}
} // namespace
-void JSObject::JSObjectVerify() {
- VerifyPointer(raw_properties_or_hash());
- VerifyHeapPointer(elements());
+void JSObject::JSObjectVerify(Isolate* isolate) {
+ VerifyPointer(isolate, raw_properties_or_hash());
+ VerifyHeapPointer(isolate, elements());
CHECK_IMPLIES(HasSloppyArgumentsElements(), IsJSArgumentsObject());
if (HasFastProperties()) {
@@ -480,7 +502,6 @@ void JSObject::JSObjectVerify() {
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
DescriptorArray* descriptors = map()->instance_descriptors();
- Isolate* isolate = GetIsolate();
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map()->elements_kind());
@@ -518,7 +539,7 @@ void JSObject::JSObjectVerify() {
FixedArray* keys = enum_cache->keys();
FixedArray* indices = enum_cache->indices();
CHECK_LE(map()->EnumLength(), keys->length());
- CHECK_IMPLIES(indices != isolate->heap()->empty_fixed_array(),
+ CHECK_IMPLIES(indices != ReadOnlyRoots(isolate).empty_fixed_array(),
keys->length() == indices->length());
}
}
@@ -527,31 +548,32 @@ void JSObject::JSObjectVerify() {
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array()) ||
+ (elements() == GetReadOnlyRoots().empty_fixed_array()) ||
HasFastStringWrapperElements()),
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
+ (elements()->map() == GetReadOnlyRoots().fixed_array_map() ||
+ elements()->map() == GetReadOnlyRoots().fixed_cow_array_map()));
CHECK_EQ(map()->has_fast_object_elements(), HasObjectElements());
- VerifyJSObjectElements(this);
+ VerifyJSObjectElements(isolate, this);
}
}
-
-void Map::MapVerify() {
- Heap* heap = GetHeap();
- CHECK(!heap->InNewSpace(this));
+void Map::MapVerify(Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ CHECK(!Heap::InNewSpace(this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
static_cast<size_t>(instance_size()) < heap->Capacity()));
CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
!Map::cast(GetBackPointer())->is_stable());
- VerifyHeapPointer(prototype());
- VerifyHeapPointer(instance_descriptors());
+ VerifyHeapPointer(isolate, prototype());
+ VerifyHeapPointer(isolate, instance_descriptors());
SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
DisallowHeapAllocation no_gc;
- SLOW_DCHECK(TransitionsAccessor(this, &no_gc).IsSortedNoDuplicates());
- SLOW_DCHECK(TransitionsAccessor(this, &no_gc).IsConsistentWithBackPointers());
+ SLOW_DCHECK(
+ TransitionsAccessor(isolate, this, &no_gc).IsSortedNoDuplicates());
+ SLOW_DCHECK(TransitionsAccessor(isolate, this, &no_gc)
+ .IsConsistentWithBackPointers());
SLOW_DCHECK(!FLAG_unbox_double_fields ||
layout_descriptor()->IsConsistentWithMap(this));
if (!may_have_interesting_symbols()) {
@@ -577,54 +599,53 @@ void Map::MapVerify() {
prototype_validity_cell()->IsCell());
}
-
-void Map::DictionaryMapVerify() {
- MapVerify();
+void Map::DictionaryMapVerify(Isolate* isolate) {
+ MapVerify(isolate);
CHECK(is_dictionary_map());
CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
- CHECK_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(),
+ instance_descriptors());
CHECK_EQ(0, UnusedPropertyFields());
CHECK_EQ(Map::GetVisitorId(this), visitor_id());
}
-void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
+void AliasedArgumentsEntry::AliasedArgumentsEntryVerify(Isolate* isolate) {
VerifySmiField(kAliasedContextSlot);
}
-
-void FixedArray::FixedArrayVerify() {
+void FixedArray::FixedArrayVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
Object* e = get(i);
- VerifyPointer(e);
+ VerifyPointer(isolate, e);
}
}
-void WeakFixedArray::WeakFixedArrayVerify() {
+void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
- MaybeObject::VerifyMaybeObjectPointer(Get(i));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
}
}
-void WeakArrayList::WeakArrayListVerify() {
+void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
- MaybeObject::VerifyMaybeObjectPointer(Get(i));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
}
}
-void PropertyArray::PropertyArrayVerify() {
+void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
if (length() == 0) {
- CHECK_EQ(this, this->GetHeap()->empty_property_array());
+ CHECK_EQ(this, ReadOnlyRoots(isolate).empty_property_array());
return;
}
// There are no empty PropertyArrays.
CHECK_LT(0, length());
for (int i = 0; i < length(); i++) {
Object* e = get(i);
- VerifyPointer(e);
+ VerifyPointer(isolate, e);
}
}
-void FixedDoubleArray::FixedDoubleArrayVerify() {
+void FixedDoubleArray::FixedDoubleArrayVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
uint64_t value = get_representation(i);
@@ -639,9 +660,9 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
}
-void FeedbackMetadata::FeedbackMetadataVerify() {
+void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
if (slot_count() == 0) {
- CHECK_EQ(GetHeap()->empty_feedback_metadata(), this);
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_feedback_metadata(), this);
} else {
FeedbackMetadataIterator iter(this);
while (iter.HasNext()) {
@@ -653,62 +674,71 @@ void FeedbackMetadata::FeedbackMetadataVerify() {
}
}
-void DescriptorArray::DescriptorArrayVerify() {
- FixedArrayVerify();
+void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
+ WeakFixedArrayVerify(isolate);
int nof_descriptors = number_of_descriptors();
if (number_of_descriptors_storage() == 0) {
- Heap* heap = GetHeap();
- CHECK_EQ(heap->empty_descriptor_array(), this);
+ Heap* heap = isolate->heap();
+ CHECK_EQ(ReadOnlyRoots(heap).empty_descriptor_array(), this);
CHECK_EQ(2, length());
CHECK_EQ(0, nof_descriptors);
- CHECK_EQ(heap->empty_enum_cache(), GetEnumCache());
+ CHECK_EQ(ReadOnlyRoots(heap).empty_enum_cache(), GetEnumCache());
} else {
CHECK_LT(2, length());
CHECK_LE(LengthFor(nof_descriptors), length());
- Isolate* isolate = GetIsolate();
// Check that properties with private symbols names are non-enumerable.
for (int descriptor = 0; descriptor < nof_descriptors; descriptor++) {
- Object* key = get(ToKeyIndex(descriptor));
+ Object* key = get(ToKeyIndex(descriptor))->ToObject();
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key->IsUndefined(isolate)) continue;
+ PropertyDetails details = GetDetails(descriptor);
if (Name::cast(key)->IsPrivate()) {
- PropertyDetails details = GetDetails(descriptor);
CHECK_NE(details.attributes() & DONT_ENUM, 0);
}
+ MaybeObject* value = get(ToValueIndex(descriptor));
+ HeapObject* heap_object;
+ if (details.location() == kField) {
+ CHECK(value == MaybeObject::FromObject(FieldType::None()) ||
+ value == MaybeObject::FromObject(FieldType::Any()) ||
+ value->IsClearedWeakHeapObject() ||
+ (value->ToWeakHeapObject(&heap_object) && heap_object->IsMap()));
+ } else {
+ CHECK(!value->IsWeakOrClearedHeapObject());
+ CHECK(!value->ToObject()->IsMap());
+ }
}
}
}
-void TransitionArray::TransitionArrayVerify() {
- WeakFixedArrayVerify();
+void TransitionArray::TransitionArrayVerify(Isolate* isolate) {
+ WeakFixedArrayVerify(isolate);
CHECK_LE(LengthFor(number_of_transitions()), length());
}
-void JSArgumentsObject::JSArgumentsObjectVerify() {
+void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
SloppyArgumentsElements::cast(elements())
- ->SloppyArgumentsElementsVerify(this);
+ ->SloppyArgumentsElementsVerify(isolate, this);
}
- Isolate* isolate = GetIsolate();
if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
isolate->IsInAnyContext(map(),
Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX) ||
isolate->IsInAnyContext(map(),
Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)) {
- VerifyObjectField(JSSloppyArgumentsObject::kLengthOffset);
- VerifyObjectField(JSSloppyArgumentsObject::kCalleeOffset);
+ VerifyObjectField(isolate, JSSloppyArgumentsObject::kLengthOffset);
+ VerifyObjectField(isolate, JSSloppyArgumentsObject::kCalleeOffset);
} else if (isolate->IsInAnyContext(map(),
Context::STRICT_ARGUMENTS_MAP_INDEX)) {
- VerifyObjectField(JSStrictArgumentsObject::kLengthOffset);
+ VerifyObjectField(isolate, JSStrictArgumentsObject::kLengthOffset);
}
- JSObjectVerify();
+ JSObjectVerify(isolate);
}
-void SloppyArgumentsElements::SloppyArgumentsElementsVerify(JSObject* holder) {
- Isolate* isolate = GetIsolate();
- FixedArrayVerify();
+void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
+ JSObject* holder) {
+ FixedArrayVerify(isolate);
// Abort verification if only partially initialized (can't use arguments()
// getter because it does FixedArray::cast()).
if (get(kArgumentsIndex)->IsUndefined(isolate)) return;
@@ -717,11 +747,11 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(JSObject* holder) {
bool is_fast = kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
CHECK(IsFixedArray());
CHECK_GE(length(), 2);
- CHECK_EQ(map(), isolate->heap()->sloppy_arguments_elements_map());
+ CHECK_EQ(map(), ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
Context* context_object = Context::cast(context());
FixedArray* arg_elements = FixedArray::cast(arguments());
if (arg_elements->length() == 0) {
- CHECK(arg_elements == isolate->heap()->empty_fixed_array());
+ CHECK(arg_elements == ReadOnlyRoots(isolate).empty_fixed_array());
return;
}
ElementsAccessor* accessor;
@@ -760,37 +790,35 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(JSObject* holder) {
CHECK_LE(maxMappedIndex, arg_elements->length());
}
-void JSGeneratorObject::JSGeneratorObjectVerify() {
+void JSGeneratorObject::JSGeneratorObjectVerify(Isolate* isolate) {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
// initialized by the generator. Hence these weak checks.
- VerifyObjectField(kFunctionOffset);
- VerifyObjectField(kContextOffset);
- VerifyObjectField(kReceiverOffset);
- VerifyObjectField(kRegisterFileOffset);
- VerifyObjectField(kContinuationOffset);
+ VerifyObjectField(isolate, kFunctionOffset);
+ VerifyObjectField(isolate, kContextOffset);
+ VerifyObjectField(isolate, kReceiverOffset);
+ VerifyObjectField(isolate, kParametersAndRegistersOffset);
+ VerifyObjectField(isolate, kContinuationOffset);
}
-void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify() {
+void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
// Check inherited fields
- JSGeneratorObjectVerify();
- VerifyObjectField(kQueueOffset);
- queue()->HeapObjectVerify();
+ JSGeneratorObjectVerify(isolate);
+ VerifyObjectField(isolate, kQueueOffset);
+ queue()->HeapObjectVerify(isolate);
}
-void JSValue::JSValueVerify() {
+void JSValue::JSValueVerify(Isolate* isolate) {
Object* v = value();
if (v->IsHeapObject()) {
- VerifyHeapPointer(v);
+ VerifyHeapPointer(isolate, v);
}
}
-
-void JSDate::JSDateVerify() {
+void JSDate::JSDateVerify(Isolate* isolate) {
if (value()->IsHeapObject()) {
- VerifyHeapPointer(value());
+ VerifyHeapPointer(isolate, value());
}
- Isolate* isolate = GetIsolate();
CHECK(value()->IsUndefined(isolate) || value()->IsSmi() ||
value()->IsHeapNumber());
CHECK(year()->IsUndefined(isolate) || year()->IsSmi() || year()->IsNaN());
@@ -834,37 +862,34 @@ void JSDate::JSDateVerify() {
}
}
-
-void JSMessageObject::JSMessageObjectVerify() {
+void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
CHECK(IsJSMessageObject());
- VerifyObjectField(kStartPositionOffset);
- VerifyObjectField(kEndPositionOffset);
- VerifyObjectField(kArgumentsOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackFramesOffset);
+ VerifyObjectField(isolate, kStartPositionOffset);
+ VerifyObjectField(isolate, kEndPositionOffset);
+ VerifyObjectField(isolate, kArgumentsOffset);
+ VerifyObjectField(isolate, kScriptOffset);
+ VerifyObjectField(isolate, kStackFramesOffset);
}
-
-void String::StringVerify() {
+void String::StringVerify(Isolate* isolate) {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- CHECK_IMPLIES(length() == 0, this == GetHeap()->empty_string());
+ CHECK_IMPLIES(length() == 0, this == ReadOnlyRoots(isolate).empty_string());
if (IsInternalizedString()) {
- CHECK(!GetHeap()->InNewSpace(this));
+ CHECK(!Heap::InNewSpace(this));
}
if (IsConsString()) {
- ConsString::cast(this)->ConsStringVerify();
+ ConsString::cast(this)->ConsStringVerify(isolate);
} else if (IsSlicedString()) {
- SlicedString::cast(this)->SlicedStringVerify();
+ SlicedString::cast(this)->SlicedStringVerify(isolate);
} else if (IsThinString()) {
- ThinString::cast(this)->ThinStringVerify();
+ ThinString::cast(this)->ThinStringVerify(isolate);
}
}
-
-void ConsString::ConsStringVerify() {
+void ConsString::ConsStringVerify(Isolate* isolate) {
CHECK(this->first()->IsString());
- CHECK(this->second() == GetHeap()->empty_string() ||
+ CHECK(this->second() == ReadOnlyRoots(isolate).empty_string() ||
this->second()->IsString());
CHECK_GE(this->length(), ConsString::kMinLength);
CHECK(this->length() == this->first()->length() + this->second()->length());
@@ -876,71 +901,67 @@ void ConsString::ConsStringVerify() {
}
}
-void ThinString::ThinStringVerify() {
+void ThinString::ThinStringVerify(Isolate* isolate) {
CHECK(this->actual()->IsInternalizedString());
CHECK(this->actual()->IsSeqString() || this->actual()->IsExternalString());
}
-void SlicedString::SlicedStringVerify() {
+void SlicedString::SlicedStringVerify(Isolate* isolate) {
CHECK(!this->parent()->IsConsString());
CHECK(!this->parent()->IsSlicedString());
CHECK_GE(this->length(), SlicedString::kMinLength);
}
-
-void JSBoundFunction::JSBoundFunctionVerify() {
+void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
CHECK(IsJSBoundFunction());
- JSObjectVerify();
- VerifyObjectField(kBoundThisOffset);
- VerifyObjectField(kBoundTargetFunctionOffset);
- VerifyObjectField(kBoundArgumentsOffset);
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kBoundThisOffset);
+ VerifyObjectField(isolate, kBoundTargetFunctionOffset);
+ VerifyObjectField(isolate, kBoundArgumentsOffset);
CHECK(IsCallable());
- Isolate* const isolate = GetIsolate();
if (!raw_bound_target_function()->IsUndefined(isolate)) {
CHECK(bound_target_function()->IsCallable());
CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
}
}
-void JSFunction::JSFunctionVerify() {
+void JSFunction::JSFunctionVerify(Isolate* isolate) {
CHECK(IsJSFunction());
- JSObjectVerify();
- VerifyHeapPointer(feedback_cell());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, feedback_cell());
CHECK(feedback_cell()->IsFeedbackCell());
CHECK(code()->IsCode());
CHECK(map()->is_callable());
if (has_prototype_slot()) {
- VerifyObjectField(kPrototypeOrInitialMapOffset);
+ VerifyObjectField(isolate, kPrototypeOrInitialMapOffset);
}
}
-
-void SharedFunctionInfo::SharedFunctionInfoVerify() {
+void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
CHECK(IsSharedFunctionInfo());
- VerifyObjectField(kFunctionDataOffset);
- VerifyObjectField(kDebugInfoOffset);
- VerifyObjectField(kOuterScopeInfoOrFeedbackMetadataOffset);
- VerifyObjectField(kFunctionIdentifierOffset);
- VerifyObjectField(kNameOrScopeInfoOffset);
- VerifyObjectField(kScriptOffset);
+ VerifyObjectField(isolate, kFunctionDataOffset);
+ VerifyObjectField(isolate, kOuterScopeInfoOrFeedbackMetadataOffset);
+ VerifyObjectField(isolate, kFunctionIdentifierOrDebugInfoOffset);
+ VerifyObjectField(isolate, kNameOrScopeInfoOffset);
+ VerifyObjectField(isolate, kScriptOffset);
Object* value = name_or_scope_info();
CHECK(value == kNoSharedNameSentinel || value->IsString() ||
value->IsScopeInfo());
if (value->IsScopeInfo()) {
CHECK_LT(0, ScopeInfo::cast(value)->length());
- CHECK_NE(value, GetHeap()->empty_scope_info());
+ CHECK_NE(value, ReadOnlyRoots(isolate).empty_scope_info());
}
- Isolate* isolate = GetIsolate();
CHECK(HasWasmExportedFunctionData() || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData() || HasBuiltinId() ||
- HasPreParsedScopeData());
+ HasUncompiledDataWithPreParsedScope() ||
+ HasUncompiledDataWithoutPreParsedScope());
- CHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId() ||
- HasInferredName());
+ CHECK(function_identifier_or_debug_info()->IsUndefined(isolate) ||
+ HasBuiltinFunctionId() || HasInferredName() || HasDebugInfo());
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
@@ -959,8 +980,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
ScopeInfo* info = scope_info();
CHECK(kind() == info->function_kind());
CHECK_EQ(kind() == kModule, info->scope_type() == MODULE_SCOPE);
- CHECK_EQ(raw_start_position(), info->StartPosition());
- CHECK_EQ(raw_end_position(), info->EndPosition());
}
if (IsApiFunction()) {
@@ -977,35 +996,32 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
}
}
-
-void JSGlobalProxy::JSGlobalProxyVerify() {
+void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
CHECK(IsJSGlobalProxy());
- JSObjectVerify();
- VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
-
-void JSGlobalObject::JSGlobalObjectVerify() {
+void JSGlobalObject::JSGlobalObjectVerify(Isolate* isolate) {
CHECK(IsJSGlobalObject());
// Do not check the dummy global object for the builtins.
if (global_dictionary()->NumberOfElements() == 0 &&
elements()->length() == 0) {
return;
}
- JSObjectVerify();
+ JSObjectVerify(isolate);
}
-
-void Oddball::OddballVerify() {
+void Oddball::OddballVerify(Isolate* isolate) {
CHECK(IsOddball());
- Heap* heap = GetHeap();
- VerifyHeapPointer(to_string());
+ Heap* heap = isolate->heap();
+ VerifyHeapPointer(isolate, to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == heap->nan_value() ||
- number == heap->hole_nan_value());
+ CHECK(number == ReadOnlyRoots(heap).nan_value() ||
+ number == ReadOnlyRoots(heap).hole_nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::ToInt(number);
@@ -1014,28 +1030,29 @@ void Oddball::OddballVerify() {
CHECK_LE(value, 1);
CHECK_GE(value, kLeastHiddenOddballNumber);
}
- if (map() == heap->undefined_map()) {
- CHECK(this == heap->undefined_value());
- } else if (map() == heap->the_hole_map()) {
- CHECK(this == heap->the_hole_value());
- } else if (map() == heap->null_map()) {
- CHECK(this == heap->null_value());
- } else if (map() == heap->boolean_map()) {
- CHECK(this == heap->true_value() ||
- this == heap->false_value());
- } else if (map() == heap->uninitialized_map()) {
- CHECK(this == heap->uninitialized_value());
- } else if (map() == heap->arguments_marker_map()) {
- CHECK(this == heap->arguments_marker());
- } else if (map() == heap->termination_exception_map()) {
- CHECK(this == heap->termination_exception());
- } else if (map() == heap->exception_map()) {
- CHECK(this == heap->exception());
- } else if (map() == heap->optimized_out_map()) {
- CHECK(this == heap->optimized_out());
- } else if (map() == heap->stale_register_map()) {
- CHECK(this == heap->stale_register());
- } else if (map() == heap->self_reference_marker_map()) {
+
+ ReadOnlyRoots roots(heap);
+ if (map() == roots.undefined_map()) {
+ CHECK(this == roots.undefined_value());
+ } else if (map() == roots.the_hole_map()) {
+ CHECK(this == roots.the_hole_value());
+ } else if (map() == roots.null_map()) {
+ CHECK(this == roots.null_value());
+ } else if (map() == roots.boolean_map()) {
+ CHECK(this == roots.true_value() || this == roots.false_value());
+ } else if (map() == roots.uninitialized_map()) {
+ CHECK(this == roots.uninitialized_value());
+ } else if (map() == roots.arguments_marker_map()) {
+ CHECK(this == roots.arguments_marker());
+ } else if (map() == roots.termination_exception_map()) {
+ CHECK(this == roots.termination_exception());
+ } else if (map() == roots.exception_map()) {
+ CHECK(this == roots.exception());
+ } else if (map() == roots.optimized_out_map()) {
+ CHECK(this == roots.optimized_out());
+ } else if (map() == roots.stale_register_map()) {
+ CHECK(this == roots.stale_register());
+ } else if (map() == roots.self_reference_marker_map()) {
// Multiple instances of this oddball may exist at once.
CHECK_EQ(kind(), Oddball::kSelfReferenceMarker);
} else {
@@ -1043,37 +1060,33 @@ void Oddball::OddballVerify() {
}
}
-
-void Cell::CellVerify() {
+void Cell::CellVerify(Isolate* isolate) {
CHECK(IsCell());
- VerifyObjectField(kValueOffset);
+ VerifyObjectField(isolate, kValueOffset);
}
-
-void PropertyCell::PropertyCellVerify() {
+void PropertyCell::PropertyCellVerify(Isolate* isolate) {
CHECK(IsPropertyCell());
- VerifyObjectField(kValueOffset);
+ VerifyObjectField(isolate, kValueOffset);
}
-
-void WeakCell::WeakCellVerify() {
+void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(IsWeakCell());
- VerifyObjectField(kValueOffset);
+ VerifyObjectField(isolate, kValueOffset);
}
-void CodeDataContainer::CodeDataContainerVerify() {
+void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(IsCodeDataContainer());
- VerifyObjectField(kNextCodeLinkOffset);
- CHECK(next_code_link()->IsCode() ||
- next_code_link()->IsUndefined(GetIsolate()));
+ VerifyObjectField(isolate, kNextCodeLinkOffset);
+ CHECK(next_code_link()->IsCode() || next_code_link()->IsUndefined(isolate));
}
-void Code::CodeVerify() {
+void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(constant_pool_offset(), InstructionSize());
CHECK(IsAligned(InstructionStart(), kCodeAlignment));
- relocation_info()->ObjectVerify();
+ relocation_info()->ObjectVerify(isolate);
Address last_gc_pc = kNullAddress;
- Isolate* isolate = GetIsolate();
+
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify(isolate);
// Ensure that GC will not iterate twice over the same pointer.
@@ -1084,16 +1097,17 @@ void Code::CodeVerify() {
}
}
-
-void JSArray::JSArrayVerify() {
- JSObjectVerify();
- Isolate* isolate = GetIsolate();
+void JSArray::JSArrayVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
if (!ElementsAreSafeToExamine()) return;
if (elements()->IsUndefined(isolate)) return;
CHECK(elements()->IsFixedArray() || elements()->IsFixedDoubleArray());
+ if (elements()->length() == 0) {
+ CHECK_EQ(elements(), ReadOnlyRoots(isolate).empty_fixed_array());
+ }
if (!length()->IsNumber()) return;
// Verify that the length and the elements backing store are in sync.
if (length()->IsSmi() && HasFastElements()) {
@@ -1104,7 +1118,7 @@ void JSArray::JSArrayVerify() {
// Holey / Packed backing stores might have slack or might have not been
// properly initialized yet.
CHECK(size <= elements()->length() ||
- elements() == isolate->heap()->empty_fixed_array());
+ elements() == ReadOnlyRoots(isolate).empty_fixed_array());
} else {
CHECK(HasDictionaryElements());
uint32_t array_length;
@@ -1124,55 +1138,50 @@ void JSArray::JSArrayVerify() {
}
}
-
-void JSSet::JSSetVerify() {
+void JSSet::JSSetVerify(Isolate* isolate) {
CHECK(IsJSSet());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashSet() || table()->IsUndefined(GetIsolate()));
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
+ CHECK(table()->IsOrderedHashSet() || table()->IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
-
-void JSMap::JSMapVerify() {
+void JSMap::JSMapVerify(Isolate* isolate) {
CHECK(IsJSMap());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashMap() || table()->IsUndefined(GetIsolate()));
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
+ CHECK(table()->IsOrderedHashMap() || table()->IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
-
-void JSSetIterator::JSSetIteratorVerify() {
+void JSSetIterator::JSSetIteratorVerify(Isolate* isolate) {
CHECK(IsJSSetIterator());
- JSObjectVerify();
- VerifyHeapPointer(table());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
CHECK(table()->IsOrderedHashSet());
CHECK(index()->IsSmi());
}
-
-void JSMapIterator::JSMapIteratorVerify() {
+void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(IsJSMapIterator());
- JSObjectVerify();
- VerifyHeapPointer(table());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
CHECK(table()->IsOrderedHashMap());
CHECK(index()->IsSmi());
}
-
-void JSWeakMap::JSWeakMapVerify() {
+void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
CHECK(IsJSWeakMap());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
+ CHECK(table()->IsEphemeronHashTable() || table()->IsUndefined(isolate));
}
-void JSArrayIterator::JSArrayIteratorVerify() {
+void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
CHECK(IsJSArrayIterator());
- JSObjectVerify();
+ JSObjectVerify(isolate);
CHECK(iterated_object()->IsJSReceiver() ||
- iterated_object()->IsUndefined(GetIsolate()));
+ iterated_object()->IsUndefined(isolate));
CHECK_GE(next_index()->Number(), 0);
CHECK_LE(next_index()->Number(), kMaxSafeInteger);
@@ -1187,113 +1196,115 @@ void JSArrayIterator::JSArrayIteratorVerify() {
}
}
-void JSStringIterator::JSStringIteratorVerify() {
+void JSStringIterator::JSStringIteratorVerify(Isolate* isolate) {
CHECK(IsJSStringIterator());
- JSObjectVerify();
+ JSObjectVerify(isolate);
CHECK(string()->IsString());
CHECK_GE(index(), 0);
CHECK_LE(index(), String::kMaxLength);
}
-void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorVerify() {
+void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorVerify(Isolate* isolate) {
CHECK(IsJSAsyncFromSyncIterator());
- JSObjectVerify();
- VerifyHeapPointer(sync_iterator());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, sync_iterator());
}
-void JSWeakSet::JSWeakSetVerify() {
+void JSWeakSet::JSWeakSetVerify(Isolate* isolate) {
CHECK(IsJSWeakSet());
- JSObjectVerify();
- VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, table());
+ CHECK(table()->IsEphemeronHashTable() || table()->IsUndefined(isolate));
}
-void Microtask::MicrotaskVerify() { CHECK(IsMicrotask()); }
+void Microtask::MicrotaskVerify(Isolate* isolate) { CHECK(IsMicrotask()); }
-void CallableTask::CallableTaskVerify() {
+void CallableTask::CallableTaskVerify(Isolate* isolate) {
CHECK(IsCallableTask());
- MicrotaskVerify();
- VerifyHeapPointer(callable());
+ MicrotaskVerify(isolate);
+ VerifyHeapPointer(isolate, callable());
CHECK(callable()->IsCallable());
- VerifyHeapPointer(context());
+ VerifyHeapPointer(isolate, context());
CHECK(context()->IsContext());
}
-void CallbackTask::CallbackTaskVerify() {
+void CallbackTask::CallbackTaskVerify(Isolate* isolate) {
CHECK(IsCallbackTask());
- MicrotaskVerify();
- VerifyHeapPointer(callback());
- VerifyHeapPointer(data());
+ MicrotaskVerify(isolate);
+ VerifyHeapPointer(isolate, callback());
+ VerifyHeapPointer(isolate, data());
}
-void PromiseReactionJobTask::PromiseReactionJobTaskVerify() {
+void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
CHECK(IsPromiseReactionJobTask());
- MicrotaskVerify();
- Isolate* isolate = GetIsolate();
- VerifyPointer(argument());
- VerifyHeapPointer(context());
+ MicrotaskVerify(isolate);
+ VerifyPointer(isolate, argument());
+ VerifyHeapPointer(isolate, context());
CHECK(context()->IsContext());
- VerifyHeapPointer(handler());
+ VerifyHeapPointer(isolate, handler());
CHECK(handler()->IsUndefined(isolate) || handler()->IsCallable());
- VerifyHeapPointer(promise_or_capability());
+ VerifyHeapPointer(isolate, promise_or_capability());
CHECK(promise_or_capability()->IsJSPromise() ||
promise_or_capability()->IsPromiseCapability());
}
-void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify() {
+void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify(
+ Isolate* isolate) {
CHECK(IsPromiseFulfillReactionJobTask());
- PromiseReactionJobTaskVerify();
+ PromiseReactionJobTaskVerify(isolate);
}
-void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskVerify() {
+void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskVerify(
+ Isolate* isolate) {
CHECK(IsPromiseRejectReactionJobTask());
- PromiseReactionJobTaskVerify();
+ PromiseReactionJobTaskVerify(isolate);
}
-void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskVerify() {
+void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskVerify(
+ Isolate* isolate) {
CHECK(IsPromiseResolveThenableJobTask());
- MicrotaskVerify();
- VerifyHeapPointer(context());
+ MicrotaskVerify(isolate);
+ VerifyHeapPointer(isolate, context());
CHECK(context()->IsContext());
- VerifyHeapPointer(promise_to_resolve());
+ VerifyHeapPointer(isolate, promise_to_resolve());
CHECK(promise_to_resolve()->IsJSPromise());
- VerifyHeapPointer(then());
+ VerifyHeapPointer(isolate, then());
CHECK(then()->IsCallable());
CHECK(then()->IsJSReceiver());
- VerifyHeapPointer(thenable());
+ VerifyHeapPointer(isolate, thenable());
CHECK(thenable()->IsJSReceiver());
}
-void PromiseCapability::PromiseCapabilityVerify() {
+void PromiseCapability::PromiseCapabilityVerify(Isolate* isolate) {
CHECK(IsPromiseCapability());
- Isolate* isolate = GetIsolate();
- VerifyHeapPointer(promise());
+
+ VerifyHeapPointer(isolate, promise());
CHECK(promise()->IsJSReceiver() || promise()->IsUndefined(isolate));
- VerifyPointer(resolve());
- VerifyPointer(reject());
+ VerifyPointer(isolate, resolve());
+ VerifyPointer(isolate, reject());
}
-void PromiseReaction::PromiseReactionVerify() {
+void PromiseReaction::PromiseReactionVerify(Isolate* isolate) {
CHECK(IsPromiseReaction());
- Isolate* isolate = GetIsolate();
- VerifyPointer(next());
+
+ VerifyPointer(isolate, next());
CHECK(next()->IsSmi() || next()->IsPromiseReaction());
- VerifyHeapPointer(reject_handler());
+ VerifyHeapPointer(isolate, reject_handler());
CHECK(reject_handler()->IsUndefined(isolate) ||
reject_handler()->IsCallable());
- VerifyHeapPointer(fulfill_handler());
+ VerifyHeapPointer(isolate, fulfill_handler());
CHECK(fulfill_handler()->IsUndefined(isolate) ||
fulfill_handler()->IsCallable());
- VerifyHeapPointer(promise_or_capability());
+ VerifyHeapPointer(isolate, promise_or_capability());
CHECK(promise_or_capability()->IsJSPromise() ||
promise_or_capability()->IsPromiseCapability());
}
-void JSPromise::JSPromiseVerify() {
+void JSPromise::JSPromiseVerify(Isolate* isolate) {
CHECK(IsJSPromise());
- JSObjectVerify();
- VerifyPointer(reactions_or_result());
+ JSObjectVerify(isolate);
+ VerifyPointer(isolate, reactions_or_result());
VerifySmiField(kFlagsOffset);
if (status() == Promise::kPending) {
CHECK(reactions()->IsSmi() || reactions()->IsPromiseReaction());
@@ -1301,9 +1312,9 @@ void JSPromise::JSPromiseVerify() {
}
template <typename Derived>
-void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify() {
+void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
+ Isolate* isolate) {
CHECK(IsSmallOrderedHashTable());
- Isolate* isolate = GetIsolate();
int capacity = Capacity();
CHECK_GE(capacity, kMinCapacity);
@@ -1326,7 +1337,7 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify() {
for (int entry = 0; entry < NumberOfElements(); entry++) {
for (int offset = 0; offset < Derived::kEntrySize; offset++) {
Object* val = GetDataEntry(entry, offset);
- VerifyPointer(val);
+ VerifyPointer(isolate, val);
}
}
@@ -1347,14 +1358,13 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify() {
}
}
-template void
-SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify();
-template void
-SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify();
+template void SmallOrderedHashTable<
+ SmallOrderedHashMap>::SmallOrderedHashTableVerify(Isolate* isolate);
+template void SmallOrderedHashTable<
+ SmallOrderedHashSet>::SmallOrderedHashTableVerify(Isolate* isolate);
-void JSRegExp::JSRegExpVerify() {
- JSObjectVerify();
- Isolate* isolate = GetIsolate();
+void JSRegExp::JSRegExpVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
CHECK(data()->IsUndefined(isolate) || data()->IsFixedArray());
switch (TypeTag()) {
case JSRegExp::ATOM: {
@@ -1389,19 +1399,18 @@ void JSRegExp::JSRegExpVerify() {
}
}
-void JSRegExpStringIterator::JSRegExpStringIteratorVerify() {
+void JSRegExpStringIterator::JSRegExpStringIteratorVerify(Isolate* isolate) {
CHECK(IsJSRegExpStringIterator());
- JSObjectVerify();
+ JSObjectVerify(isolate);
CHECK(iterating_string()->IsString());
CHECK(iterating_regexp()->IsObject());
VerifySmiField(kFlagsOffset);
}
-void JSProxy::JSProxyVerify() {
+void JSProxy::JSProxyVerify(Isolate* isolate) {
CHECK(IsJSProxy());
- VerifyPointer(target());
- VerifyPointer(handler());
- Isolate* isolate = GetIsolate();
+ VerifyPointer(isolate, target());
+ VerifyPointer(isolate, handler());
if (!IsRevoked()) {
CHECK_EQ(target()->IsCallable(), map()->is_callable());
CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
@@ -1411,78 +1420,68 @@ void JSProxy::JSProxyVerify() {
CHECK_EQ(0, map()->NumberOfOwnDescriptors());
}
-
-void JSArrayBuffer::JSArrayBufferVerify() {
+void JSArrayBuffer::JSArrayBufferVerify(Isolate* isolate) {
CHECK(IsJSArrayBuffer());
- JSObjectVerify();
- VerifyPointer(byte_length());
+ JSObjectVerify(isolate);
+ VerifyPointer(isolate, byte_length());
CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber() ||
- byte_length()->IsUndefined(GetIsolate()));
+ byte_length()->IsUndefined(isolate));
}
-
-void JSArrayBufferView::JSArrayBufferViewVerify() {
+void JSArrayBufferView::JSArrayBufferViewVerify(Isolate* isolate) {
CHECK(IsJSArrayBufferView());
- JSObjectVerify();
- VerifyPointer(buffer());
- Isolate* isolate = GetIsolate();
+ JSObjectVerify(isolate);
+ VerifyPointer(isolate, buffer());
CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
buffer() == Smi::kZero);
- VerifyPointer(raw_byte_offset());
+ VerifyPointer(isolate, raw_byte_offset());
CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
raw_byte_offset()->IsUndefined(isolate));
- VerifyPointer(raw_byte_length());
+ VerifyPointer(isolate, raw_byte_length());
CHECK(raw_byte_length()->IsSmi() || raw_byte_length()->IsHeapNumber() ||
raw_byte_length()->IsUndefined(isolate));
}
-
-void JSTypedArray::JSTypedArrayVerify() {
+void JSTypedArray::JSTypedArrayVerify(Isolate* isolate) {
CHECK(IsJSTypedArray());
- JSArrayBufferViewVerify();
- VerifyPointer(raw_length());
- CHECK(raw_length()->IsSmi() || raw_length()->IsUndefined(GetIsolate()));
- VerifyPointer(elements());
+ JSArrayBufferViewVerify(isolate);
+ VerifyPointer(isolate, raw_length());
+ CHECK(raw_length()->IsSmi() || raw_length()->IsUndefined(isolate));
+ VerifyPointer(isolate, elements());
}
-
-void JSDataView::JSDataViewVerify() {
+void JSDataView::JSDataViewVerify(Isolate* isolate) {
CHECK(IsJSDataView());
- JSArrayBufferViewVerify();
-}
-
-
-void Foreign::ForeignVerify() {
- CHECK(IsForeign());
+ JSArrayBufferViewVerify(isolate);
}
+void Foreign::ForeignVerify(Isolate* isolate) { CHECK(IsForeign()); }
-void AsyncGeneratorRequest::AsyncGeneratorRequestVerify() {
+void AsyncGeneratorRequest::AsyncGeneratorRequestVerify(Isolate* isolate) {
CHECK(IsAsyncGeneratorRequest());
VerifySmiField(kResumeModeOffset);
CHECK_GE(resume_mode(), JSGeneratorObject::kNext);
CHECK_LE(resume_mode(), JSGeneratorObject::kThrow);
CHECK(promise()->IsJSPromise());
- VerifyPointer(value());
- VerifyPointer(next());
- next()->ObjectVerify();
+ VerifyPointer(isolate, value());
+ VerifyPointer(isolate, next());
+ next()->ObjectVerify(isolate);
}
-void BigInt::BigIntVerify() {
+void BigInt::BigIntVerify(Isolate* isolate) {
CHECK(IsBigInt());
CHECK_GE(length(), 0);
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
}
-void JSModuleNamespace::JSModuleNamespaceVerify() {
+void JSModuleNamespace::JSModuleNamespaceVerify(Isolate* isolate) {
CHECK(IsJSModuleNamespace());
- VerifyPointer(module());
+ VerifyPointer(isolate, module());
}
-void ModuleInfoEntry::ModuleInfoEntryVerify() {
- Isolate* isolate = GetIsolate();
+void ModuleInfoEntry::ModuleInfoEntryVerify(Isolate* isolate) {
CHECK(IsModuleInfoEntry());
CHECK(export_name()->IsUndefined(isolate) || export_name()->IsString());
@@ -1499,16 +1498,16 @@ void ModuleInfoEntry::ModuleInfoEntryVerify() {
local_name()->IsUndefined(isolate));
}
-void Module::ModuleVerify() {
+void Module::ModuleVerify(Isolate* isolate) {
CHECK(IsModule());
- VerifyPointer(code());
- VerifyPointer(exports());
- VerifyPointer(module_namespace());
- VerifyPointer(requested_modules());
- VerifyPointer(script());
- VerifyPointer(import_meta());
- VerifyPointer(exception());
+ VerifyPointer(isolate, code());
+ VerifyPointer(isolate, exports());
+ VerifyPointer(isolate, module_namespace());
+ VerifyPointer(isolate, requested_modules());
+ VerifyPointer(isolate, script());
+ VerifyPointer(isolate, import_meta());
+ VerifyPointer(isolate, exception());
VerifySmiField(kHashOffset);
VerifySmiField(kStatusOffset);
@@ -1517,9 +1516,9 @@ void Module::ModuleVerify() {
(status() == kInstantiating && code()->IsJSFunction()) ||
(code()->IsSharedFunctionInfo()));
- CHECK_EQ(status() == kErrored, !exception()->IsTheHole(GetIsolate()));
+ CHECK_EQ(status() == kErrored, !exception()->IsTheHole(isolate));
- CHECK(module_namespace()->IsUndefined(GetIsolate()) ||
+ CHECK(module_namespace()->IsUndefined(isolate) ||
module_namespace()->IsJSModuleNamespace());
if (module_namespace()->IsJSModuleNamespace()) {
CHECK_LE(kInstantiating, status());
@@ -1528,63 +1527,104 @@ void Module::ModuleVerify() {
CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
- CHECK(import_meta()->IsTheHole(GetIsolate()) || import_meta()->IsJSObject());
+ CHECK(import_meta()->IsTheHole(isolate) || import_meta()->IsJSObject());
CHECK_NE(hash(), 0);
}
-void PrototypeInfo::PrototypeInfoVerify() {
+void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
CHECK(IsPrototypeInfo());
- CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(GetIsolate()));
- if (prototype_users()->IsFixedArrayOfWeakCells()) {
- FixedArrayOfWeakCells::cast(prototype_users())->FixedArrayVerify();
+ CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(isolate));
+ if (prototype_users()->IsWeakArrayList()) {
+ PrototypeUsers::Verify(WeakArrayList::cast(prototype_users()));
} else {
CHECK(prototype_users()->IsSmi());
}
}
-void Tuple2::Tuple2Verify() {
+void PrototypeUsers::Verify(WeakArrayList* array) {
+ if (array->length() == 0) {
+ // Allow empty & uninitialized lists.
+ return;
+ }
+ // Verify empty slot chain.
+ int empty_slot = Smi::ToInt(empty_slot_index(array));
+ int empty_slots_count = 0;
+ while (empty_slot != kNoEmptySlotsMarker) {
+ CHECK_GT(empty_slot, 0);
+ CHECK_LT(empty_slot, array->length());
+ empty_slot = Smi::ToInt(array->Get(empty_slot)->ToSmi());
+ ++empty_slots_count;
+ }
+
+ // Verify that all elements are either weak pointers or SMIs marking empty
+ // slots.
+ int weak_maps_count = 0;
+ for (int i = kFirstIndex; i < array->length(); ++i) {
+ HeapObject* heap_object;
+ MaybeObject* object = array->Get(i);
+ if ((object->ToWeakHeapObject(&heap_object) && heap_object->IsMap()) ||
+ object->IsClearedWeakHeapObject()) {
+ ++weak_maps_count;
+ } else {
+ CHECK(object->IsSmi());
+ }
+ }
+
+ CHECK_EQ(weak_maps_count + empty_slots_count + 1, array->length());
+}
+
+void Tuple2::Tuple2Verify(Isolate* isolate) {
CHECK(IsTuple2());
- Heap* heap = GetHeap();
- if (this == heap->empty_enum_cache()) {
- CHECK_EQ(heap->empty_fixed_array(), EnumCache::cast(this)->keys());
- CHECK_EQ(heap->empty_fixed_array(), EnumCache::cast(this)->indices());
+ Heap* heap = isolate->heap();
+ if (this == ReadOnlyRoots(heap).empty_enum_cache()) {
+ CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(),
+ EnumCache::cast(this)->keys());
+ CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(),
+ EnumCache::cast(this)->indices());
} else {
- VerifyObjectField(kValue1Offset);
- VerifyObjectField(kValue2Offset);
+ VerifyObjectField(isolate, kValue1Offset);
+ VerifyObjectField(isolate, kValue2Offset);
}
}
-void Tuple3::Tuple3Verify() {
+void Tuple3::Tuple3Verify(Isolate* isolate) {
CHECK(IsTuple3());
- VerifyObjectField(kValue1Offset);
- VerifyObjectField(kValue2Offset);
- VerifyObjectField(kValue3Offset);
+ VerifyObjectField(isolate, kValue1Offset);
+ VerifyObjectField(isolate, kValue2Offset);
+ VerifyObjectField(isolate, kValue3Offset);
}
-void WasmCompiledModule::WasmCompiledModuleVerify() {
- CHECK(IsWasmCompiledModule());
- VerifyObjectField(kNextInstanceOffset);
- VerifyObjectField(kPrevInstanceOffset);
- VerifyObjectField(kOwningInstanceOffset);
- VerifyObjectField(kNativeModuleOffset);
+void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
+ Isolate* isolate) {
+ CHECK(IsObjectBoilerplateDescription());
+ CHECK_GE(this->length(),
+ ObjectBoilerplateDescription::kDescriptionStartIndex);
+ this->FixedArrayVerify(isolate);
}
-void WasmDebugInfo::WasmDebugInfoVerify() {
+void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionVerify(
+ Isolate* isolate) {
+ CHECK(IsArrayBoilerplateDescription());
+ CHECK(constant_elements()->IsFixedArrayBase());
+ VerifyObjectField(isolate, kConstantElementsOffset);
+}
+
+void WasmDebugInfo::WasmDebugInfoVerify(Isolate* isolate) {
CHECK(IsWasmDebugInfo());
- VerifyObjectField(kInstanceOffset);
+ VerifyObjectField(isolate, kInstanceOffset);
CHECK(wasm_instance()->IsWasmInstanceObject());
- VerifyObjectField(kInterpreterHandleOffset);
- CHECK(interpreter_handle()->IsUndefined(GetIsolate()) ||
+ VerifyObjectField(isolate, kInterpreterHandleOffset);
+ CHECK(interpreter_handle()->IsUndefined(isolate) ||
interpreter_handle()->IsForeign());
- VerifyObjectField(kInterpretedFunctionsOffset);
- VerifyObjectField(kLocalsNamesOffset);
- VerifyObjectField(kCWasmEntriesOffset);
- VerifyObjectField(kCWasmEntryMapOffset);
+ VerifyObjectField(isolate, kInterpretedFunctionsOffset);
+ VerifyObjectField(isolate, kLocalsNamesOffset);
+ VerifyObjectField(isolate, kCWasmEntriesOffset);
+ VerifyObjectField(isolate, kCWasmEntryMapOffset);
}
-void WasmInstanceObject::WasmInstanceObjectVerify() {
- JSObjectVerify();
+void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
CHECK(IsWasmInstanceObject());
// Just generically check all tagged fields. Don't check the untagged fields,
@@ -1592,173 +1632,164 @@ void WasmInstanceObject::WasmInstanceObjectVerify() {
// WasmInstanceObject is not fully set up yet.
for (int offset = kHeaderSize; offset < kFirstUntaggedOffset;
offset += kPointerSize) {
- VerifyObjectField(offset);
+ VerifyObjectField(isolate, offset);
}
}
-void WasmExportedFunctionData::WasmExportedFunctionDataVerify() {
+void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
+ Isolate* isolate) {
CHECK(IsWasmExportedFunctionData());
- VerifyObjectField(kWrapperCodeOffset);
+ VerifyObjectField(isolate, kWrapperCodeOffset);
CHECK(wrapper_code()->kind() == Code::JS_TO_WASM_FUNCTION ||
wrapper_code()->kind() == Code::C_WASM_ENTRY);
- VerifyObjectField(kInstanceOffset);
+ VerifyObjectField(isolate, kInstanceOffset);
VerifySmiField(kFunctionIndexOffset);
}
-void WasmSharedModuleData::WasmSharedModuleDataVerify() {
- CHECK(IsWasmSharedModuleData());
- VerifyObjectField(kManagedModuleOffset);
- CHECK(managed_module()->IsForeign());
- VerifyObjectField(kModuleBytesOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kAsmJsOffsetTableOffset);
- VerifyObjectField(kBreakPointInfosOffset);
+void WasmModuleObject::WasmModuleObjectVerify(Isolate* isolate) {
+ CHECK(IsWasmModuleObject());
+ VerifyObjectField(isolate, kNativeModuleOffset);
+ VerifyObjectField(isolate, kExportWrappersOffset);
+ VerifyObjectField(isolate, kScriptOffset);
+ VerifyObjectField(isolate, kAsmJsOffsetTableOffset);
+ VerifyObjectField(isolate, kBreakPointInfosOffset);
}
-void DataHandler::DataHandlerVerify() {
+void DataHandler::DataHandlerVerify(Isolate* isolate) {
CHECK(IsDataHandler());
CHECK_IMPLIES(!smi_handler()->IsSmi(),
smi_handler()->IsCode() && IsStoreHandler());
CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
int data_count = data_field_count();
if (data_count >= 1) {
- VerifyObjectField(kData1Offset);
+ VerifyMaybeObjectField(isolate, kData1Offset);
}
if (data_count >= 2) {
- VerifyObjectField(kData2Offset);
+ VerifyObjectField(isolate, kData2Offset);
}
if (data_count >= 3) {
- VerifyObjectField(kData3Offset);
+ VerifyObjectField(isolate, kData3Offset);
}
}
-void LoadHandler::LoadHandlerVerify() {
- DataHandler::DataHandlerVerify();
+void LoadHandler::LoadHandlerVerify(Isolate* isolate) {
+ DataHandler::DataHandlerVerify(isolate);
// TODO(ishell): check handler integrity
}
-void StoreHandler::StoreHandlerVerify() {
- DataHandler::DataHandlerVerify();
+void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
+ DataHandler::DataHandlerVerify(isolate);
// TODO(ishell): check handler integrity
}
-void AccessorInfo::AccessorInfoVerify() {
+void AccessorInfo::AccessorInfoVerify(Isolate* isolate) {
CHECK(IsAccessorInfo());
- VerifyPointer(name());
- VerifyPointer(expected_receiver_type());
- VerifyForeignPointer(this, getter());
- VerifyForeignPointer(this, setter());
- VerifyForeignPointer(this, js_getter());
- VerifyPointer(data());
+ VerifyPointer(isolate, name());
+ VerifyPointer(isolate, expected_receiver_type());
+ VerifyForeignPointer(isolate, this, getter());
+ VerifyForeignPointer(isolate, this, setter());
+ VerifyForeignPointer(isolate, this, js_getter());
+ VerifyPointer(isolate, data());
}
-
-void AccessorPair::AccessorPairVerify() {
+void AccessorPair::AccessorPairVerify(Isolate* isolate) {
CHECK(IsAccessorPair());
- VerifyPointer(getter());
- VerifyPointer(setter());
+ VerifyPointer(isolate, getter());
+ VerifyPointer(isolate, setter());
}
-
-void AccessCheckInfo::AccessCheckInfoVerify() {
+void AccessCheckInfo::AccessCheckInfoVerify(Isolate* isolate) {
CHECK(IsAccessCheckInfo());
- VerifyPointer(callback());
- VerifyPointer(named_interceptor());
- VerifyPointer(indexed_interceptor());
- VerifyPointer(data());
+ VerifyPointer(isolate, callback());
+ VerifyPointer(isolate, named_interceptor());
+ VerifyPointer(isolate, indexed_interceptor());
+ VerifyPointer(isolate, data());
}
-void CallHandlerInfo::CallHandlerInfoVerify() {
+void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
CHECK(IsCallHandlerInfo());
- CHECK(map() == GetHeap()->side_effect_call_handler_info_map() ||
- map() == GetHeap()->side_effect_free_call_handler_info_map() ||
- map() == GetHeap()->next_call_side_effect_free_call_handler_info_map());
- VerifyPointer(callback());
- VerifyPointer(js_callback());
- VerifyPointer(data());
+ CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
+ map() ==
+ ReadOnlyRoots(isolate).side_effect_free_call_handler_info_map() ||
+ map() == ReadOnlyRoots(isolate)
+ .next_call_side_effect_free_call_handler_info_map());
+ VerifyPointer(isolate, callback());
+ VerifyPointer(isolate, js_callback());
+ VerifyPointer(isolate, data());
}
-void InterceptorInfo::InterceptorInfoVerify() {
+void InterceptorInfo::InterceptorInfoVerify(Isolate* isolate) {
CHECK(IsInterceptorInfo());
- VerifyForeignPointer(this, getter());
- VerifyForeignPointer(this, setter());
- VerifyForeignPointer(this, query());
- VerifyForeignPointer(this, deleter());
- VerifyForeignPointer(this, enumerator());
- VerifyPointer(data());
+ VerifyForeignPointer(isolate, this, getter());
+ VerifyForeignPointer(isolate, this, setter());
+ VerifyForeignPointer(isolate, this, query());
+ VerifyForeignPointer(isolate, this, deleter());
+ VerifyForeignPointer(isolate, this, enumerator());
+ VerifyPointer(isolate, data());
VerifySmiField(kFlagsOffset);
}
-
-void TemplateInfo::TemplateInfoVerify() {
- VerifyPointer(tag());
- VerifyPointer(property_list());
- VerifyPointer(property_accessors());
+void TemplateInfo::TemplateInfoVerify(Isolate* isolate) {
+ VerifyPointer(isolate, tag());
+ VerifyPointer(isolate, property_list());
+ VerifyPointer(isolate, property_accessors());
}
-
-void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
+void FunctionTemplateInfo::FunctionTemplateInfoVerify(Isolate* isolate) {
CHECK(IsFunctionTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(serial_number());
- VerifyPointer(call_code());
- VerifyPointer(prototype_template());
- VerifyPointer(parent_template());
- VerifyPointer(named_property_handler());
- VerifyPointer(indexed_property_handler());
- VerifyPointer(instance_template());
- VerifyPointer(signature());
- VerifyPointer(access_check_info());
- VerifyPointer(cached_property_name());
-}
-
-
-void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
+ TemplateInfoVerify(isolate);
+ VerifyPointer(isolate, serial_number());
+ VerifyPointer(isolate, call_code());
+ VerifyPointer(isolate, prototype_template());
+ VerifyPointer(isolate, parent_template());
+ VerifyPointer(isolate, named_property_handler());
+ VerifyPointer(isolate, indexed_property_handler());
+ VerifyPointer(isolate, instance_template());
+ VerifyPointer(isolate, signature());
+ VerifyPointer(isolate, access_check_info());
+ VerifyPointer(isolate, cached_property_name());
+}
+
+void ObjectTemplateInfo::ObjectTemplateInfoVerify(Isolate* isolate) {
CHECK(IsObjectTemplateInfo());
- TemplateInfoVerify();
- VerifyPointer(constructor());
- VerifyPointer(data());
+ TemplateInfoVerify(isolate);
+ VerifyPointer(isolate, constructor());
+ VerifyPointer(isolate, data());
}
-
-void AllocationSite::AllocationSiteVerify() {
+void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(IsAllocationSite());
}
-
-void AllocationMemento::AllocationMementoVerify() {
+void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
CHECK(IsAllocationMemento());
- VerifyHeapPointer(allocation_site());
+ VerifyHeapPointer(isolate, allocation_site());
CHECK(!IsValid() || GetAllocationSite()->IsAllocationSite());
}
-
-void Script::ScriptVerify() {
+void Script::ScriptVerify(Isolate* isolate) {
CHECK(IsScript());
- VerifyPointer(source());
- VerifyPointer(name());
- VerifyPointer(wrapper());
- VerifyPointer(line_ends());
+ VerifyPointer(isolate, source());
+ VerifyPointer(isolate, name());
+ VerifyPointer(isolate, line_ends());
for (int i = 0; i < shared_function_infos()->length(); ++i) {
MaybeObject* maybe_object = shared_function_infos()->Get(i);
HeapObject* heap_object;
CHECK(maybe_object->IsWeakHeapObject() ||
maybe_object->IsClearedWeakHeapObject() ||
(maybe_object->ToStrongHeapObject(&heap_object) &&
- heap_object->IsUndefined(GetIsolate())));
+ heap_object->IsUndefined(isolate)));
}
}
-
-void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->FixedArrayVerify();
+void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
+ FixedArray::cast(this)->FixedArrayVerify(isolate);
if (FLAG_enable_slow_asserts) {
- Isolate* isolate = GetIsolate();
for (int i = 0; i < length(); i++) {
Object* e = FixedArray::get(i);
if (e->IsWeakCell()) {
if (!WeakCell::cast(e)->cleared()) {
- Map::cast(WeakCell::cast(e)->value())->DictionaryMapVerify();
+ Map::cast(WeakCell::cast(e)->value())->DictionaryMapVerify(isolate);
}
} else {
CHECK(e->IsUndefined(isolate));
@@ -1767,48 +1798,71 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
}
}
-
-void DebugInfo::DebugInfoVerify() {
+void DebugInfo::DebugInfoVerify(Isolate* isolate) {
CHECK(IsDebugInfo());
- VerifyPointer(shared());
- VerifyPointer(debug_bytecode_array());
- VerifyPointer(break_points());
+ VerifyPointer(isolate, shared());
+ VerifyPointer(isolate, function_identifier());
+ VerifyPointer(isolate, original_bytecode_array());
+ VerifyPointer(isolate, break_points());
}
-
-void StackFrameInfo::StackFrameInfoVerify() {
+void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
CHECK(IsStackFrameInfo());
- VerifyPointer(script_name());
- VerifyPointer(script_name_or_source_url());
- VerifyPointer(function_name());
+ VerifyPointer(isolate, script_name());
+ VerifyPointer(isolate, script_name_or_source_url());
+ VerifyPointer(isolate, function_name());
}
-void PreParsedScopeData::PreParsedScopeDataVerify() {
+void PreParsedScopeData::PreParsedScopeDataVerify(Isolate* isolate) {
CHECK(IsPreParsedScopeData());
CHECK(scope_data()->IsByteArray());
- CHECK(child_data()->IsFixedArray());
+ CHECK_GE(length(), 0);
+
+ for (int i = 0; i < length(); ++i) {
+ Object* child = child_data(i);
+ CHECK(child->IsPreParsedScopeData() || child->IsNull());
+ VerifyPointer(isolate, child);
+ }
+}
+
+void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopeVerify(
+ Isolate* isolate) {
+ CHECK(IsUncompiledDataWithPreParsedScope());
+ VerifyPointer(isolate, pre_parsed_scope_data());
+}
+
+void UncompiledDataWithoutPreParsedScope::
+ UncompiledDataWithoutPreParsedScopeVerify(Isolate* isolate) {
+ CHECK(IsUncompiledDataWithoutPreParsedScope());
}
-void InterpreterData::InterpreterDataVerify() {
+void InterpreterData::InterpreterDataVerify(Isolate* isolate) {
CHECK(IsInterpreterData());
CHECK(bytecode_array()->IsBytecodeArray());
CHECK(interpreter_trampoline()->IsCode());
}
#ifdef V8_INTL_SUPPORT
-void JSLocale::JSLocaleVerify() {
- VerifyObjectField(kLanguageOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kRegionOffset);
- VerifyObjectField(kBaseNameOffset);
- VerifyObjectField(kLocaleOffset);
+void JSLocale::JSLocaleVerify(Isolate* isolate) {
+ VerifyObjectField(isolate, kLanguageOffset);
+ VerifyObjectField(isolate, kScriptOffset);
+ VerifyObjectField(isolate, kRegionOffset);
+ VerifyObjectField(isolate, kBaseNameOffset);
+ VerifyObjectField(isolate, kLocaleOffset);
// Unicode extension fields.
- VerifyObjectField(kCalendarOffset);
- VerifyObjectField(kCaseFirstOffset);
- VerifyObjectField(kCollationOffset);
- VerifyObjectField(kHourCycleOffset);
- VerifyObjectField(kNumericOffset);
- VerifyObjectField(kNumberingSystemOffset);
+ VerifyObjectField(isolate, kCalendarOffset);
+ VerifyObjectField(isolate, kCaseFirstOffset);
+ VerifyObjectField(isolate, kCollationOffset);
+ VerifyObjectField(isolate, kHourCycleOffset);
+ VerifyObjectField(isolate, kNumericOffset);
+ VerifyObjectField(isolate, kNumberingSystemOffset);
+}
+
+void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kStyleOffset);
+ VerifyObjectField(isolate, kNumericOffset);
+ VerifyObjectField(isolate, kFormatterOffset);
}
#endif // V8_INTL_SUPPORT
@@ -1816,7 +1870,8 @@ void JSLocale::JSLocaleVerify() {
#ifdef DEBUG
-void JSObject::IncrementSpillStatistics(SpillInformation* info) {
+void JSObject::IncrementSpillStatistics(Isolate* isolate,
+ SpillInformation* info) {
info->number_of_objects_++;
// Named properties
if (HasFastProperties()) {
@@ -1847,7 +1902,6 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
- Isolate* isolate = GetIsolate();
for (int i = 0; i < len; i++) {
if (e->get(i)->IsTheHole(isolate)) holes++;
}
@@ -1919,7 +1973,6 @@ void JSObject::SpillInformation::Print() {
PrintF("\n");
}
-
bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
if (valid_entries == -1) valid_entries = number_of_descriptors();
Name* current_key = nullptr;
@@ -1941,19 +1994,20 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
return true;
}
-
bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
DCHECK_EQ(valid_entries, -1);
Name* prev_key = nullptr;
PropertyKind prev_kind = kData;
PropertyAttributes prev_attributes = NONE;
uint32_t prev_hash = 0;
+
for (int i = 0; i < number_of_transitions(); i++) {
Name* key = GetSortedKey(i);
uint32_t hash = key->Hash();
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
- if (!TransitionsAccessor::IsSpecialTransition(key)) {
+ if (!TransitionsAccessor::IsSpecialTransition(key->GetReadOnlyRoots(),
+ key)) {
Map* target = GetTarget(i);
PropertyDetails details =
TransitionsAccessor::GetTargetDetails(key, target);
@@ -2025,10 +2079,9 @@ bool CanLeak(Object* obj, Heap* heap, bool skip_weak_cell) {
return CanLeak(HeapObject::cast(obj)->map(), heap, skip_weak_cell);
}
-
-void Code::VerifyEmbeddedObjects(VerifyMode mode) {
+void Code::VerifyEmbeddedObjects(Isolate* isolate, VerifyMode mode) {
if (kind() == OPTIMIZED_FUNCTION) return;
- Heap* heap = GetIsolate()->heap();
+ Heap* heap = isolate->heap();
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
bool skip_weak_cell = (mode == kNoContextSpecificPointers) ? false : true;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects-definitions.h
new file mode 100644
index 0000000000..5e922a487c
--- /dev/null
+++ b/deps/v8/src/objects-definitions.h
@@ -0,0 +1,344 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_DEFINITIONS_H_
+#define V8_OBJECTS_DEFINITIONS_H_
+
+#include "src/heap-symbols.h"
+
+namespace v8 {
+
+namespace internal {
+
+// All Maps have a field instance_type containing a InstanceType.
+// It describes the type of the instances.
+//
+// As an example, a JavaScript object is a heap object and its map
+// instance_type is JS_OBJECT_TYPE.
+//
+// The names of the string instance types are intended to systematically
+// mirror their encoding in the instance_type field of the map. The default
+// encoding is considered TWO_BYTE. It is not mentioned in the name. ONE_BYTE
+// encoding is mentioned explicitly in the name. Likewise, the default
+// representation is considered sequential. It is not mentioned in the
+// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
+// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
+// string) or a INTERNALIZED_STRING_TYPE (if it is a internalized string).
+//
+// NOTE: The following things are some that depend on the string types having
+// instance_types that are less than those of all other types:
+// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
+// Object::IsString.
+//
+// NOTE: Everything following JS_VALUE_TYPE is considered a
+// JSObject for GC purposes. The first four entries here have typeof
+// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
+//
+// NOTE: List had to be split into two, because of conditional item(s) from
+// INTL namespace. They can't just be appended to the end, because of the
+// checks we do in tests (expecting JS_FUNCTION_TYPE to be last).
+#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(THIN_STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(THIN_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(BIGINT_TYPE) \
+ V(ODDBALL_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(CODE_TYPE) \
+ V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FOREIGN_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(BYTECODE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ V(FIXED_BIGINT64_ARRAY_TYPE) \
+ V(FIXED_BIGUINT64_ARRAY_TYPE) \
+ \
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(DEBUG_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(INTERPRETER_DATA_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(MODULE_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
+ V(PROMISE_REACTION_TYPE) \
+ V(PROTOTYPE_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
+ V(TUPLE2_TYPE) \
+ V(TUPLE3_TYPE) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(WASM_DEBUG_INFO_TYPE) \
+ V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ \
+ V(CALLABLE_TASK_TYPE) \
+ V(CALLBACK_TASK_TYPE) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
+ \
+ V(ALLOCATION_SITE_TYPE) \
+ \
+ V(FIXED_ARRAY_TYPE) \
+ V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(HASH_TABLE_TYPE) \
+ V(ORDERED_HASH_MAP_TYPE) \
+ V(ORDERED_HASH_SET_TYPE) \
+ V(NAME_DICTIONARY_TYPE) \
+ V(GLOBAL_DICTIONARY_TYPE) \
+ V(NUMBER_DICTIONARY_TYPE) \
+ V(SIMPLE_NUMBER_DICTIONARY_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(EPHEMERON_HASH_TABLE_TYPE) \
+ V(SCOPE_INFO_TYPE) \
+ V(SCRIPT_CONTEXT_TABLE_TYPE) \
+ \
+ V(BLOCK_CONTEXT_TYPE) \
+ V(CATCH_CONTEXT_TYPE) \
+ V(DEBUG_EVALUATE_CONTEXT_TYPE) \
+ V(EVAL_CONTEXT_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(MODULE_CONTEXT_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(SCRIPT_CONTEXT_TYPE) \
+ V(WITH_CONTEXT_TYPE) \
+ \
+ V(WEAK_FIXED_ARRAY_TYPE) \
+ V(DESCRIPTOR_ARRAY_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
+ \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(CELL_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
+ V(FEEDBACK_CELL_TYPE) \
+ V(FEEDBACK_VECTOR_TYPE) \
+ V(LOAD_HANDLER_TYPE) \
+ V(PRE_PARSED_SCOPE_DATA_TYPE) \
+ V(PROPERTY_ARRAY_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SMALL_ORDERED_HASH_SET_TYPE) \
+ V(STORE_HANDLER_TYPE) \
+ V(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
+ V(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
+ V(WEAK_CELL_TYPE) \
+ V(WEAK_ARRAY_LIST_TYPE) \
+ \
+ V(JS_PROXY_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_MODULE_NAMESPACE_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
+ V(JS_VALUE_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ \
+ V(JS_ARGUMENTS_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_ITERATOR_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_ERROR_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_MAP_TYPE) \
+ V(JS_MAP_KEY_ITERATOR_TYPE) \
+ V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_MAP_VALUE_ITERATOR_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ V(JS_PROMISE_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ V(JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
+ V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE)
+
+#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \
+ V(WASM_GLOBAL_TYPE) \
+ V(WASM_INSTANCE_TYPE) \
+ V(WASM_MEMORY_TYPE) \
+ V(WASM_MODULE_TYPE) \
+ V(WASM_TABLE_TYPE) \
+ V(JS_BOUND_FUNCTION_TYPE) \
+ V(JS_FUNCTION_TYPE)
+
+#ifdef V8_INTL_SUPPORT
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(JS_INTL_LOCALE_TYPE) \
+ V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ INSTANCE_TYPE_LIST_AFTER_INTL(V)
+#else
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ INSTANCE_TYPE_LIST_AFTER_INTL(V)
+#endif // V8_INTL_SUPPORT
+
+// Since string types are not consecutive, this macro is used to
+// iterate over them.
+#define STRING_TYPE_LIST(V) \
+ V(STRING_TYPE, kVariableSizeSentinel, string, String) \
+ V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
+ OneByteString) \
+ V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
+ V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
+ ConsOneByteString) \
+ V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
+ V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
+ SlicedOneByteString) \
+ V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
+ ExternalString) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_string, ExternalOneByteString) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
+ external_string_with_one_byte_data, ExternalStringWithOneByteData) \
+ V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize, \
+ short_external_string, ShortExternalString) \
+ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize, \
+ short_external_one_byte_string, ShortExternalOneByteString) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_string_with_one_byte_data, \
+ ShortExternalStringWithOneByteData) \
+ \
+ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
+ InternalizedString) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
+ one_byte_internalized_string, OneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
+ external_internalized_string, ExternalInternalizedString) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string_with_one_byte_data, \
+ ExternalInternalizedStringWithOneByteData) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kShortSize, short_external_internalized_string, \
+ ShortExternalInternalizedString) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
+ ExternalOneByteString::kShortSize, \
+ short_external_one_byte_internalized_string, \
+ ShortExternalOneByteInternalizedString) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string_with_one_byte_data, \
+ ShortExternalInternalizedStringWithOneByteData) \
+ V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
+ V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
+ ThinOneByteString)
+
+// A struct is a simple object with a set of object-valued fields. Including an
+// object type in this causes the compiler to generate most of the boilerplate
+// code for the class including allocation and garbage collection routines,
+// casts and predicates. All you need to define is the class, methods and
+// object verification routines. Easy, no?
+//
+// Note that for subtle reasons related to the ordering or numerical values of
+// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
+// manually.
+#define STRUCT_LIST(V) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
+ V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
+ V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
+ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
+ V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
+ V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(INTERPRETER_DATA, InterpreterData, interpreter_data) \
+ V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
+ V(MODULE, Module, module) \
+ V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
+ V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
+ V(PROMISE_REACTION, PromiseReaction, promise_reaction) \
+ V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(SCRIPT, Script, script) \
+ V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
+ V(TUPLE2, Tuple2, tuple2) \
+ V(TUPLE3, Tuple3, tuple3) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
+ V(WASM_DEBUG_INFO, WasmDebugInfo, wasm_debug_info) \
+ V(WASM_EXPORTED_FUNCTION_DATA, WasmExportedFunctionData, \
+ wasm_exported_function_data) \
+ V(CALLABLE_TASK, CallableTask, callable_task) \
+ V(CALLBACK_TASK, CallbackTask, callback_task) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task)
+
+#define ALLOCATION_SITE_LIST(V) \
+ V(ALLOCATION_SITE, AllocationSite, WithWeakNext, allocation_site) \
+ V(ALLOCATION_SITE, AllocationSite, WithoutWeakNext, \
+ allocation_site_without_weaknext)
+
+#define DATA_HANDLER_LIST(V) \
+ V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
+ V(LOAD_HANDLER, LoadHandler, 2, load_handler2) \
+ V(LOAD_HANDLER, LoadHandler, 3, load_handler3) \
+ V(STORE_HANDLER, StoreHandler, 0, store_handler0) \
+ V(STORE_HANDLER, StoreHandler, 1, store_handler1) \
+ V(STORE_HANDLER, StoreHandler, 2, store_handler2) \
+ V(STORE_HANDLER, StoreHandler, 3, store_handler3)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_DEFINITIONS_H_
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index edf48fecf5..e0a9a8becf 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -30,20 +30,10 @@
#include "src/layout-descriptor-inl.h"
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
-#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
-#include "src/objects/data-handler-inl.h"
#include "src/objects/descriptor-array.h"
-#include "src/objects/fixed-array-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/objects/js-collection-inl.h"
-#include "src/objects/js-promise-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/js-regexp-string-iterator-inl.h"
#include "src/objects/literal-objects.h"
-#include "src/objects/map-inl.h"
-#include "src/objects/module-inl.h"
-#include "src/objects/name-inl.h"
+#include "src/objects/maybe-object-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/objects/template-objects.h"
@@ -51,6 +41,7 @@
#include "src/property-details.h"
#include "src/property.h"
#include "src/prototype.h"
+#include "src/roots-inl.h"
#include "src/transitions-inl.h"
#include "src/v8memory.h"
@@ -80,21 +71,36 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
+// TODO(v8:7786): For instance types that have a single map instance on the
+// roots, and when that map is embedded in the binary, compare against the map
+// pointer rather than looking up the instance type.
+TYPE_CHECKER(AllocationSite, ALLOCATION_SITE_TYPE)
TYPE_CHECKER(BigInt, BIGINT_TYPE)
-TYPE_CHECKER(BoilerplateDescription, BOILERPLATE_DESCRIPTION_TYPE)
+TYPE_CHECKER(ObjectBoilerplateDescription, OBJECT_BOILERPLATE_DESCRIPTION_TYPE)
TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
+TYPE_CHECKER(CallHandlerInfo, CALL_HANDLER_INFO_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
-TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(CodeDataContainer, CODE_DATA_CONTAINER_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
+TYPE_CHECKER(EphemeronHashTable, EPHEMERON_HASH_TABLE_TYPE)
TYPE_CHECKER(FeedbackCell, FEEDBACK_CELL_TYPE)
TYPE_CHECKER(FeedbackMetadata, FEEDBACK_METADATA_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
+TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedArrayOfWeakCells, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-TYPE_CHECKER(HashTable, HASH_TABLE_TYPE)
+TYPE_CHECKER(GlobalDictionary, GLOBAL_DICTIONARY_TYPE)
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)
@@ -105,36 +111,67 @@ TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-#ifdef V8_INTL_SUPPORT
-TYPE_CHECKER(JSLocale, JS_INTL_LOCALE_TYPE)
-#endif // V8_INTL_SUPPORT
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
+TYPE_CHECKER(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
+TYPE_CHECKER(NameDictionary, NAME_DICTIONARY_TYPE)
+TYPE_CHECKER(NativeContext, NATIVE_CONTEXT_TYPE)
+TYPE_CHECKER(NumberDictionary, NUMBER_DICTIONARY_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
+TYPE_CHECKER(OrderedHashMap, ORDERED_HASH_MAP_TYPE)
+TYPE_CHECKER(OrderedHashSet, ORDERED_HASH_SET_TYPE)
+TYPE_CHECKER(PreParsedScopeData, PRE_PARSED_SCOPE_DATA_TYPE)
TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(PropertyDescriptorObject, FIXED_ARRAY_TYPE)
TYPE_CHECKER(ScopeInfo, SCOPE_INFO_TYPE)
+TYPE_CHECKER(ScriptContextTable, SCRIPT_CONTEXT_TABLE_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(SimpleNumberDictionary, SIMPLE_NUMBER_DICTIONARY_TYPE)
TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
+TYPE_CHECKER(StringTable, STRING_TABLE_TYPE)
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(TemplateObjectDescription, TUPLE2_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(UncompiledDataWithoutPreParsedScope,
+ UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE)
+TYPE_CHECKER(UncompiledDataWithPreParsedScope,
+ UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE)
TYPE_CHECKER(WasmGlobalObject, WASM_GLOBAL_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
TYPE_CHECKER(WasmTableObject, WASM_TABLE_TYPE)
+TYPE_CHECKER(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
+#ifdef V8_INTL_SUPPORT
+TYPE_CHECKER(JSLocale, JS_INTL_LOCALE_TYPE)
+TYPE_CHECKER(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE)
+#endif // V8_INTL_SUPPORT
+
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
+bool HeapObject::IsUncompiledData() const {
+ return IsUncompiledDataWithoutPreParsedScope() ||
+ IsUncompiledDataWithPreParsedScope();
+}
bool HeapObject::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
@@ -167,8 +204,8 @@ bool HeapObject::IsJSGeneratorObject() const {
bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
-bool HeapObject::IsExternal() const {
- return map()->FindRootMap() == GetHeap()->external_map();
+bool HeapObject::IsExternal(Isolate* isolate) const {
+ return map()->FindRootMap(isolate) == isolate->heap()->external_map();
}
#define IS_TYPE_FUNCTION_DEF(type_) \
@@ -178,24 +215,48 @@ bool HeapObject::IsExternal() const {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
-#define IS_TYPE_FUNCTION_DEF(Type, Value) \
- bool Object::Is##Type(Isolate* isolate) const { \
- return this == isolate->heap()->Value(); \
- } \
- bool HeapObject::Is##Type(Isolate* isolate) const { \
- return this == isolate->heap()->Value(); \
- }
+#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+ bool Object::Is##Type(Isolate* isolate) const { \
+ return Is##Type(ReadOnlyRoots(isolate->heap())); \
+ } \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ return this == roots.Value(); \
+ } \
+ bool Object::Is##Type() const { \
+ return IsHeapObject() && HeapObject::cast(this)->Is##Type(); \
+ } \
+ bool HeapObject::Is##Type(Isolate* isolate) const { \
+ return Object::Is##Type(isolate); \
+ } \
+ bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
+ return Object::Is##Type(roots); \
+ } \
+ bool HeapObject::Is##Type() const { return Is##Type(GetReadOnlyRoots()); }
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
bool Object::IsNullOrUndefined(Isolate* isolate) const {
- Heap* heap = isolate->heap();
- return this == heap->null_value() || this == heap->undefined_value();
+ return IsNullOrUndefined(ReadOnlyRoots(isolate));
+}
+
+bool Object::IsNullOrUndefined(ReadOnlyRoots roots) const {
+ return IsNull(roots) || IsUndefined(roots);
+}
+
+bool Object::IsNullOrUndefined() const {
+ return IsHeapObject() && HeapObject::cast(this)->IsNullOrUndefined();
}
bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
- Heap* heap = isolate->heap();
- return this == heap->null_value() || this == heap->undefined_value();
+ return Object::IsNullOrUndefined(isolate);
+}
+
+bool HeapObject::IsNullOrUndefined(ReadOnlyRoots roots) const {
+ return Object::IsNullOrUndefined(roots);
+}
+
+bool HeapObject::IsNullOrUndefined() const {
+ return IsNullOrUndefined(GetReadOnlyRoots());
}
bool HeapObject::IsString() const {
@@ -219,6 +280,10 @@ bool HeapObject::IsCallable() const { return map()->is_callable(); }
bool HeapObject::IsConstructor() const { return map()->is_constructor(); }
+bool HeapObject::IsModuleInfo() const {
+ return map() == GetReadOnlyRoots().module_info_map();
+}
+
bool HeapObject::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
@@ -342,8 +407,8 @@ bool HeapObject::IsEnumCache() const { return IsTuple2(); }
bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
bool HeapObject::IsArrayList() const {
- return map() == GetHeap()->array_list_map() ||
- this == GetHeap()->empty_fixed_array();
+ return map() == GetReadOnlyRoots().array_list_map() ||
+ this == GetReadOnlyRoots().empty_fixed_array();
}
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
@@ -388,22 +453,9 @@ bool HeapObject::IsDependentCode() const {
}
bool HeapObject::IsContext() const {
- Map* map = this->map();
- Heap* heap = GetHeap();
- return (
- map == heap->function_context_map() || map == heap->catch_context_map() ||
- map == heap->with_context_map() || map == heap->native_context_map() ||
- map == heap->block_context_map() || map == heap->module_context_map() ||
- map == heap->eval_context_map() || map == heap->script_context_map() ||
- map == heap->debug_evaluate_context_map());
-}
-
-bool HeapObject::IsNativeContext() const {
- return map() == GetHeap()->native_context_map();
-}
-
-bool HeapObject::IsScriptContextTable() const {
- return map() == GetHeap()->script_context_table_map();
+ int instance_type = map()->instance_type();
+ return instance_type >= FIRST_CONTEXT_TYPE &&
+ instance_type <= LAST_CONTEXT_TYPE;
}
template <>
@@ -448,28 +500,16 @@ bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
-bool HeapObject::IsDictionary() const {
- return IsHashTable() && this != GetHeap()->string_table();
-}
-
-bool HeapObject::IsGlobalDictionary() const {
- return map() == GetHeap()->global_dictionary_map();
-}
-
-bool HeapObject::IsNameDictionary() const {
- return map() == GetHeap()->name_dictionary_map();
-}
-
-bool HeapObject::IsNumberDictionary() const {
- return map() == GetHeap()->number_dictionary_map();
-}
-
-bool HeapObject::IsSimpleNumberDictionary() const {
- return map() == GetHeap()->simple_number_dictionary_map();
+bool HeapObject::IsHashTable() const {
+ int instance_type = map()->instance_type();
+ return instance_type >= FIRST_HASH_TABLE_TYPE &&
+ instance_type <= LAST_HASH_TABLE_TYPE;
}
-bool HeapObject::IsStringTable() const {
- return map() == GetHeap()->string_table_map();
+bool HeapObject::IsDictionary() const {
+ int instance_type = map()->instance_type();
+ return instance_type >= FIRST_DICTIONARY_TYPE &&
+ instance_type <= LAST_DICTIONARY_TYPE;
}
bool HeapObject::IsStringSet() const { return IsHashTable(); }
@@ -486,14 +526,6 @@ bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
-bool HeapObject::IsOrderedHashSet() const {
- return map() == GetHeap()->ordered_hash_set_map();
-}
-
-bool HeapObject::IsOrderedHashMap() const {
- return map() == GetHeap()->ordered_hash_map_map();
-}
-
bool Object::IsSmallOrderedHashTable() const {
return IsSmallOrderedHashSet() || IsSmallOrderedHashMap();
}
@@ -574,10 +606,11 @@ CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
-CAST_ACCESSOR(BoilerplateDescription)
+CAST_ACCESSOR(ObjectBoilerplateDescription)
CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(ConstantElementsPair)
+CAST_ACCESSOR(ArrayBoilerplateDescription)
CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(EphemeronHashTable)
CAST_ACCESSOR(EnumCache)
CAST_ACCESSOR(FeedbackCell)
CAST_ACCESSOR(Foreign)
@@ -598,7 +631,9 @@ CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSValue)
+CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(LayoutDescriptor)
+CAST_ACCESSOR(MutableHeapNumber)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(NumberDictionary)
@@ -610,7 +645,6 @@ CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
CAST_ACCESSOR(PropertyArray)
CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(PrototypeInfo)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SimpleNumberDictionary)
@@ -663,12 +697,13 @@ bool Object::FilterKey(PropertyFilter filter) {
Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
Representation representation) {
if (!representation.IsDouble()) return object;
- Handle<HeapNumber> result = isolate->factory()->NewHeapNumber(MUTABLE);
+ auto result = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
if (object->IsUninitialized(isolate)) {
result->set_value_as_bits(kHoleNanInt64);
} else if (object->IsMutableHeapNumber()) {
// Ensure that all bits of the double value are preserved.
- result->set_value_as_bits(HeapNumber::cast(*object)->value_as_bits());
+ result->set_value_as_bits(
+ MutableHeapNumber::cast(*object)->value_as_bits());
} else {
result->set_value(object->Number());
}
@@ -682,7 +717,8 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
DCHECK(object->FitsRepresentation(representation));
return object;
}
- return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
+ return isolate->factory()->NewHeapNumber(
+ MutableHeapNumber::cast(*object)->value());
}
Representation Object::OptimalRepresentation() {
@@ -691,8 +727,7 @@ Representation Object::OptimalRepresentation() {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
- } else if (FLAG_track_computed_fields &&
- IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
DCHECK(IsHeapObject());
@@ -767,17 +802,15 @@ MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
}
// static
-MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+MaybeHandle<Object> Object::ToNumber(Isolate* isolate, Handle<Object> input) {
if (input->IsNumber()) return input; // Shortcut.
- return ConvertToNumberOrNumeric(HeapObject::cast(*input)->GetIsolate(), input,
- Conversion::kToNumber);
+ return ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber);
}
// static
-MaybeHandle<Object> Object::ToNumeric(Handle<Object> input) {
+MaybeHandle<Object> Object::ToNumeric(Isolate* isolate, Handle<Object> input) {
if (input->IsNumber() || input->IsBigInt()) return input; // Shortcut.
- return ConvertToNumberOrNumeric(HeapObject::cast(*input)->GetIsolate(), input,
- Conversion::kToNumeric);
+ return ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumeric);
}
// static
@@ -820,16 +853,17 @@ MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
return ConvertToIndex(isolate, input, error_index);
}
-MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
+MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
Handle<Name> name) {
- LookupIterator it(object, name);
+ LookupIterator it(isolate, object, name);
if (!it.IsFound()) return it.factory()->undefined_value();
return GetProperty(&it);
}
-MaybeHandle<Object> JSReceiver::GetProperty(Handle<JSReceiver> receiver,
+MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
Handle<Name> name) {
- LookupIterator it(receiver, name, receiver);
+ LookupIterator it(isolate, receiver, name, receiver);
if (!it.IsFound()) return it.factory()->undefined_value();
return Object::GetProperty(&it);
}
@@ -882,7 +916,7 @@ MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- return GetProperty(receiver, str);
+ return GetProperty(isolate, receiver, str);
}
// static
@@ -896,10 +930,11 @@ V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
DisallowHeapAllocation no_gc;
HeapObject* prototype = HeapObject::cast(object->map()->prototype());
- HeapObject* null = isolate->heap()->null_value();
- HeapObject* empty_fixed_array = isolate->heap()->empty_fixed_array();
+ ReadOnlyRoots roots(isolate);
+ HeapObject* null = roots.null_value();
+ HeapObject* empty_fixed_array = roots.empty_fixed_array();
HeapObject* empty_slow_element_dictionary =
- isolate->heap()->empty_slow_element_dictionary();
+ roots.empty_slow_element_dictionary();
while (prototype != null) {
Map* map = prototype->map();
if (map->IsCustomElementsReceiverMap()) return false;
@@ -947,8 +982,12 @@ HeapObject* MapWord::ToForwardingAddress() {
#ifdef VERIFY_HEAP
-void HeapObject::VerifyObjectField(int offset) {
- VerifyPointer(READ_FIELD(this, offset));
+void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
+ VerifyPointer(isolate, READ_FIELD(this, offset));
+}
+
+void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
+ MaybeObject::VerifyMaybeObjectPointer(isolate, READ_WEAK_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
@@ -956,6 +995,11 @@ void HeapObject::VerifySmiField(int offset) {
}
#endif
+ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
+ // TODO(v8:7464): When RO_SPACE is embedded, this will access a global
+ // variable instead.
+ return ReadOnlyRoots(MemoryChunk::FromHeapObject(this)->heap());
+}
Heap* HeapObject::GetHeap() const {
Heap* heap = MemoryChunk::FromAddress(
@@ -970,6 +1014,20 @@ Isolate* HeapObject::GetIsolate() const {
return GetHeap()->isolate();
}
+Heap* NeverReadOnlySpaceObject::GetHeap() const {
+ MemoryChunk* chunk =
+ MemoryChunk::FromAddress(reinterpret_cast<Address>(this));
+ // Make sure we are not accessing an object in RO space.
+ SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE);
+ Heap* heap = chunk->heap();
+ SLOW_DCHECK(heap != nullptr);
+ return heap;
+}
+
+Isolate* NeverReadOnlySpaceObject::GetIsolate() const {
+ return GetHeap()->isolate();
+}
+
Map* HeapObject::map() const {
return map_word().ToMap();
}
@@ -978,14 +1036,15 @@ Map* HeapObject::map() const {
void HeapObject::set_map(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
- value->GetHeap()->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+ Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
+ this, nullptr, value);
}
}
@@ -997,14 +1056,15 @@ Map* HeapObject::synchronized_map() const {
void HeapObject::synchronized_set_map(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
- value->GetHeap()->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+ Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
+ this, nullptr, value);
}
}
@@ -1013,7 +1073,7 @@ void HeapObject::synchronized_set_map(Map* value) {
void HeapObject::set_map_no_write_barrier(Map* value) {
if (value != nullptr) {
#ifdef VERIFY_HEAP
- value->GetHeap()->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
@@ -1025,7 +1085,8 @@ void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
DCHECK_NOT_NULL(value);
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+ Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
+ this, nullptr, value);
}
}
@@ -1058,30 +1119,28 @@ void HeapObject::synchronized_set_map_word(MapWord map_word) {
int HeapObject::Size() const { return SizeFromMap(map()); }
-double HeapNumber::value() const {
+double HeapNumberBase::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
-
-void HeapNumber::set_value(double value) {
+void HeapNumberBase::set_value(double value) {
WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}
-uint64_t HeapNumber::value_as_bits() const {
+uint64_t HeapNumberBase::value_as_bits() const {
return READ_UINT64_FIELD(this, kValueOffset);
}
-void HeapNumber::set_value_as_bits(uint64_t bits) {
+void HeapNumberBase::set_value_as_bits(uint64_t bits) {
WRITE_UINT64_FIELD(this, kValueOffset, bits);
}
-int HeapNumber::get_exponent() {
+int HeapNumberBase::get_exponent() {
return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
kExponentShift) - kExponentBias;
}
-
-int HeapNumber::get_sign() {
+int HeapNumberBase::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
@@ -1092,14 +1151,19 @@ FixedArrayBase* JSObject::elements() const {
return static_cast<FixedArrayBase*>(array);
}
+bool AllocationSite::HasWeakNext() const {
+ return map() == GetReadOnlyRoots().allocation_site_map();
+}
+
void AllocationSite::Initialize() {
set_transition_info_or_boilerplate(Smi::kZero);
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::kZero);
set_pretenure_data(0);
set_pretenure_create_count(0);
- set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
- SKIP_WRITE_BARRIER);
+ set_dependent_code(
+ DependentCode::cast(GetReadOnlyRoots().empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
}
bool AllocationSite::IsZombie() const {
@@ -1167,7 +1231,7 @@ AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int value = pretenure_data();
+ int32_t value = pretenure_data();
set_pretenure_data(PretenureDecisionBits::update(value, decision));
}
@@ -1176,7 +1240,7 @@ bool AllocationSite::deopt_dependent_code() const {
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int value = pretenure_data();
+ int32_t value = pretenure_data();
set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
}
@@ -1185,7 +1249,7 @@ int AllocationSite::memento_found_count() const {
}
inline void AllocationSite::set_memento_found_count(int count) {
- int value = pretenure_data();
+ int32_t value = pretenure_data();
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
@@ -1256,7 +1320,7 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsHoleyElementsKind(current_kind);
if (current_kind == HOLEY_ELEMENTS) return;
- Object* the_hole = object->GetHeap()->the_hole_value();
+ Object* the_hole = object->GetReadOnlyRoots().the_hole_value();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (current == the_hole) {
@@ -1290,10 +1354,10 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
- Heap* heap = object->GetHeap();
- if (elements->map() != heap->fixed_double_array_map()) {
- DCHECK(elements->map() == heap->fixed_array_map() ||
- elements->map() == heap->fixed_cow_array_map());
+ ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ if (elements->map() != roots.fixed_double_array_map()) {
+ DCHECK(elements->map() == roots.fixed_array_map() ||
+ elements->map() == roots.fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
@@ -1325,11 +1389,11 @@ void JSObject::SetMapAndElements(Handle<JSObject> object,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
DCHECK((object->map()->has_fast_smi_or_object_elements() ||
- (*value == object->GetHeap()->empty_fixed_array()) ||
+ (*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
object->map()->has_fast_string_wrapper_elements()) ==
- (value->map() == object->GetHeap()->fixed_array_map() ||
- value->map() == object->GetHeap()->fixed_cow_array_map()));
- DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
+ (value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
+ value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
+ DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
(object->map()->has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
@@ -1380,8 +1444,8 @@ void Oddball::set_kind(byte value) {
// static
-Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
- return handle(input->to_number(), input->GetIsolate());
+Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
+ return handle(input->to_number(), isolate);
}
@@ -1392,7 +1456,7 @@ ACCESSORS(PropertyCell, name, Name, kNameOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
-PropertyDetails PropertyCell::property_details() {
+PropertyDetails PropertyCell::property_details() const {
return PropertyDetails(Smi::cast(property_details_raw()));
}
@@ -1408,8 +1472,8 @@ Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
// Either the garbage collector is clearing the cell or we are simply
// initializing the root empty weak cell.
- DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
- this == GetHeap()->empty_weak_cell());
+ DCHECK(Heap::FromWritableHeapObject(this)->gc_state() == Heap::MARK_COMPACT ||
+ this == GetReadOnlyRoots().empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::kZero);
}
@@ -1419,7 +1483,7 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
- Heap* heap = val->GetHeap();
+ Heap* heap = Heap::FromWritableHeapObject(this);
WriteBarrierMode mode =
heap->incremental_marking()->marking_state()->IsBlack(this)
? UPDATE_WRITE_BARRIER
@@ -1560,8 +1624,8 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
if (IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
- RawFastDoublePropertyAsBitsAtPut(index,
- HeapNumber::cast(value)->value_as_bits());
+ RawFastDoublePropertyAsBitsAtPut(
+ index, MutableHeapNumber::cast(value)->value_as_bits());
} else {
RawFastPropertyAtPut(index, value);
}
@@ -1575,7 +1639,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
// Nothing more to be done.
- if (value->IsUninitialized(this->GetIsolate())) {
+ if (value->IsUninitialized()) {
return;
}
// Manipulating the signaling NaN used for the hole and uninitialized
@@ -1592,8 +1656,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
if (IsUnboxedDoubleField(index)) {
RawFastDoublePropertyAsBitsAtPut(index, bits);
} else {
- HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
- DCHECK(box->IsMutableHeapNumber());
+ auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
box->set_value_as_bits(bits);
}
} else {
@@ -1626,10 +1689,9 @@ Object* JSObject::InObjectPropertyAtPut(int index,
void JSObject::InitializeBody(Map* map, int start_offset,
Object* pre_allocated_value,
Object* filler_value) {
- DCHECK(!filler_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(filler_value));
+ DCHECK(!filler_value->IsHeapObject() || !Heap::InNewSpace(filler_value));
DCHECK(!pre_allocated_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(pre_allocated_value));
+ !Heap::InNewSpace(pre_allocated_value));
int size = map->instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
@@ -1648,7 +1710,7 @@ void JSObject::InitializeBody(Map* map, int start_offset,
}
void Struct::InitializeBody(int object_size) {
- Object* value = GetHeap()->undefined_value();
+ Object* value = GetReadOnlyRoots().undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
@@ -1667,10 +1729,8 @@ void Object::VerifyApiCallResultType() {
#if DEBUG
if (IsSmi()) return;
DCHECK(IsHeapObject());
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
- IsBigInt() || IsUndefined(isolate) || IsTrue(isolate) ||
- IsFalse(isolate) || IsNull(isolate))) {
+ IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull())) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
@@ -1688,7 +1748,7 @@ void PropertyArray::set(int index, Object* value) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
}
int RegExpMatchInfo::NumberOfCaptureRegisters() {
@@ -1736,9 +1796,9 @@ void RegExpMatchInfo::SetCapture(int i, int value) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
- Heap* heap = GetHeap();
+ Heap* heap = Heap::FromWritableHeapObject(this);
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
- if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
@@ -1760,13 +1820,16 @@ bool HeapObject::NeedsRehashing() const {
return DescriptorArray::cast(this)->number_of_descriptors() > 1;
case TRANSITION_ARRAY_TYPE:
return TransitionArray::cast(this)->number_of_entries() > 1;
+ case ORDERED_HASH_MAP_TYPE:
+ return OrderedHashMap::cast(this)->NumberOfElements() > 0;
+ case ORDERED_HASH_SET_TYPE:
+ return OrderedHashSet::cast(this)->NumberOfElements() > 0;
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
case HASH_TABLE_TYPE:
- if (IsOrderedHashMap()) {
- return OrderedHashMap::cast(this)->NumberOfElements() > 0;
- } else if (IsOrderedHashSet()) {
- return OrderedHashSet::cast(this)->NumberOfElements() > 0;
- }
- return true;
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
return true;
@@ -1775,12 +1838,17 @@ bool HeapObject::NeedsRehashing() const {
}
}
+Address HeapObject::GetFieldAddress(int field_offset) const {
+ return FIELD_ADDR(this, field_offset);
+}
+
void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
+ value, mode);
}
Object** PropertyArray::data_start() {
@@ -1791,7 +1859,7 @@ ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
int DescriptorArray::number_of_descriptors() const {
- return Smi::ToInt(get(kDescriptorLengthIndex));
+ return Smi::ToInt(get(kDescriptorLengthIndex)->ToSmi());
}
int DescriptorArray::number_of_descriptors_storage() const {
@@ -1804,7 +1872,8 @@ int DescriptorArray::NumberOfSlackDescriptors() const {
void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
- set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
+ set(kDescriptorLengthIndex,
+ MaybeObject::FromObject(Smi::FromInt(number_of_descriptors)));
}
inline int DescriptorArray::number_of_entries() const {
@@ -1816,7 +1885,7 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
}
EnumCache* DescriptorArray::GetEnumCache() {
- return EnumCache::cast(get(kEnumCacheIndex));
+ return EnumCache::cast(get(kEnumCacheIndex)->ToStrongHeapObject());
}
// Perform a binary search in a fixed array.
@@ -1898,7 +1967,6 @@ int LinearSearch(T* array, Name* name, int valid_entries,
}
}
-
template <SearchMode search_mode, typename T>
int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
SLOW_DCHECK(array->IsSortedNoDuplicates());
@@ -1929,6 +1997,13 @@ int DescriptorArray::Search(Name* name, int valid_descriptors) {
nullptr);
}
+int DescriptorArray::Search(Name* name, Map* map) {
+ DCHECK(name->IsUniqueName());
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) return kNotFound;
+ return Search(name, number_of_own_descriptors);
+}
+
int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
DCHECK(name->IsUniqueName());
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -1948,23 +2023,23 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
+ DCHECK((*RawFieldOfElementAt(ToKeyIndex(descriptor_number)))->IsObject());
+ return reinterpret_cast<Object**>(
+ RawFieldOfElementAt(ToKeyIndex(descriptor_number)));
}
-
-Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
- return GetKeySlot(descriptor_number);
+MaybeObject** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
+ return reinterpret_cast<MaybeObject**>(GetKeySlot(descriptor_number));
}
-
-Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
+MaybeObject** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
return GetValueSlot(descriptor_number - 1) + 1;
}
Name* DescriptorArray::GetKey(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- return Name::cast(get(ToKeyIndex(descriptor_number)));
+ return Name::cast(get(ToKeyIndex(descriptor_number))->ToStrongHeapObject());
}
@@ -1980,11 +2055,11 @@ Name* DescriptorArray::GetSortedKey(int descriptor_number) {
void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index), details.set_pointer(pointer).AsSmi());
+ set(ToDetailsIndex(descriptor_index),
+ MaybeObject::FromObject(details.set_pointer(pointer).AsSmi()));
}
-
-Object** DescriptorArray::GetValueSlot(int descriptor_number) {
+MaybeObject** DescriptorArray::GetValueSlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}
@@ -1994,22 +2069,25 @@ int DescriptorArray::GetValueOffset(int descriptor_number) {
return OffsetOfElementAt(ToValueIndex(descriptor_number));
}
-
-Object* DescriptorArray::GetValue(int descriptor_number) {
+Object* DescriptorArray::GetStrongValue(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number));
+ return get(ToValueIndex(descriptor_number))->ToObject();
}
void DescriptorArray::SetValue(int descriptor_index, Object* value) {
- set(ToValueIndex(descriptor_index), value);
+ set(ToValueIndex(descriptor_index), MaybeObject::FromObject(value));
}
+MaybeObject* DescriptorArray::GetValue(int descriptor_number) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ return get(ToValueIndex(descriptor_number));
+}
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- Object* details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(Smi::cast(details));
+ MaybeObject* details = get(ToDetailsIndex(descriptor_number));
+ return PropertyDetails(details->ToSmi());
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
@@ -2019,28 +2097,23 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- Object* wrapped_type = GetValue(descriptor_number);
+ MaybeObject* wrapped_type = GetValue(descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
-void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
- desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
- handle(GetValue(descriptor_number), GetIsolate()),
- GetDetails(descriptor_number));
-}
-
-void DescriptorArray::Set(int descriptor_number, Name* key, Object* value,
+void DescriptorArray::Set(int descriptor_number, Name* key, MaybeObject* value,
PropertyDetails details) {
// Range check.
DCHECK(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), key);
+ set(ToKeyIndex(descriptor_number), MaybeObject::FromObject(key));
set(ToValueIndex(descriptor_number), value);
- set(ToDetailsIndex(descriptor_number), details.AsSmi());
+ set(ToDetailsIndex(descriptor_number),
+ MaybeObject::FromObject(details.AsSmi()));
}
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
Name* key = *desc->GetKey();
- Object* value = *desc->GetValue();
+ MaybeObject* value = *desc->GetValue();
Set(descriptor_number, key, value, desc->GetDetails());
}
@@ -2071,6 +2144,14 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
+MaybeObject* DescriptorArray::get(int index) const {
+ return WeakFixedArray::Get(index);
+}
+
+void DescriptorArray::set(int index, MaybeObject* value) {
+ WeakFixedArray::Set(index, value);
+}
+
bool StringSetShape::IsMatch(String* key, Object* value) {
DCHECK(value->IsString());
return key->Equals(String::cast(value));
@@ -2175,8 +2256,10 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
- DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == nullptr));
+ DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
+ Heap::kFreeSpaceMapRootIndex) ||
+ (!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
+ map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
return reinterpret_cast<FreeSpace*>(
Memory::Address_at(address() + kNextOffset));
@@ -2184,8 +2267,10 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
- DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == nullptr));
+ DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
+ Heap::kFreeSpaceMapRootIndex) ||
+ (!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
+ map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
@@ -2194,7 +2279,8 @@ void FreeSpace::set_next(FreeSpace* next) {
FreeSpace* FreeSpace::cast(HeapObject* o) {
- SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
+ SLOW_DCHECK(!Heap::FromWritableHeapObject(o)->deserialization_complete() ||
+ o->IsFreeSpace());
return reinterpret_cast<FreeSpace*>(o);
}
@@ -2275,6 +2361,10 @@ int HeapObject::SizeFromMap(Map* map) const {
if (instance_type == BIGINT_TYPE) {
return BigInt::SizeFor(reinterpret_cast<const BigInt*>(this)->length());
}
+ if (instance_type == PRE_PARSED_SCOPE_DATA_TYPE) {
+ return PreParsedScopeData::SizeFor(
+ reinterpret_cast<const PreParsedScopeData*>(this)->length());
+ }
DCHECK(instance_type == CODE_TYPE);
return reinterpret_cast<const Code*>(this)->CodeSize();
}
@@ -2301,40 +2391,10 @@ SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
ACCESSORS(AsyncGeneratorRequest, promise, Object, kPromiseOffset)
-Map* PrototypeInfo::ObjectCreateMap() {
- return Map::cast(WeakCell::cast(object_create_map())->value());
-}
-
-// static
-void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
- Handle<Map> map) {
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- info->set_object_create_map(*cell);
-}
-
-bool PrototypeInfo::HasObjectCreateMap() {
- Object* cache = object_create_map();
- return cache->IsWeakCell() && !WeakCell::cast(cache)->cleared();
-}
-
-ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
-ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
-ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
-SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
-SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
-BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
-
ACCESSORS(Tuple2, value1, Object, kValue1Offset)
ACCESSORS(Tuple2, value2, Object, kValue2Offset)
ACCESSORS(Tuple3, value3, Object, kValue3Offset)
-SMI_ACCESSORS(ConstantElementsPair, elements_kind, kElementsKindOffset)
-ACCESSORS(ConstantElementsPair, constant_values, FixedArrayBase,
- kConstantValuesOffset)
-bool ConstantElementsPair::is_empty() const {
- return constant_values()->length() == 0;
-}
-
ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
kCookedStringsOffset)
@@ -2365,12 +2425,13 @@ void AllocationSite::set_transition_info(int value) {
}
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
-SMI_ACCESSORS(AllocationSite, pretenure_create_count,
- kPretenureCreateCountOffset)
+INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
+INT32_ACCESSORS(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode,
kDependentCodeOffset)
-ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
+ACCESSORS_CHECKED(AllocationSite, weak_next, Object, kWeakNextOffset,
+ HasWeakNext())
ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
@@ -2457,7 +2518,7 @@ bool JSFunction::IsInOptimizationQueue() {
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
if (!has_prototype_slot()) return;
if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
- initial_map()->CompleteInobjectSlackTracking();
+ initial_map()->CompleteInobjectSlackTracking(GetIsolate());
}
}
@@ -2472,7 +2533,7 @@ AbstractCode* JSFunction::abstract_code() {
Code* JSFunction::code() { return Code::cast(READ_FIELD(this, kCodeOffset)); }
void JSFunction::set_code(Code* value) {
- DCHECK(!GetHeap()->InNewSpace(value));
+ DCHECK(!Heap::InNewSpace(value));
WRITE_FIELD(this, kCodeOffset, value);
GetHeap()->incremental_marking()->RecordWrite(
this, HeapObject::RawField(this, kCodeOffset), value);
@@ -2480,7 +2541,7 @@ void JSFunction::set_code(Code* value) {
void JSFunction::set_code_no_write_barrier(Code* value) {
- DCHECK(!GetHeap()->InNewSpace(value));
+ DCHECK(!Heap::InNewSpace(value));
WRITE_FIELD(this, kCodeOffset, value);
}
@@ -2505,7 +2566,7 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
}
bool JSFunction::has_feedback_vector() const {
- return !feedback_cell()->value()->IsUndefined(GetIsolate());
+ return !feedback_cell()->value()->IsUndefined();
}
Context* JSFunction::context() {
@@ -2525,7 +2586,7 @@ Context* JSFunction::native_context() { return context()->native_context(); }
void JSFunction::set_context(Object* value) {
- DCHECK(value->IsUndefined(GetIsolate()) || value->IsContext());
+ DCHECK(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
}
@@ -2550,11 +2611,9 @@ bool JSFunction::has_initial_map() {
bool JSFunction::has_instance_prototype() {
DCHECK(has_prototype_slot());
- return has_initial_map() ||
- !prototype_or_initial_map()->IsTheHole(GetIsolate());
+ return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
}
-
bool JSFunction::has_prototype() {
DCHECK(has_prototype_slot());
return map()->has_non_instance_prototype() || has_instance_prototype();
@@ -2611,10 +2670,10 @@ void Foreign::set_foreign_address(Address value) {
template <class Derived>
void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
Object* value) {
- Address entry_offset =
- kHeaderSize + GetDataEntryOffset(entry, relative_index);
+ Address entry_offset = GetDataEntryOffset(entry, relative_index);
RELAXED_WRITE_FIELD(this, entry_offset, value);
- WRITE_BARRIER(GetHeap(), this, static_cast<int>(entry_offset), value);
+ WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
+ static_cast<int>(entry_offset), value);
}
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
@@ -2623,7 +2682,8 @@ ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
-ACCESSORS(JSGeneratorObject, register_file, FixedArray, kRegisterFileOffset)
+ACCESSORS(JSGeneratorObject, parameters_and_registers, FixedArray,
+ kParametersAndRegistersOffset)
bool JSGeneratorObject::is_suspended() const {
DCHECK_LT(kGeneratorExecuting, 0);
@@ -2645,18 +2705,6 @@ SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
ACCESSORS(JSValue, value, Object, kValueOffset)
-HeapNumber* HeapNumber::cast(Object* object) {
- SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
- return reinterpret_cast<HeapNumber*>(object);
-}
-
-
-const HeapNumber* HeapNumber::cast(const Object* object) {
- SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
- return reinterpret_cast<const HeapNumber*>(object);
-}
-
-
ACCESSORS(JSDate, value, Object, kValueOffset)
ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
ACCESSORS(JSDate, year, Object, kYearOffset)
@@ -2670,7 +2718,7 @@ ACCESSORS(JSDate, sec, Object, kSecOffset)
SMI_ACCESSORS(JSMessageObject, type, kTypeOffset)
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
+ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -2687,11 +2735,11 @@ ElementsKind JSObject::GetElementsKind() {
if (ElementsAreSafeToExamine()) {
Map* map = fixed_array->map();
if (IsSmiOrObjectElementsKind(kind)) {
- DCHECK(map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map());
+ DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
+ map == GetReadOnlyRoots().fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
DCHECK(fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array());
+ fixed_array == GetReadOnlyRoots().empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
DCHECK(fixed_array->IsFixedArray());
DCHECK(fixed_array->IsDictionary());
@@ -2807,8 +2855,9 @@ NumberDictionary* JSObject::element_dictionary() {
}
// static
-Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y) {
- Maybe<ComparisonResult> result = Compare(x, y);
+Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kGreaterThan:
@@ -2824,8 +2873,9 @@ Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y) {
// static
-Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y) {
- Maybe<ComparisonResult> result = Compare(x, y);
+Maybe<bool> Object::GreaterThanOrEqual(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kEqual:
@@ -2841,8 +2891,9 @@ Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y) {
// static
-Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y) {
- Maybe<ComparisonResult> result = Compare(x, y);
+Maybe<bool> Object::LessThan(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kLessThan:
@@ -2858,8 +2909,9 @@ Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y) {
// static
-Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y) {
- Maybe<ComparisonResult> result = Compare(x, y);
+Maybe<bool> Object::LessThanOrEqual(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(isolate, x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kEqual:
@@ -2873,20 +2925,20 @@ Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y) {
return Nothing<bool>();
}
-MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
+MaybeHandle<Object> Object::GetPropertyOrElement(Isolate* isolate,
+ Handle<Object> object,
Handle<Name> name) {
- LookupIterator it =
- LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
return GetProperty(&it);
}
-MaybeHandle<Object> Object::SetPropertyOrElement(Handle<Object> object,
+MaybeHandle<Object> Object::SetPropertyOrElement(Isolate* isolate,
+ Handle<Object> object,
Handle<Name> name,
Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
- LookupIterator it =
- LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
return value;
}
@@ -2894,20 +2946,22 @@ MaybeHandle<Object> Object::SetPropertyOrElement(Handle<Object> object,
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
Handle<Name> name,
Handle<JSReceiver> holder) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), receiver, name, holder);
+ LookupIterator it = LookupIterator::PropertyOrElement(holder->GetIsolate(),
+ receiver, name, holder);
return GetProperty(&it);
}
void JSReceiver::initialize_properties() {
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_property_dictionary()));
+ Heap* heap = GetHeap();
+ ReadOnlyRoots roots(heap);
+ DCHECK(!Heap::InNewSpace(roots.empty_fixed_array()));
+ DCHECK(!Heap::InNewSpace(heap->empty_property_dictionary()));
if (map()->is_dictionary_map()) {
WRITE_FIELD(this, kPropertiesOrHashOffset,
- GetHeap()->empty_property_dictionary());
+ heap->empty_property_dictionary());
} else {
- WRITE_FIELD(this, kPropertiesOrHashOffset, GetHeap()->empty_fixed_array());
+ WRITE_FIELD(this, kPropertiesOrHashOffset, roots.empty_fixed_array());
}
}
@@ -2936,8 +2990,8 @@ PropertyArray* JSReceiver::property_array() const {
DCHECK(HasFastProperties());
Object* prop = raw_properties_or_hash();
- if (prop->IsSmi() || prop == GetHeap()->empty_fixed_array()) {
- return GetHeap()->empty_property_array();
+ if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
+ return GetReadOnlyRoots().empty_property_array();
}
return PropertyArray::cast(prop);
@@ -2969,7 +3023,7 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(name->GetIsolate(),
+ LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
object, name, object);
return GetPropertyAttributes(&it);
}
@@ -2978,7 +3032,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, object, LookupIterator::OWN);
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
return GetPropertyAttributes(&it);
}
@@ -3042,12 +3096,10 @@ void AccessorPair::set(AccessorComponent component, Object* value) {
void AccessorPair::SetComponents(Object* getter, Object* setter) {
- Isolate* isolate = GetIsolate();
- if (!getter->IsNull(isolate)) set_getter(getter);
- if (!setter->IsNull(isolate)) set_setter(setter);
+ if (!getter->IsNull()) set_getter(getter);
+ if (!setter->IsNull()) set_setter(setter);
}
-
bool AccessorPair::Equals(AccessorPair* pair) {
return (this == pair) || pair->Equals(getter(), setter());
}
@@ -3064,18 +3116,19 @@ bool AccessorPair::ContainsAccessor() {
bool AccessorPair::IsJSAccessor(Object* obj) {
- return obj->IsCallable() || obj->IsUndefined(GetIsolate());
+ return obj->IsCallable() || obj->IsUndefined();
}
template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::ClearEntry(int entry) {
- Object* the_hole = this->GetHeap()->the_hole_value();
+void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate, int entry) {
+ Object* the_hole = this->GetReadOnlyRoots().the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- Derived::cast(this)->SetEntry(entry, the_hole, the_hole, details);
+ Derived::cast(this)->SetEntry(isolate, entry, the_hole, the_hole, details);
}
template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::SetEntry(int entry, Object* key, Object* value,
+void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
+ Object* key, Object* value,
PropertyDetails details) {
DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
DCHECK(!key->IsName() || details.dictionary_index() > 0);
@@ -3084,7 +3137,7 @@ void Dictionary<Derived, Shape>::SetEntry(int entry, Object* key, Object* value,
WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
this->set(index + Derived::kEntryKeyIndex, key, mode);
this->set(index + Derived::kEntryValueIndex, value, mode);
- if (Shape::kHasDetails) DetailsAtPut(entry, details);
+ if (Shape::kHasDetails) DetailsAtPut(isolate, entry, details);
}
Object* GlobalDictionaryShape::Unwrap(Object* object) {
@@ -3106,25 +3159,23 @@ PropertyCell* GlobalDictionary::CellAt(int entry) {
return PropertyCell::cast(KeyAt(entry));
}
-bool GlobalDictionaryShape::IsLive(Isolate* isolate, Object* k) {
- Heap* heap = isolate->heap();
- DCHECK_NE(heap->the_hole_value(), k);
- return k != heap->undefined_value();
+bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object* k) {
+ DCHECK_NE(roots.the_hole_value(), k);
+ return k != roots.undefined_value();
}
-bool GlobalDictionaryShape::IsKey(Isolate* isolate, Object* k) {
- return IsLive(isolate, k) &&
- !PropertyCell::cast(k)->value()->IsTheHole(isolate);
+bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object* k) {
+ return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots);
}
Name* GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
Object* GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
-void GlobalDictionary::SetEntry(int entry, Object* key, Object* value,
- PropertyDetails details) {
+void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object* key,
+ Object* value, PropertyDetails details) {
DCHECK_EQ(key, PropertyCell::cast(value)->name());
set(EntryToIndex(entry) + kEntryKeyIndex, value);
- DetailsAtPut(entry, details);
+ DetailsAtPut(isolate, entry, details);
}
void GlobalDictionary::ValueAtPut(int entry, Object* value) {
@@ -3161,8 +3212,7 @@ int SimpleNumberDictionaryShape::GetMapRootIndex() {
}
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
- DCHECK(other->IsTheHole(key->GetIsolate()) ||
- Name::cast(other)->IsUniqueName());
+ DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName());
DCHECK(key->IsUniqueName());
return *key == other;
}
@@ -3197,15 +3247,14 @@ PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary* dict, int entry) {
return dict->CellAt(entry)->property_details();
}
-
template <typename Dictionary>
-void GlobalDictionaryShape::DetailsAtPut(Dictionary* dict, int entry,
- PropertyDetails value) {
+void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary* dict,
+ int entry, PropertyDetails value) {
DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
PropertyCell* cell = dict->CellAt(entry);
if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
cell->dependent_code()->DeoptimizeDependentCodeGroup(
- cell->GetIsolate(), DependentCode::kPropertyCellChangedGroup);
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
cell->set_property_details(value);
}
@@ -3275,10 +3324,6 @@ Handle<Object> ObjectHashTableShape::AsHandle(Isolate* isolate,
return key;
}
-Handle<ObjectHashTable> ObjectHashTable::Shrink(Handle<ObjectHashTable> table) {
- return DerivedHashTable::Shrink(table);
-}
-
Relocatable::Relocatable(Isolate* isolate) {
isolate_ = isolate;
prev_ = isolate->relocatable_top();
@@ -3297,7 +3342,7 @@ Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
int index = Smi::ToInt(this->index());
Object* key = table->KeyAt(index);
- DCHECK(!key->IsTheHole(table->GetIsolate()));
+ DCHECK(!key->IsTheHole());
return key;
}
@@ -3361,6 +3406,11 @@ bool ScopeInfo::HasSimpleParameters() const {
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
+FreshlyAllocatedBigInt* FreshlyAllocatedBigInt::cast(Object* object) {
+ SLOW_DCHECK(object->IsBigInt());
+ return reinterpret_cast<FreshlyAllocatedBigInt*>(object);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 56efb170ee..aeadc2d398 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -10,13 +10,24 @@
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-collection-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-locale-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format-inl.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
@@ -31,12 +42,11 @@ namespace internal {
#ifdef OBJECT_PRINT
void Object::Print() {
- OFStream os(stdout);
+ StdoutStream os;
this->Print(os);
os << std::flush;
}
-
void Object::Print(std::ostream& os) { // NOLINT
if (IsSmi()) {
os << "Smi: " << std::hex << "0x" << Smi::ToInt(this);
@@ -54,15 +64,15 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << map()->instance_type();
}
os << "]";
- if (GetHeap()->InOldSpace(this)) os << " in OldSpace";
+ MemoryChunk* chunk = MemoryChunk::FromAddress(
+ reinterpret_cast<Address>(const_cast<HeapObject*>(this)));
+ if (chunk->owner()->identity() == OLD_SPACE) os << " in OldSpace";
if (!IsMap()) os << "\n - map: " << Brief(map());
}
-
void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
InstanceType instance_type = map()->instance_type();
- HandleScope scope(GetIsolate());
if (instance_type < FIRST_NONSTRING_TYPE) {
String::cast(this)->StringPrint(os);
os << "\n";
@@ -82,7 +92,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
break;
case MUTABLE_HEAP_NUMBER_TYPE:
os << "<mutable ";
- HeapNumber::cast(this)->HeapNumberPrint(os);
+ MutableHeapNumber::cast(this)->MutableHeapNumberPrint(os);
os << ">\n";
break;
case BIGINT_TYPE:
@@ -92,7 +102,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
- case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -103,10 +112,27 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
FixedArray::cast(this)->FixedArrayPrint(os);
break;
- case BOILERPLATE_DESCRIPTION_TYPE:
- BoilerplateDescription::cast(this)->BoilerplateDescriptionPrint(os);
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ ObjectHashTable::cast(this)->ObjectHashTablePrint(os);
+ break;
+ case NUMBER_DICTIONARY_TYPE:
+ NumberDictionary::cast(this)->NumberDictionaryPrint(os);
+ break;
+ case EPHEMERON_HASH_TABLE_TYPE:
+ EphemeronHashTable::cast(this)->EphemeronHashTablePrint(os);
+ break;
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ ObjectBoilerplateDescription::cast(this)
+ ->ObjectBoilerplateDescriptionPrint(os);
break;
case PROPERTY_ARRAY_TYPE:
PropertyArray::cast(this)->PropertyArrayPrint(os);
@@ -236,6 +262,17 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case CALL_HANDLER_INFO_TYPE:
CallHandlerInfo::cast(this)->CallHandlerInfoPrint(os);
break;
+ case PRE_PARSED_SCOPE_DATA_TYPE:
+ PreParsedScopeData::cast(this)->PreParsedScopeDataPrint(os);
+ break;
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
+ UncompiledDataWithoutPreParsedScope::cast(this)
+ ->UncompiledDataWithoutPreParsedScopePrint(os);
+ break;
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ UncompiledDataWithPreParsedScope::cast(this)
+ ->UncompiledDataWithPreParsedScopePrint(os);
+ break;
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(os);
break;
@@ -267,6 +304,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_INTL_LOCALE_TYPE:
JSLocale::cast(this)->JSLocalePrint(os);
break;
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatPrint(os);
+ break;
#endif // V8_INTL_SUPPORT
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -275,6 +315,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case ALLOCATION_SITE_TYPE:
+ AllocationSite::cast(this)->AllocationSitePrint(os);
+ break;
case LOAD_HANDLER_TYPE:
LoadHandler::cast(this)->LoadHandlerPrint(os);
break;
@@ -330,7 +373,6 @@ void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
os << "byte array, data starts at " << GetDataStartAddress();
}
-
void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "BytecodeArray");
Disassemble(os);
@@ -369,7 +411,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
break;
}
case kDescriptor:
- os << Brief(descs->GetValue(i));
+ os << Brief(descs->GetStrongValue(i));
break;
}
os << " ";
@@ -480,7 +522,6 @@ void PrintDictionaryElements(std::ostream& os, FixedArrayBase* elements) {
void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements* elements) {
- Isolate* isolate = elements->GetIsolate();
FixedArray* arguments_store = elements->arguments();
os << "\n 0: context: " << Brief(elements->context())
<< "\n 1: arguments_store: " << Brief(arguments_store)
@@ -490,7 +531,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
Object* mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
<< "): " << Brief(mapped_entry);
- if (mapped_entry->IsTheHole(isolate)) {
+ if (mapped_entry->IsTheHole()) {
os << " in the arguments_store[" << i << "]";
} else {
os << " in the context";
@@ -556,9 +597,9 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
os << "\n }\n";
}
-
static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
const char* id) { // NOLINT
+ Isolate* isolate = obj->GetIsolate();
obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
@@ -568,7 +609,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
} else {
os << "DictionaryProperties";
}
- PrototypeIterator iter(obj->GetIsolate(), obj);
+ PrototypeIterator iter(isolate, obj);
os << "]\n - prototype: " << Brief(iter.GetCurrent());
os << "\n - elements: " << Brief(obj->elements()) << " ["
<< ElementsKindToString(obj->map()->elements_kind());
@@ -583,8 +624,8 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
}
}
-
-static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
+static void JSObjectPrintBody(std::ostream& os,
+ JSObject* obj, // NOLINT
bool print_elements = true) {
os << "\n - properties: ";
Object* properties_or_hash = obj->raw_properties_or_hash();
@@ -607,7 +648,6 @@ static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
}
}
-
void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, nullptr);
JSObjectPrintBody(os, this);
@@ -650,7 +690,7 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
int col = script->GetColumnNumber(source_position()) + 1;
String* script_name = script->name()->IsString()
? String::cast(script->name())
- : GetIsolate()->heap()->empty_string();
+ : GetReadOnlyRoots().empty_string();
os << "\n - source position: " << source_position();
os << " (";
script_name->PrintUC16(os);
@@ -659,7 +699,7 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
os << ")";
}
}
- os << "\n - register file: " << Brief(register_file());
+ os << "\n - register file: " << Brief(parameters_and_registers());
os << "\n";
}
@@ -703,14 +743,13 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Symbol");
os << "\n - hash: " << Hash();
os << "\n - name: " << Brief(name());
- if (name()->IsUndefined(GetIsolate())) {
+ if (name()->IsUndefined()) {
os << " (" << PrivateSymbolToName() << ")";
}
os << "\n - private: " << is_private();
os << "\n";
}
-
void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
os << "\n - type: " << instance_type();
@@ -762,9 +801,13 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - layout descriptor: ";
layout_descriptor()->ShortPrint(os);
}
- {
+
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ // Read-only maps can't have transitions, which is fortunate because we need
+ // the isolate to iterate over the transitions.
+ if (chunk->owner()->identity() != RO_SPACE) {
DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(this, &no_gc);
+ TransitionsAccessor transitions(chunk->heap()->isolate(), this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
if (nof_transitions > 0) {
os << "\n - transitions #" << nof_transitions << ": ";
@@ -817,6 +860,23 @@ void PrintFixedArrayWithHeader(std::ostream& os, FixedArray* array,
}
template <typename T>
+void PrintHashTableWithHeader(std::ostream& os, T* table, const char* type) {
+ table->PrintHeader(os, type);
+ os << "\n - length: " << table->length();
+ os << "\n - elements: " << table->NumberOfElements();
+ os << "\n - deleted: " << table->NumberOfDeletedElements();
+ os << "\n - capacity: " << table->Capacity();
+
+ os << "\n - elements: {";
+ for (int i = 0; i < table->Capacity(); i++) {
+ os << '\n'
+ << std::setw(12) << i << ": " << Brief(table->KeyAt(i)) << " -> "
+ << Brief(table->ValueAt(i));
+ }
+ os << "\n }\n";
+}
+
+template <typename T>
void PrintWeakArrayElements(std::ostream& os, T* array) {
// Print in array notation for non-sparse arrays.
MaybeObject* previous_value = array->length() > 0 ? array->Get(0) : nullptr;
@@ -840,30 +900,27 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
}
}
-void PrintWeakFixedArrayWithHeader(std::ostream& os, WeakFixedArray* array) {
- array->PrintHeader(os, "WeakFixedArray");
- os << "\n - length: " << array->length() << "\n";
- PrintWeakArrayElements(os, array);
- os << "\n";
+} // namespace
+
+void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
+ PrintFixedArrayWithHeader(os, this, "FixedArray");
}
-void PrintWeakArrayListWithHeader(std::ostream& os, WeakArrayList* array) {
- array->PrintHeader(os, "WeakArrayList");
- os << "\n - capacity: " << array->capacity();
- os << "\n - length: " << array->length() << "\n";
- PrintWeakArrayElements(os, array);
- os << "\n";
+void ObjectHashTable::ObjectHashTablePrint(std::ostream& os) {
+ PrintHashTableWithHeader(os, this, "ObjectHashTable");
}
-} // namespace
+void NumberDictionary::NumberDictionaryPrint(std::ostream& os) {
+ PrintHashTableWithHeader(os, this, "NumberDictionary");
+}
-void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
- PrintFixedArrayWithHeader(os, this,
- IsHashTable() ? "HashTable" : "FixedArray");
+void EphemeronHashTable::EphemeronHashTablePrint(std::ostream& os) {
+ PrintHashTableWithHeader(os, this, "EphemeronHashTable");
}
-void BoilerplateDescription::BoilerplateDescriptionPrint(std::ostream& os) {
- PrintFixedArrayWithHeader(os, this, "BoilerplateDescription");
+void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionPrint(
+ std::ostream& os) {
+ PrintFixedArrayWithHeader(os, this, "ObjectBoilerplateDescription");
}
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
@@ -882,31 +939,33 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
void WeakFixedArray::WeakFixedArrayPrint(std::ostream& os) {
- PrintWeakFixedArrayWithHeader(os, this);
+ PrintHeader(os, "WeakFixedArray");
+ os << "\n - length: " << length() << "\n";
+ PrintWeakArrayElements(os, this);
+ os << "\n";
}
void WeakArrayList::WeakArrayListPrint(std::ostream& os) {
- PrintWeakArrayListWithHeader(os, this);
+ PrintHeader(os, "WeakArrayList");
+ os << "\n - capacity: " << capacity();
+ os << "\n - length: " << length() << "\n";
+ PrintWeakArrayElements(os, this);
+ os << "\n";
}
void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TransitionArray");
- os << "\n - capacity: " << length();
- for (int i = 0; i < length(); i++) {
- os << "\n [" << i << "]: " << MaybeObjectBrief(Get(i));
- if (i == kPrototypeTransitionsIndex) os << " (prototype transitions)";
- if (i == kTransitionLengthIndex) os << " (number of transitions)";
- }
- os << "\n";
+ PrintInternal(os);
}
void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FeedbackCell");
- if (map() == GetHeap()->no_closures_cell_map()) {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ if (map() == roots.no_closures_cell_map()) {
os << "\n - no closures";
- } else if (map() == GetHeap()->one_closure_cell_map()) {
+ } else if (map() == roots.one_closure_cell_map()) {
os << "\n - one closure";
- } else if (map() == GetHeap()->many_closures_cell_map()) {
+ } else if (map() == roots.many_closures_cell_map()) {
os << "\n - many closures";
} else {
os << "\n - Invalid FeedbackCell map";
@@ -916,7 +975,7 @@ void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
}
void FeedbackVectorSpec::Print() {
- OFStream os(stdout);
+ StdoutStream os;
FeedbackVectorSpecPrint(os);
@@ -941,12 +1000,6 @@ void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void FeedbackMetadata::Print() {
- OFStream os(stdout);
- FeedbackMetadataPrint(os);
- os << std::flush;
-}
-
void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
HeapObject::PrintHeader(os, "FeedbackMetadata");
os << "\n - slot_count: " << slot_count();
@@ -960,12 +1013,6 @@ void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
os << "\n";
}
-void FeedbackVector::Print() {
- OFStream os(stdout);
- FeedbackVectorPrint(os);
- os << std::flush;
-}
-
void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FeedbackVector");
os << "\n - length: " << length();
@@ -1081,7 +1128,6 @@ void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
-
void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMessageObject");
os << "\n - type: " << type();
@@ -1139,7 +1185,6 @@ static const char* const weekdays[] = {
"???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
};
-
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDate");
os << "\n - value: " << Brief(value());
@@ -1171,14 +1216,12 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSSet");
os << " - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
-
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMap");
os << " - table: " << Brief(table());
@@ -1197,27 +1240,23 @@ void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
JSCollectionIteratorPrint(os);
}
-
void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMapIterator");
JSCollectionIteratorPrint(os);
}
-
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakMap");
os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
-
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakSet");
os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
-
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
os << "\n - backing_store: " << backing_store();
@@ -1231,7 +1270,6 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this, !was_neutered());
}
-
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSTypedArray");
os << "\n - buffer: " << Brief(buffer());
@@ -1259,7 +1297,6 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this, !WasNeutered());
}
-
void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSBoundFunction");
os << "\n - bound_target_function: " << Brief(bound_target_function());
@@ -1269,6 +1306,7 @@ void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
}
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
+ Isolate* isolate = GetIsolate();
JSObjectPrintHeader(os, this, "Function");
os << "\n - function prototype: ";
if (has_prototype_slot()) {
@@ -1292,11 +1330,11 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
if (builtin_index == Builtins::kDeserializeLazy) {
if (shared()->HasBuiltinId()) {
builtin_index = shared()->builtin_id();
- os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index)
+ os << "\n - builtin: " << isolate->builtins()->name(builtin_index)
<< "(lazy)";
}
} else {
- os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index);
+ os << "\n - builtin: " << isolate->builtins()->name(builtin_index);
}
}
@@ -1320,7 +1358,9 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
shared()->PrintSourceCode(os);
JSObjectPrintBody(os, this);
os << "\n - feedback vector: ";
- if (has_feedback_vector()) {
+ if (!shared()->HasFeedbackMetadata()) {
+ os << "feedback metadata is not available in SFI\n";
+ } else if (has_feedback_vector()) {
feedback_vector()->FeedbackVectorPrint(os);
} else {
os << "not available\n";
@@ -1375,7 +1415,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - start position: " << StartPosition();
os << "\n - end position: " << EndPosition();
if (HasDebugInfo()) {
- os << "\n - debug info: " << Brief(debug_info());
+ os << "\n - debug info: " << Brief(GetDebugInfo());
} else {
os << "\n - no debug info";
}
@@ -1393,7 +1433,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalProxy");
if (!GetIsolate()->bootstrapper()->IsActive()) {
@@ -1402,7 +1441,6 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
-
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalObject");
if (!GetIsolate()->bootstrapper()->IsActive()) {
@@ -1412,14 +1450,12 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
-
void Cell::CellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Cell");
os << "\n - value: " << Brief(value());
os << "\n";
}
-
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
os << "\n - name: ";
@@ -1429,7 +1465,7 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
property_details().PrintAsSlowTo(os);
PropertyCellType cell_type = property_details().cell_type();
os << "\n - cell_type: ";
- if (value()->IsTheHole(GetIsolate())) {
+ if (value()->IsTheHole()) {
switch (cell_type) {
case PropertyCellType::kUninitialized:
os << "Uninitialized";
@@ -1633,7 +1669,7 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - weak cell: " << Brief(weak_cell());
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
- os << "\n - object create map: " << Brief(object_create_map());
+ os << "\n - object create map: " << MaybeObjectBrief(object_create_map());
os << "\n - should_be_fast_map: " << should_be_fast_map();
os << "\n";
}
@@ -1653,8 +1689,11 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmCompiledModule::WasmCompiledModulePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmCompiledModule");
+void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ArrayBoilerplateDescription");
+ os << "\n - elements kind: " << elements_kind();
+ os << "\n - constant elements: " << Brief(constant_elements());
os << "\n";
}
@@ -1666,25 +1705,37 @@ void WasmDebugInfo::WasmDebugInfoPrint(std::ostream& os) { // NOLINT
void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "WasmInstanceObject");
- os << "\n - compiled_module: " << Brief(compiled_module());
os << "\n - module_object: " << Brief(module_object());
os << "\n - exports_object: " << Brief(exports_object());
os << "\n - native_context: " << Brief(native_context());
- os << "\n - memory_object: " << Brief(memory_object());
- os << "\n - globals_buffer: " << Brief(globals_buffer());
- os << "\n - imported_mutable_globals_buffers: "
- << Brief(imported_mutable_globals_buffers());
- os << "\n - debug_info: " << Brief(debug_info());
- os << "\n - table_object: " << Brief(table_object());
+ if (has_memory_object()) {
+ os << "\n - memory_object: " << Brief(memory_object());
+ }
+ if (has_globals_buffer()) {
+ os << "\n - globals_buffer: " << Brief(globals_buffer());
+ }
+ if (has_imported_mutable_globals_buffers()) {
+ os << "\n - imported_mutable_globals_buffers: "
+ << Brief(imported_mutable_globals_buffers());
+ }
+ if (has_debug_info()) {
+ os << "\n - debug_info: " << Brief(debug_info());
+ }
+ if (has_table_object()) {
+ os << "\n - table_object: " << Brief(table_object());
+ }
os << "\n - imported_function_instances: "
<< Brief(imported_function_instances());
os << "\n - imported_function_callables: "
<< Brief(imported_function_callables());
- os << "\n - indirect_function_table_instances: "
- << Brief(indirect_function_table_instances());
- os << "\n - managed_native_allocations: "
- << Brief(managed_native_allocations());
- os << "\n - managed_indirect_patcher: " << Brief(managed_indirect_patcher());
+ if (has_indirect_function_table_instances()) {
+ os << "\n - indirect_function_table_instances: "
+ << Brief(indirect_function_table_instances());
+ }
+ if (has_managed_native_allocations()) {
+ os << "\n - managed_native_allocations: "
+ << Brief(managed_native_allocations());
+ }
os << "\n - memory_start: " << static_cast<void*>(memory_start());
os << "\n - memory_size: " << memory_size();
os << "\n - memory_mask: " << AsHex(memory_mask());
@@ -1710,10 +1761,15 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
os << "\n";
}
-void WasmSharedModuleData::WasmSharedModuleDataPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmSharedModuleData");
+void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "WasmModuleObject");
+ JSObjectPrintBody(os, this);
os << "\n - module: " << module();
+ os << "\n - native module: " << native_module();
+ os << "\n - export wrappers: " << Brief(export_wrappers());
+ os << "\n - script: " << Brief(script());
+ os << "\n - asm_js_offset_table: " << Brief(asm_js_offset_table());
+ os << "\n - breakpoint_infos: " << Brief(breakpoint_infos());
os << "\n";
}
@@ -1724,7 +1780,7 @@ void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
os << "\n - validity_cell: " << Brief(validity_cell());
int data_count = data_field_count();
if (data_count >= 1) {
- os << "\n - data1: " << Brief(data1());
+ os << "\n - data1: " << MaybeObjectBrief(data1());
}
if (data_count >= 2) {
os << "\n - data2: " << Brief(data2());
@@ -1742,7 +1798,7 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
os << "\n - validity_cell: " << Brief(validity_cell());
int data_count = data_field_count();
if (data_count >= 1) {
- os << "\n - data1: " << Brief(data1());
+ os << "\n - data1: " << MaybeObjectBrief(data1());
}
if (data_count >= 2) {
os << "\n - data2: " << Brief(data2());
@@ -1832,7 +1888,7 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationSite");
- os << "\n - weak_next: " << Brief(weak_next());
+ if (this->HasWeakNext()) os << "\n - weak_next: " << Brief(weak_next());
os << "\n - dependent code: " << Brief(dependent_code());
os << "\n - nested site: " << Brief(nested_site());
os << "\n - memento found count: "
@@ -1858,7 +1914,7 @@ void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationMemento");
os << "\n - allocation site: ";
if (IsValid()) {
- GetAllocationSite()->Print(os);
+ GetAllocationSite()->AllocationSitePrint(os);
} else {
os << "<invalid>\n";
}
@@ -1874,7 +1930,6 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - type: " << type();
os << "\n - id: " << id();
os << "\n - context data: " << Brief(context_data());
- os << "\n - wrapper: " << Brief(wrapper());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
if (has_eval_from_shared()) {
@@ -1904,6 +1959,16 @@ void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
os << "\n - numberingSystem: " << Brief(numbering_system());
os << "\n";
}
+
+void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
+ std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSRelativeTimeFormat");
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - style: " << StyleAsString();
+ os << "\n - numeric: " << NumericAsString();
+ os << "\n - formatter: " << Brief(formatter());
+ os << "\n";
+}
#endif // V8_INTL_SUPPORT
namespace {
@@ -1935,7 +2000,6 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
int flags = Flags();
os << "\n - parameters: " << ParameterCount();
- os << "\n - stack locals: " << StackLocalCount();
os << "\n - context locals : " << ContextLocalCount();
os << "\n - scope type: " << scope_type();
@@ -1970,10 +2034,6 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - length: " << length();
if (length() > 0) {
- PrintScopeInfoList(this, os, "parameters", 0, ParameterNamesIndex(),
- ParameterCount());
- PrintScopeInfoList(this, os, "stack slots", 0, StackLocalNamesIndex(),
- StackLocalCount());
PrintScopeInfoList(this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
ContextLocalNamesIndex(), ContextLocalCount());
// TODO(neis): Print module stuff if present.
@@ -1986,9 +2046,10 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
os << "\n - flags: " << flags();
os << "\n - debugger_hints: " << debugger_hints();
os << "\n - shared: " << Brief(shared());
- os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
+ os << "\n - function_identifier: " << Brief(function_identifier());
+ os << "\n - original bytecode array: " << Brief(original_bytecode_array());
os << "\n - break_points: ";
- break_points()->Print(os);
+ break_points()->FixedArrayPrint(os);
os << "\n - coverage_info: " << Brief(coverage_info());
}
@@ -2015,9 +2076,8 @@ static void PrintBitMask(std::ostream& os, uint32_t value) { // NOLINT
}
}
-
void LayoutDescriptor::Print() {
- OFStream os(stdout);
+ StdoutStream os;
this->Print(os);
os << std::flush;
}
@@ -2037,8 +2097,7 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
} else if (IsSmi()) {
os << "fast";
PrintBitMask(os, static_cast<uint32_t>(Smi::ToInt(this)));
- } else if (IsOddball() &&
- IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
+ } else if (IsOddball() && IsUninitialized()) {
os << "<uninitialized>";
} else {
os << "slow";
@@ -2054,7 +2113,27 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
void PreParsedScopeData::PreParsedScopeDataPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PreParsedScopeData");
os << "\n - scope_data: " << Brief(scope_data());
- os << "\n - child_data: " << Brief(child_data());
+ os << "\n - length: " << length();
+ for (int i = 0; i < length(); ++i) {
+ os << "\n - [" << i << "]: " << Brief(child_data(i));
+ }
+ os << "\n";
+}
+
+void UncompiledDataWithoutPreParsedScope::
+ UncompiledDataWithoutPreParsedScopePrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "UncompiledDataWithoutPreParsedScope");
+ os << "\n - start position: " << start_position();
+ os << "\n - end position: " << end_position();
+ os << "\n";
+}
+
+void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopePrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "UncompiledDataWithPreParsedScope");
+ os << "\n - start position: " << start_position();
+ os << "\n - end position: " << end_position();
+ os << "\n - pre_parsed_scope_data: " << Brief(pre_parsed_scope_data());
os << "\n";
}
@@ -2066,7 +2145,7 @@ void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
}
void MaybeObject::Print() {
- OFStream os(stdout);
+ StdoutStream os;
this->Print(os);
os << std::flush;
}
@@ -2090,6 +2169,12 @@ void MaybeObject::Print(std::ostream& os) {
#endif // OBJECT_PRINT
+void HeapNumber::HeapNumberPrint(std::ostream& os) { os << value(); }
+
+void MutableHeapNumber::MutableHeapNumberPrint(std::ostream& os) {
+ os << value();
+}
+
// TODO(cbruni): remove once the new maptracer is in place.
void Name::NameShortPrint() {
if (this->IsString()) {
@@ -2097,7 +2182,7 @@ void Name::NameShortPrint() {
} else {
DCHECK(this->IsSymbol());
Symbol* s = Symbol::cast(this);
- if (s->name()->IsUndefined(GetIsolate())) {
+ if (s->name()->IsUndefined()) {
PrintF("#<%s>", s->PrivateSymbolToName());
} else {
PrintF("<%s>", String::cast(s->name())->ToCString().get());
@@ -2112,7 +2197,7 @@ int Name::NameShortPrint(Vector<char> str) {
} else {
DCHECK(this->IsSymbol());
Symbol* s = Symbol::cast(this);
- if (s->name()->IsUndefined(GetIsolate())) {
+ if (s->name()->IsUndefined()) {
return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
} else {
return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
@@ -2120,7 +2205,7 @@ int Name::NameShortPrint(Vector<char> str) {
}
}
-void Map::PrintMapDetails(std::ostream& os, JSObject* holder) {
+void Map::PrintMapDetails(std::ostream& os) {
DisallowHeapAllocation no_gc;
#ifdef OBJECT_PRINT
this->MapPrint(os);
@@ -2129,9 +2214,6 @@ void Map::PrintMapDetails(std::ostream& os, JSObject* holder) {
#endif
os << "\n";
instance_descriptors()->PrintDescriptors(os);
- if (is_dictionary_map() && holder != nullptr) {
- os << holder->property_dictionary() << "\n";
- }
}
void DescriptorArray::PrintDescriptors(std::ostream& os) {
@@ -2154,14 +2236,14 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
PropertyDetails details = GetDetails(descriptor);
details.PrintAsFastTo(os, mode);
os << " @ ";
- Object* value = GetValue(descriptor);
switch (details.location()) {
case kField: {
- FieldType* field_type = Map::UnwrapFieldType(value);
+ FieldType* field_type = GetFieldType(descriptor);
field_type->PrintTo(os);
break;
}
case kDescriptor:
+ Object* value = GetStrongValue(descriptor);
os << Brief(value);
if (value->IsAccessorPair()) {
AccessorPair* pair = AccessorPair::cast(value);
@@ -2187,11 +2269,6 @@ char* String::ToAsciiArray() {
return buffer;
}
-void DescriptorArray::Print() {
- OFStream os(stdout);
- this->PrintDescriptors(os);
- os << std::flush;
-}
// static
void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
Map* target) {
@@ -2202,20 +2279,20 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
key->ShortPrint(os);
#endif
os << ": ";
- Heap* heap = key->GetHeap();
- if (key == heap->nonextensible_symbol()) {
+ ReadOnlyRoots roots = key->GetReadOnlyRoots();
+ if (key == roots.nonextensible_symbol()) {
os << "(transition to non-extensible)";
- } else if (key == heap->sealed_symbol()) {
+ } else if (key == roots.sealed_symbol()) {
os << "(transition to sealed)";
- } else if (key == heap->frozen_symbol()) {
+ } else if (key == roots.frozen_symbol()) {
os << "(transition to frozen)";
- } else if (key == heap->elements_transition_symbol()) {
+ } else if (key == roots.elements_transition_symbol()) {
os << "(transition to " << ElementsKindToString(target->elements_kind())
<< ")";
- } else if (key == heap->strict_function_transition_symbol()) {
+ } else if (key == roots.strict_function_transition_symbol()) {
os << " (transition to strict function)";
} else {
- DCHECK(!IsSpecialTransition(key));
+ DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
int descriptor = target->LastAdded();
DescriptorArray* descriptors = target->instance_descriptors();
@@ -2226,13 +2303,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
os << " -> " << Brief(target);
}
-void TransitionArray::Print() {
- OFStream os(stdout);
- Print(os);
-}
-
-// TODO(ishell): unify with TransitionArrayPrint().
-void TransitionArray::Print(std::ostream& os) {
+void TransitionArray::PrintInternal(std::ostream& os) {
int num_transitions = number_of_transitions();
os << "Transition array #" << num_transitions << ":";
for (int i = 0; i < num_transitions; i++) {
@@ -2255,12 +2326,12 @@ void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
break;
}
case kFullTransitionArray:
- return transitions()->Print(os);
+ return transitions()->PrintInternal(os);
}
}
void TransitionsAccessor::PrintTransitionTree() {
- OFStream os(stdout);
+ StdoutStream os;
os << "map= " << Brief(map_);
DisallowHeapAllocation no_gc;
PrintTransitionTree(os, 0, &no_gc);
@@ -2269,6 +2340,7 @@ void TransitionsAccessor::PrintTransitionTree() {
void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
DisallowHeapAllocation* no_gc) {
+ ReadOnlyRoots roots = ReadOnlyRoots(isolate_);
int num_transitions = NumberOfTransitions();
if (num_transitions == 0) return;
for (int i = 0; i < num_transitions; i++) {
@@ -2280,16 +2352,15 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
ss << Brief(target);
os << std::left << std::setw(50) << ss.str() << ": ";
- Heap* heap = key->GetHeap();
- if (key == heap->nonextensible_symbol()) {
+ if (key == roots.nonextensible_symbol()) {
os << "to non-extensible";
- } else if (key == heap->sealed_symbol()) {
+ } else if (key == roots.sealed_symbol()) {
os << "to sealed ";
- } else if (key == heap->frozen_symbol()) {
+ } else if (key == roots.frozen_symbol()) {
os << "to frozen";
- } else if (key == heap->elements_transition_symbol()) {
+ } else if (key == roots.elements_transition_symbol()) {
os << "to " << ElementsKindToString(target->elements_kind());
- } else if (key == heap->strict_function_transition_symbol()) {
+ } else if (key == roots.strict_function_transition_symbol()) {
os << "to strict function";
} else {
#ifdef OBJECT_PRINT
@@ -2298,21 +2369,21 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
key->ShortPrint(os);
#endif
os << " ";
- DCHECK(!IsSpecialTransition(key));
+ DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
int descriptor = target->LastAdded();
DescriptorArray* descriptors = target->instance_descriptors();
descriptors->PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
- TransitionsAccessor transitions(target, no_gc);
+ TransitionsAccessor transitions(isolate_, target, no_gc);
transitions.PrintTransitionTree(os, level + 1, no_gc);
}
}
void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
DisallowHeapAllocation no_gc;
- TransitionsAccessor ta(map(), &no_gc);
+ TransitionsAccessor ta(GetIsolate(), map(), &no_gc);
if (ta.NumberOfTransitions() == 0) return;
os << "\n - transitions";
ta.PrintTransitions(os);
@@ -2336,15 +2407,17 @@ extern void _v8_internal_Print_Code(void* object) {
i::wasm::WasmCode* wasm_code =
isolate->wasm_engine()->code_manager()->LookupCode(address);
if (wasm_code) {
- i::OFStream os(stdout);
- wasm_code->Disassemble(nullptr, isolate, os, address);
+ i::StdoutStream os;
+ wasm_code->Disassemble(nullptr, os, address);
return;
}
if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
- !isolate->heap()->InSpaceSlow(address, i::LO_SPACE)) {
+ !isolate->heap()->InSpaceSlow(address, i::LO_SPACE) &&
+ !i::InstructionStream::PcIsOffHeap(isolate, address)) {
i::PrintF(
- "%p is not within the current isolate's large object or code spaces\n",
+ "%p is not within the current isolate's large object, code or embedded "
+ "spaces\n",
object);
return;
}
@@ -2355,37 +2428,13 @@ extern void _v8_internal_Print_Code(void* object) {
return;
}
#ifdef ENABLE_DISASSEMBLER
- i::OFStream os(stdout);
+ i::StdoutStream os;
code->Disassemble(nullptr, os, address);
#else // ENABLE_DISASSEMBLER
code->Print();
#endif // ENABLE_DISASSEMBLER
}
-extern void _v8_internal_Print_FeedbackMetadata(void* object) {
- if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Please provide a feedback metadata object\n");
- } else {
- reinterpret_cast<i::FeedbackMetadata*>(object)->Print();
- }
-}
-
-extern void _v8_internal_Print_FeedbackVector(void* object) {
- if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Please provide a feedback vector\n");
- } else {
- reinterpret_cast<i::FeedbackVector*>(object)->Print();
- }
-}
-
-extern void _v8_internal_Print_DescriptorArray(void* object) {
- if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Please provide a descriptor array\n");
- } else {
- reinterpret_cast<i::DescriptorArray*>(object)->Print();
- }
-}
-
extern void _v8_internal_Print_LayoutDescriptor(void* object) {
i::Object* o = reinterpret_cast<i::Object*>(object);
if (!o->IsLayoutDescriptor()) {
@@ -2395,14 +2444,6 @@ extern void _v8_internal_Print_LayoutDescriptor(void* object) {
}
}
-extern void _v8_internal_Print_TransitionArray(void* object) {
- if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Please provide a transition array\n");
- } else {
- reinterpret_cast<i::TransitionArray*>(object)->Print();
- }
-}
-
extern void _v8_internal_Print_StackTrace() {
i::Isolate* isolate = i::Isolate::Current();
isolate->PrintStack(stdout);
@@ -2415,8 +2456,8 @@ extern void _v8_internal_Print_TransitionTree(void* object) {
} else {
#if defined(DEBUG) || defined(OBJECT_PRINT)
i::DisallowHeapAllocation no_gc;
- i::TransitionsAccessor transitions(reinterpret_cast<i::Map*>(object),
- &no_gc);
+ i::Map* map = reinterpret_cast<i::Map*>(object);
+ i::TransitionsAccessor transitions(map->GetIsolate(), map, &no_gc);
transitions.PrintTransitionTree();
#endif
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6913e68ed9..01421bacc4 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -25,12 +25,10 @@
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler.h"
#include "src/counters-inl.h"
#include "src/counters.h"
#include "src/date.h"
-#include "src/debug/debug-evaluate.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -54,18 +52,26 @@
#include "src/messages.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-collection-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-locale.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
@@ -176,8 +182,7 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
Handle<Object> object) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
- if (*object == isolate->heap()->null_value() ||
- object->IsUndefined(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
return isolate->global_proxy();
}
return Object::ToObject(isolate, object);
@@ -192,10 +197,10 @@ MaybeHandle<Object> Object::ConvertToNumberOrNumeric(Isolate* isolate,
return input;
}
if (input->IsString()) {
- return String::ToNumber(Handle<String>::cast(input));
+ return String::ToNumber(isolate, Handle<String>::cast(input));
}
if (input->IsOddball()) {
- return Oddball::ToNumber(Handle<Oddball>::cast(input));
+ return Oddball::ToNumber(isolate, Handle<Oddball>::cast(input));
}
if (input->IsSymbol()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
@@ -294,7 +299,7 @@ MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
String);
}
if (input->IsBigInt()) {
- return BigInt::ToString(Handle<BigInt>::cast(input));
+ return BigInt::ToString(isolate, Handle<BigInt>::cast(input));
}
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
@@ -460,7 +465,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
// static
MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
if (input->IsSmi()) {
int value = std::max(Smi::ToInt(*input), 0);
return handle(Smi::FromInt(value), isolate);
@@ -479,7 +484,7 @@ MaybeHandle<Object> Object::ConvertToIndex(
Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index) {
if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
double len = DoubleToInteger(input->Number()) + 0.0;
auto js_len = isolate->factory()->NewNumber(len);
@@ -489,10 +494,9 @@ MaybeHandle<Object> Object::ConvertToIndex(
return js_len;
}
-bool Object::BooleanValue() {
+bool Object::BooleanValue(Isolate* isolate) {
if (IsSmi()) return Smi::ToInt(this) != 0;
DCHECK(IsHeapObject());
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (IsBoolean()) return IsTrue(isolate);
if (IsNullOrUndefined(isolate)) return false;
if (IsUndetectable()) return false; // Undetectable object is false.
@@ -547,7 +551,8 @@ ComparisonResult Reverse(ComparisonResult result) {
} // anonymous namespace
// static
-Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
+Maybe<ComparisonResult> Object::Compare(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
// ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
!Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
@@ -555,20 +560,20 @@ Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
}
if (x->IsString() && y->IsString()) {
// ES6 section 7.2.11 Abstract Relational Comparison step 5.
- return Just(
- String::Compare(Handle<String>::cast(x), Handle<String>::cast(y)));
+ return Just(String::Compare(isolate, Handle<String>::cast(x),
+ Handle<String>::cast(y)));
}
if (x->IsBigInt() && y->IsString()) {
- return Just(BigInt::CompareToString(Handle<BigInt>::cast(x),
+ return Just(BigInt::CompareToString(isolate, Handle<BigInt>::cast(x),
Handle<String>::cast(y)));
}
if (x->IsString() && y->IsBigInt()) {
- return Just(Reverse(BigInt::CompareToString(Handle<BigInt>::cast(y),
- Handle<String>::cast(x))));
+ return Just(Reverse(BigInt::CompareToString(
+ isolate, Handle<BigInt>::cast(y), Handle<String>::cast(x))));
}
// ES6 section 7.2.11 Abstract Relational Comparison step 6.
- if (!Object::ToNumeric(x).ToHandle(&x) ||
- !Object::ToNumeric(y).ToHandle(&y)) {
+ if (!Object::ToNumeric(isolate, x).ToHandle(&x) ||
+ !Object::ToNumeric(isolate, y).ToHandle(&y)) {
return Nothing<ComparisonResult>();
}
@@ -588,7 +593,8 @@ Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
// static
-Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
+Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
+ Handle<Object> y) {
// This is the generic version of Abstract Equality Comparison. Must be in
// sync with CodeStubAssembler::Equal.
while (true) {
@@ -598,7 +604,8 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsBoolean()) {
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
} else if (y->IsString()) {
- return Just(NumberEquals(x, String::ToNumber(Handle<String>::cast(y))));
+ return Just(NumberEquals(
+ x, String::ToNumber(isolate, Handle<String>::cast(y))));
} else if (y->IsBigInt()) {
return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
@@ -611,16 +618,16 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
}
} else if (x->IsString()) {
if (y->IsString()) {
- return Just(
- String::Equals(Handle<String>::cast(x), Handle<String>::cast(y)));
+ return Just(String::Equals(isolate, Handle<String>::cast(x),
+ Handle<String>::cast(y)));
} else if (y->IsNumber()) {
- x = String::ToNumber(Handle<String>::cast(x));
+ x = String::ToNumber(isolate, Handle<String>::cast(x));
return Just(NumberEquals(x, y));
} else if (y->IsBoolean()) {
- x = String::ToNumber(Handle<String>::cast(x));
+ x = String::ToNumber(isolate, Handle<String>::cast(x));
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
} else if (y->IsBigInt()) {
- return Just(BigInt::EqualToString(Handle<BigInt>::cast(y),
+ return Just(BigInt::EqualToString(isolate, Handle<BigInt>::cast(y),
Handle<String>::cast(x)));
} else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
@@ -636,17 +643,17 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsNumber()) {
return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
} else if (y->IsString()) {
- y = String::ToNumber(Handle<String>::cast(y));
+ y = String::ToNumber(isolate, Handle<String>::cast(y));
return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
} else if (y->IsBigInt()) {
- x = Oddball::ToNumber(Handle<Oddball>::cast(x));
+ x = Oddball::ToNumber(isolate, Handle<Oddball>::cast(x));
return Just(BigInt::EqualToNumber(Handle<BigInt>::cast(y), x));
} else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
}
- x = Oddball::ToNumber(Handle<Oddball>::cast(x));
+ x = Oddball::ToNumber(isolate, Handle<Oddball>::cast(x));
} else {
return Just(false);
}
@@ -665,14 +672,14 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
if (y->IsBigInt()) {
return Just(BigInt::EqualToBigInt(BigInt::cast(*x), BigInt::cast(*y)));
}
- return Equals(y, x);
+ return Equals(isolate, y, x);
} else if (x->IsJSReceiver()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
} else if (y->IsUndetectable()) {
return Just(x->IsUndetectable());
} else if (y->IsBoolean()) {
- y = Oddball::ToNumber(Handle<Oddball>::cast(y));
+ y = Oddball::ToNumber(isolate, Handle<Oddball>::cast(y));
} else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
.ToHandle(&x)) {
return Nothing<bool>();
@@ -702,7 +709,8 @@ bool Object::StrictEquals(Object* that) {
// static
Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsNumber()) return isolate->factory()->number_string();
- if (object->IsOddball()) return handle(Oddball::cast(*object)->type_of());
+ if (object->IsOddball())
+ return handle(Oddball::cast(*object)->type_of(), isolate);
if (object->IsUndetectable()) {
return isolate->factory()->undefined_string();
}
@@ -733,8 +741,10 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
Handle<String>::cast(rhs));
}
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(isolate, rhs),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(isolate, lhs),
+ Object);
return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
}
@@ -762,7 +772,8 @@ MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
Handle<Object> prototype;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
- Object::GetProperty(callable, isolate->factory()->prototype_string()),
+ Object::GetProperty(isolate, callable,
+ isolate->factory()->prototype_string()),
Object);
if (!prototype->IsJSReceiver()) {
THROW_NEW_ERROR(
@@ -802,7 +813,7 @@ MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
isolate, result,
Execution::Call(isolate, inst_of_handler, callable, 1, &object),
Object);
- return isolate->factory()->ToBoolean(result->BooleanValue());
+ return isolate->factory()->ToBoolean(result->BooleanValue(isolate));
}
// The {callable} must have a [[Call]] internal method.
@@ -825,8 +836,8 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
Handle<Name> name) {
Handle<Object> func;
Isolate* isolate = receiver->GetIsolate();
- ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
- JSReceiver::GetProperty(receiver, name), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, func, JSReceiver::GetProperty(isolate, receiver, name), Object);
if (func->IsNullOrUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -1004,7 +1015,8 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
}
// static
-MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
+ OnNonExistent on_non_existent) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
@@ -1038,6 +1050,12 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
return it->GetDataValue();
}
}
+
+ if (on_non_existent == OnNonExistent::kThrowReferenceError) {
+ THROW_NEW_ERROR(it->isolate(),
+ NewReferenceError(MessageTemplate::kNotDefined, it->name()),
+ Object);
+ }
return it->isolate()->factory()->undefined_value();
}
@@ -1219,7 +1237,7 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
if (maybe_name.ToHandle(&name) && name->IsString()) {
name_string = Handle<String>::cast(name);
} else if (info->class_name()->IsString()) {
- name_string = handle(String::cast(info->class_name()));
+ name_string = handle(String::cast(info->class_name()), isolate);
} else {
name_string = isolate->factory()->empty_string();
}
@@ -1281,7 +1299,7 @@ Handle<TemplateList> TemplateList::Add(Isolate* isolate,
STATIC_ASSERT(kFirstElementIndex == 1);
int index = list->length() + 1;
Handle<i::FixedArray> fixed_array = Handle<FixedArray>::cast(list);
- fixed_array = FixedArray::SetAndGrow(fixed_array, index, value);
+ fixed_array = FixedArray::SetAndGrow(isolate, fixed_array, index, value);
fixed_array->set(kLengthIndex, Smi::FromInt(index));
return Handle<TemplateList>::cast(fixed_array);
}
@@ -1322,7 +1340,7 @@ void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
object->HasFastStringWrapperElements());
FixedArray* raw_elems = FixedArray::cast(object->elements());
Heap* heap = object->GetHeap();
- if (raw_elems->map() != heap->fixed_cow_array_map()) return;
+ if (raw_elems->map() != ReadOnlyRoots(heap).fixed_cow_array_map()) return;
Isolate* isolate = heap->isolate();
Handle<FixedArray> elems(raw_elems, isolate);
Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
@@ -1400,6 +1418,8 @@ int JSObject::GetHeaderSize(InstanceType type,
#ifdef V8_INTL_SUPPORT
case JS_INTL_LOCALE_TYPE:
return JSLocale::kSize;
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ return JSRelativeTimeFormat::kSize;
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
return WasmGlobalObject::kSize;
@@ -1548,8 +1568,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
}
// static
-Address AccessorInfo::redirect(Isolate* isolate, Address address,
- AccessorComponent component) {
+Address AccessorInfo::redirect(Address address, AccessorComponent component) {
ApiFunction fun(address);
DCHECK_EQ(ACCESSOR_GETTER, component);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
@@ -1559,7 +1578,7 @@ Address AccessorInfo::redirect(Isolate* isolate, Address address,
Address AccessorInfo::redirected_getter() const {
Address accessor = v8::ToCData<Address>(getter());
if (accessor == kNullAddress) return kNullAddress;
- return redirect(GetIsolate(), accessor, ACCESSOR_GETTER);
+ return redirect(accessor, ACCESSOR_GETTER);
}
Address CallHandlerInfo::redirected_callback() const {
@@ -1633,8 +1652,8 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// (signalling an exception) or a boolean Oddball.
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
if (result.is_null()) return Just(true);
- DCHECK(result->BooleanValue() || should_throw == kDontThrow);
- return Just(result->BooleanValue());
+ DCHECK(result->BooleanValue(isolate) || should_throw == kDontThrow);
+ return Just(result->BooleanValue(isolate));
}
// Regular accessor.
@@ -2010,8 +2029,9 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
if (object->IsJSGlobalObject()) {
Handle<JSGlobalObject> global_obj = Handle<JSGlobalObject>::cast(object);
- Handle<GlobalDictionary> dictionary(global_obj->global_dictionary());
- int entry = dictionary->FindEntry(isolate, name, hash);
+ Handle<GlobalDictionary> dictionary(global_obj->global_dictionary(),
+ isolate);
+ int entry = dictionary->FindEntry(ReadOnlyRoots(isolate), name, hash);
if (entry == GlobalDictionary::kNotFound) {
DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
@@ -2023,28 +2043,30 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
: PropertyCellType::kConstant;
details = details.set_cell_type(cell_type);
value = cell;
- dictionary = GlobalDictionary::Add(dictionary, name, value, details);
+ dictionary =
+ GlobalDictionary::Add(isolate, dictionary, name, value, details);
global_obj->set_global_dictionary(*dictionary);
} else {
- Handle<PropertyCell> cell =
- PropertyCell::PrepareForValue(dictionary, entry, value, details);
+ Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
+ isolate, dictionary, entry, value, details);
cell->set_value(*value);
}
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary());
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
- int entry = dictionary->FindEntry(name);
+ int entry = dictionary->FindEntry(isolate, name);
if (entry == NameDictionary::kNotFound) {
DCHECK_IMPLIES(object->map()->is_prototype_map(),
Map::IsPrototypeChainInvalidated(object->map()));
- dictionary = NameDictionary::Add(dictionary, name, value, details);
+ dictionary =
+ NameDictionary::Add(isolate, dictionary, name, value, details);
object->SetProperties(*dictionary);
} else {
PropertyDetails original_details = dictionary->DetailsAt(entry);
int enumeration_index = original_details.dictionary_index();
DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(entry, *name, *value, details);
+ dictionary->SetEntry(isolate, entry, *name, *value, details);
}
}
}
@@ -2102,7 +2124,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (!map->OnlyHasSimpleProperties()) return Just(false);
Handle<JSObject> from = Handle<JSObject>::cast(source);
- if (from->elements() != isolate->heap()->empty_fixed_array()) {
+ if (from->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
return Just(false);
}
@@ -2120,7 +2142,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetValue(i), isolate);
+ prop_value = handle(descriptors->GetStrongValue(i), isolate);
} else {
Representation representation = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
@@ -2128,8 +2150,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
}
} else {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, JSReceiver::GetProperty(from, next_key),
- Nothing<bool>());
+ isolate, prop_value,
+ JSReceiver::GetProperty(isolate, from, next_key), Nothing<bool>());
stable = from->map() == *map;
}
} else {
@@ -2257,7 +2279,7 @@ Map* Map::GetPrototypeChainRootMap(Isolate* isolate) const {
JSFunction::cast(native_context->get(constructor_function_index));
return constructor_function->initial_map();
}
- return isolate->heap()->null_value()->map();
+ return ReadOnlyRoots(isolate).null_value()->map();
}
// static
@@ -2333,7 +2355,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
if (is_array.FromJust()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, constructor,
- Object::GetProperty(original_array,
+ Object::GetProperty(isolate, original_array,
isolate->factory()->constructor_string()),
Object);
if (constructor->IsConstructor()) {
@@ -2350,7 +2372,8 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
if (constructor->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, constructor,
- JSReceiver::GetProperty(Handle<JSReceiver>::cast(constructor),
+ JSReceiver::GetProperty(isolate,
+ Handle<JSReceiver>::cast(constructor),
isolate->factory()->species_symbol()),
Object);
if (constructor->IsNull(isolate)) {
@@ -2377,7 +2400,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
Handle<Object> ctor_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, ctor_obj,
- JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
+ JSObject::GetProperty(isolate, recv,
+ isolate->factory()->constructor_string()),
Object);
if (ctor_obj->IsUndefined(isolate)) return default_ctor;
@@ -2393,7 +2417,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
Handle<Object> species;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, species,
- JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
+ JSObject::GetProperty(isolate, ctor,
+ isolate->factory()->species_symbol()),
Object);
if (species->IsNullOrUndefined(isolate)) {
@@ -2503,7 +2528,7 @@ void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
}
-Handle<String> String::SlowFlatten(Handle<ConsString> cons,
+Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
PretenureFlag pretenure) {
DCHECK_NE(cons->second()->length(), 0);
@@ -2513,17 +2538,15 @@ Handle<String> String::SlowFlatten(Handle<ConsString> cons,
// String::Flatten only in those cases where String::SlowFlatten is not
// called again.
if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
- cons = handle(ConsString::cast(cons->second()));
+ cons = handle(ConsString::cast(cons->second()), isolate);
} else {
- return String::Flatten(handle(cons->second()));
+ return String::Flatten(isolate, handle(cons->second(), isolate));
}
}
DCHECK(AllowHeapAllocation::IsAllowed());
- Isolate* isolate = cons->GetIsolate();
int length = cons->length();
- PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? pretenure
- : TENURED;
+ PretenureFlag tenure = Heap::InNewSpace(*cons) ? pretenure : TENURED;
Handle<SeqString> result;
if (cons->IsOneByteRepresentation()) {
Handle<SeqOneByteString> flat = isolate->factory()->NewRawOneByteString(
@@ -2538,8 +2561,8 @@ Handle<String> String::SlowFlatten(Handle<ConsString> cons,
WriteToFlat(*cons, flat->GetChars(), 0, length);
result = flat;
}
- cons->set_first(*result);
- cons->set_second(isolate->heap()->empty_string());
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
DCHECK(result->IsFlat());
return result;
}
@@ -2565,8 +2588,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kShortSize) return false;
- Heap* heap = GetHeap();
- if (heap->read_only_space()->Contains(this)) return false;
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ // Read-only strings cannot be made external, since that would mutate the
+ // string.
+ if (chunk->owner()->identity() == RO_SPACE) return false;
+ Heap* heap = chunk->heap();
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
@@ -2580,20 +2606,27 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
Map* new_map;
+ ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
- new_map = is_internalized
- ? (is_one_byte
- ? heap->short_external_internalized_string_with_one_byte_data_map()
- : heap->short_external_internalized_string_map())
- : (is_one_byte ? heap->short_external_string_with_one_byte_data_map()
- : heap->short_external_string_map());
+ if (is_internalized) {
+ new_map =
+ is_one_byte
+ ? roots
+ .short_external_internalized_string_with_one_byte_data_map()
+ : roots.short_external_internalized_string_map();
+ } else {
+ new_map = is_one_byte
+ ? roots.short_external_string_with_one_byte_data_map()
+ : roots.short_external_string_map();
+ }
} else {
- new_map = is_internalized
- ? (is_one_byte
- ? heap->external_internalized_string_with_one_byte_data_map()
- : heap->external_internalized_string_map())
- : (is_one_byte ? heap->external_string_with_one_byte_data_map()
- : heap->external_string_map());
+ new_map =
+ is_internalized
+ ? (is_one_byte
+ ? roots.external_internalized_string_with_one_byte_data_map()
+ : roots.external_internalized_string_map())
+ : (is_one_byte ? roots.external_string_with_one_byte_data_map()
+ : roots.external_string_map());
}
// Byte size of the external String object.
@@ -2610,6 +2643,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_resource(resource);
+ heap->RegisterExternalString(this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
}
@@ -2639,8 +2673,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kShortSize) return false;
- Heap* heap = GetHeap();
- if (heap->read_only_space()->Contains(this)) return false;
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ // Read-only strings cannot be made external, since that would mutate the
+ // string.
+ if (chunk->owner()->identity() == RO_SPACE) return false;
+ Heap* heap = chunk->heap();
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
@@ -2655,14 +2692,15 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
Map* new_map;
+ ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
new_map = is_internalized
- ? heap->short_external_one_byte_internalized_string_map()
- : heap->short_external_one_byte_string_map();
+ ? roots.short_external_one_byte_internalized_string_map()
+ : roots.short_external_one_byte_string_map();
} else {
new_map = is_internalized
- ? heap->external_one_byte_internalized_string_map()
- : heap->external_one_byte_string_map();
+ ? roots.external_one_byte_internalized_string_map()
+ : roots.external_one_byte_string_map();
}
// Byte size of the external String object.
@@ -2679,6 +2717,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
ExternalOneByteString* self = ExternalOneByteString::cast(this);
self->set_resource(resource);
+ heap->RegisterExternalString(this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
}
@@ -2758,7 +2797,7 @@ void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->IsUndefined(GetIsolate())
+ double length = JSArray::cast(this)->length()->IsUndefined()
? 0
: JSArray::cast(this)->length()->Number();
accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
@@ -2906,14 +2945,15 @@ MaybeHandle<JSFunction> Map::GetConstructorFunction(
int const constructor_function_index = map->GetConstructorFunctionIndex();
if (constructor_function_index != kNoConstructorFunctionIndex) {
return handle(
- JSFunction::cast(native_context->get(constructor_function_index)));
+ JSFunction::cast(native_context->get(constructor_function_index)),
+ native_context->GetIsolate());
}
}
return MaybeHandle<JSFunction>();
}
-
-void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
+void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
+ PropertyKind kind,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
@@ -2925,7 +2965,7 @@ void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
}
os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
os << attributes << " [";
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
os << "]\n";
}
@@ -2974,10 +3014,17 @@ VisitorId Map::GetVisitorId(Map* map) {
return kVisitFreeSpace;
case FIXED_ARRAY_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -2989,8 +3036,12 @@ VisitorId Map::GetVisitorId(Map* map) {
case WITH_CONTEXT_TYPE:
return kVisitFixedArray;
+ case EPHEMERON_HASH_TABLE_TYPE:
+ return kVisitEphemeronHashTable;
+
case WEAK_FIXED_ARRAY_TYPE:
case WEAK_ARRAY_LIST_TYPE:
+ case DESCRIPTOR_ARRAY_TYPE:
return kVisitWeakArray;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3057,6 +3108,12 @@ VisitorId Map::GetVisitorId(Map* map) {
case WASM_INSTANCE_TYPE:
return kVisitWasmInstanceObject;
+ case PRE_PARSED_SCOPE_DATA_TYPE:
+ return kVisitPreParsedScopeData;
+
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ return kVisitUncompiledDataWithPreParsedScope;
+
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
@@ -3087,6 +3144,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_REGEXP_STRING_ITERATOR_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
@@ -3106,6 +3164,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FEEDBACK_METADATA_TYPE:
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
return kVisitDataObject;
case BIGINT_TYPE:
@@ -3126,17 +3185,20 @@ VisitorId Map::GetVisitorId(Map* map) {
case FIXED_FLOAT64_ARRAY_TYPE:
return kVisitFixedFloat64Array;
+ case ALLOCATION_SITE_TYPE:
+ return kVisitAllocationSite;
+
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- if (instance_type == ALLOCATION_SITE_TYPE) {
- return kVisitAllocationSite;
+ if (instance_type == PROTOTYPE_INFO_TYPE) {
+ return kVisitPrototypeInfo;
}
return kVisitStruct;
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
- return kVisitStruct;
+ return kVisitDataHandler;
default:
UNREACHABLE();
@@ -3144,8 +3206,8 @@ VisitorId Map::GetVisitorId(Map* map) {
}
void Map::PrintGeneralization(
- FILE* file, const char* reason, int modify_index, int split,
- int descriptors, bool descriptor_to_field,
+ Isolate* isolate, FILE* file, const char* reason, int modify_index,
+ int split, int descriptors, bool descriptor_to_field,
Representation old_representation, Representation new_representation,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
@@ -3182,7 +3244,7 @@ void Map::PrintGeneralization(
os << "+" << (descriptors - split) << " maps";
}
os << ") [";
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
os << "]\n";
}
@@ -3237,17 +3299,6 @@ bool JSObject::IsUnmodifiedApiObject(Object** o) {
}
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- if (!heap->Contains(this)) {
- os << "!!!INVALID POINTER!!!";
- return;
- }
- if (!heap->Contains(map())) {
- os << "!!!INVALID MAP!!!";
- return;
- }
-
os << AsHex(reinterpret_cast<Address>(this), kPointerHexDigits, true) << " ";
if (IsString()) {
@@ -3303,14 +3354,39 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case WITH_CONTEXT_TYPE:
os << "<WithContext[" << FixedArray::cast(this)->length() << "]>";
break;
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ os << "<ScriptContextTable[" << FixedArray::cast(this)->length() << "]>";
+ break;
case HASH_TABLE_TYPE:
os << "<HashTable[" << FixedArray::cast(this)->length() << "]>";
break;
+ case ORDERED_HASH_MAP_TYPE:
+ os << "<OrderedHashMap[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case ORDERED_HASH_SET_TYPE:
+ os << "<OrderedHashSet[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case NAME_DICTIONARY_TYPE:
+ os << "<NameDictionary[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case GLOBAL_DICTIONARY_TYPE:
+ os << "<GlobalDictionary[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case NUMBER_DICTIONARY_TYPE:
+ os << "<NumberDictionary[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ os << "<SimpleNumberDictionary[" << FixedArray::cast(this)->length()
+ << "]>";
+ break;
+ case STRING_TABLE_TYPE:
+ os << "<StringTable[" << FixedArray::cast(this)->length() << "]>";
+ break;
case FIXED_ARRAY_TYPE:
os << "<FixedArray[" << FixedArray::cast(this)->length() << "]>";
break;
- case BOILERPLATE_DESCRIPTION_TYPE:
- os << "<BoilerplateDescription[" << FixedArray::cast(this)->length()
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ os << "<ObjectBoilerplateDescription[" << FixedArray::cast(this)->length()
<< "]>";
break;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3335,17 +3411,20 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<PropertyArray[" << PropertyArray::cast(this)->length() << "]>";
break;
case FEEDBACK_CELL_TYPE: {
- os << "<FeedbackCell[";
- if (map() == heap->no_closures_cell_map()) {
- os << "no closures";
- } else if (map() == heap->one_closure_cell_map()) {
- os << "one closure";
- } else if (map() == heap->many_closures_cell_map()) {
- os << "many closures";
- } else {
- os << "!!!INVALID MAP!!!";
+ {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ os << "<FeedbackCell[";
+ if (map() == roots.no_closures_cell_map()) {
+ os << "no closures";
+ } else if (map() == roots.one_closure_cell_map()) {
+ os << "one closure";
+ } else if (map() == roots.many_closures_cell_map()) {
+ os << "many closures";
+ } else {
+ os << "!!!INVALID MAP!!!";
+ }
+ os << "]>";
}
- os << "]>";
break;
}
case FEEDBACK_VECTOR_TYPE:
@@ -3363,6 +3442,29 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
#undef TYPED_ARRAY_SHORT_PRINT
+ case PRE_PARSED_SCOPE_DATA_TYPE: {
+ PreParsedScopeData* data = PreParsedScopeData::cast(this);
+ os << "<PreParsedScopeData[" << data->length() << "]>";
+ break;
+ }
+
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE: {
+ UncompiledDataWithoutPreParsedScope* data =
+ UncompiledDataWithoutPreParsedScope::cast(this);
+ os << "<UncompiledDataWithoutPreParsedScope (" << data->start_position()
+ << ", " << data->end_position() << ")]>";
+ break;
+ }
+
+ case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE: {
+ UncompiledDataWithPreParsedScope* data =
+ UncompiledDataWithPreParsedScope::cast(this);
+ os << "<UncompiledDataWithPreParsedScope (" << data->start_position()
+ << ", " << data->end_position()
+ << ") preparsed=" << Brief(data->pre_parsed_scope_data()) << ">";
+ break;
+ }
+
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
std::unique_ptr<char[]> debug_name = shared->DebugName()->ToCString();
@@ -3384,6 +3486,12 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case ALLOCATION_SITE_TYPE: {
+ os << "<AllocationSite";
+ AllocationSite::cast(this)->BriefPrintDetails(os);
+ os << ">";
+ break;
+ }
case SCOPE_INFO_TYPE: {
ScopeInfo* scope = ScopeInfo::cast(this);
os << "<ScopeInfo";
@@ -3403,15 +3511,15 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case ODDBALL_TYPE: {
- if (IsUndefined(isolate)) {
+ if (IsUndefined()) {
os << "<undefined>";
- } else if (IsTheHole(isolate)) {
+ } else if (IsTheHole()) {
os << "<the_hole>";
- } else if (IsNull(isolate)) {
+ } else if (IsNull()) {
os << "<null>";
- } else if (IsTrue(isolate)) {
+ } else if (IsTrue()) {
os << "<true>";
- } else if (IsFalse(isolate)) {
+ } else if (IsFalse()) {
os << "<false>";
} else {
os << "<Odd Oddball: ";
@@ -3426,14 +3534,14 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case HEAP_NUMBER_TYPE: {
- os << "<Number ";
+ os << "<HeapNumber ";
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">";
break;
}
case MUTABLE_HEAP_NUMBER_TYPE: {
- os << "<MutableNumber ";
- HeapNumber::cast(this)->HeapNumberPrint(os);
+ os << "<MutableHeapNumber ";
+ MutableHeapNumber::cast(this)->MutableHeapNumberPrint(os);
os << '>';
break;
}
@@ -3509,6 +3617,10 @@ void Tuple3::BriefPrintDetails(std::ostream& os) {
<< Brief(value3());
}
+void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
+ os << " " << elements_kind() << ", " << Brief(constant_elements());
+}
+
void CallableTask::BriefPrintDetails(std::ostream& os) {
os << " callable=" << Brief(callable());
}
@@ -3539,54 +3651,51 @@ bool HeapObject::IsValidSlot(Map* map, int offset) {
this, offset, 0);
}
-void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
- os << value();
-}
-
String* JSReceiver::class_name() {
- if (IsFunction()) return GetHeap()->Function_string();
- if (IsJSArgumentsObject()) return GetHeap()->Arguments_string();
- if (IsJSArray()) return GetHeap()->Array_string();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ if (IsFunction()) return roots.Function_string();
+ if (IsJSArgumentsObject()) return roots.Arguments_string();
+ if (IsJSArray()) return roots.Array_string();
if (IsJSArrayBuffer()) {
if (JSArrayBuffer::cast(this)->is_shared()) {
- return GetHeap()->SharedArrayBuffer_string();
+ return roots.SharedArrayBuffer_string();
}
- return GetHeap()->ArrayBuffer_string();
+ return roots.ArrayBuffer_string();
}
- if (IsJSArrayIterator()) return GetHeap()->ArrayIterator_string();
- if (IsJSDate()) return GetHeap()->Date_string();
- if (IsJSError()) return GetHeap()->Error_string();
- if (IsJSGeneratorObject()) return GetHeap()->Generator_string();
- if (IsJSMap()) return GetHeap()->Map_string();
- if (IsJSMapIterator()) return GetHeap()->MapIterator_string();
+ if (IsJSArrayIterator()) return roots.ArrayIterator_string();
+ if (IsJSDate()) return roots.Date_string();
+ if (IsJSError()) return roots.Error_string();
+ if (IsJSGeneratorObject()) return roots.Generator_string();
+ if (IsJSMap()) return roots.Map_string();
+ if (IsJSMapIterator()) return roots.MapIterator_string();
if (IsJSProxy()) {
- return map()->is_callable() ? GetHeap()->Function_string()
- : GetHeap()->Object_string();
+ return map()->is_callable() ? roots.Function_string()
+ : roots.Object_string();
}
- if (IsJSRegExp()) return GetHeap()->RegExp_string();
- if (IsJSSet()) return GetHeap()->Set_string();
- if (IsJSSetIterator()) return GetHeap()->SetIterator_string();
+ if (IsJSRegExp()) return roots.RegExp_string();
+ if (IsJSSet()) return roots.Set_string();
+ if (IsJSSetIterator()) return roots.SetIterator_string();
if (IsJSTypedArray()) {
#define SWITCH_KIND(Type, type, TYPE, ctype, size) \
if (map()->elements_kind() == TYPE##_ELEMENTS) { \
- return GetHeap()->Type##Array_string(); \
+ return roots.Type##Array_string(); \
}
TYPED_ARRAYS(SWITCH_KIND)
#undef SWITCH_KIND
}
if (IsJSValue()) {
Object* value = JSValue::cast(this)->value();
- if (value->IsBoolean()) return GetHeap()->Boolean_string();
- if (value->IsString()) return GetHeap()->String_string();
- if (value->IsNumber()) return GetHeap()->Number_string();
- if (value->IsBigInt()) return GetHeap()->BigInt_string();
- if (value->IsSymbol()) return GetHeap()->Symbol_string();
- if (value->IsScript()) return GetHeap()->Script_string();
+ if (value->IsBoolean()) return roots.Boolean_string();
+ if (value->IsString()) return roots.String_string();
+ if (value->IsNumber()) return roots.Number_string();
+ if (value->IsBigInt()) return roots.BigInt_string();
+ if (value->IsSymbol()) return roots.Symbol_string();
+ if (value->IsScript()) return roots.Script_string();
UNREACHABLE();
}
- if (IsJSWeakMap()) return GetHeap()->WeakMap_string();
- if (IsJSWeakSet()) return GetHeap()->WeakSet_string();
- if (IsJSGlobalProxy()) return GetHeap()->global_string();
+ if (IsJSWeakMap()) return roots.WeakMap_string();
+ if (IsJSWeakSet()) return roots.WeakSet_string();
+ if (IsJSGlobalProxy()) return roots.global_string();
Object* maybe_constructor = map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
@@ -3601,17 +3710,22 @@ String* JSReceiver::class_name() {
if (info->class_name()->IsString()) return String::cast(info->class_name());
}
- return GetHeap()->Object_string();
+ return roots.Object_string();
}
bool HeapObject::CanBeRehashed() const {
DCHECK(NeedsRehashing());
switch (map()->instance_type()) {
- case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
// TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
- return IsNameDictionary() || IsGlobalDictionary() ||
- IsNumberDictionary() || IsSimpleNumberDictionary() ||
- IsStringTable();
+ return false;
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ return true;
case DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
@@ -3626,22 +3740,25 @@ bool HeapObject::CanBeRehashed() const {
return false;
}
-void HeapObject::RehashBasedOnMap() {
+void HeapObject::RehashBasedOnMap(Isolate* isolate) {
switch (map()->instance_type()) {
case HASH_TABLE_TYPE:
- if (IsNameDictionary()) {
- NameDictionary::cast(this)->Rehash();
- } else if (IsNumberDictionary()) {
- NumberDictionary::cast(this)->Rehash();
- } else if (IsSimpleNumberDictionary()) {
- SimpleNumberDictionary::cast(this)->Rehash();
- } else if (IsGlobalDictionary()) {
- GlobalDictionary::cast(this)->Rehash();
- } else if (IsStringTable()) {
- StringTable::cast(this)->Rehash();
- } else {
- UNREACHABLE();
- }
+ UNREACHABLE();
+ break;
+ case NAME_DICTIONARY_TYPE:
+ NameDictionary::cast(this)->Rehash(isolate);
+ break;
+ case GLOBAL_DICTIONARY_TYPE:
+ GlobalDictionary::cast(this)->Rehash(isolate);
+ break;
+ case NUMBER_DICTIONARY_TYPE:
+ NumberDictionary::cast(this)->Rehash(isolate);
+ break;
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ SimpleNumberDictionary::cast(this)->Rehash(isolate);
+ break;
+ case STRING_TABLE_TYPE:
+ StringTable::cast(this)->Rehash(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
DCHECK_LE(1, DescriptorArray::cast(this)->number_of_descriptors());
@@ -3676,7 +3793,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
String* name = constructor->shared()->DebugName();
if (name->length() != 0 &&
- !name->Equals(isolate->heap()->Object_string())) {
+ !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
return handle(name, isolate);
}
} else if (maybe_constructor->IsFunctionTemplateInfo()) {
@@ -3693,7 +3810,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
PrototypeIterator iter(isolate, receiver);
- if (iter.IsAtEnd()) return handle(receiver->class_name());
+ if (iter.IsAtEnd()) return handle(receiver->class_name(), isolate);
Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
@@ -3706,14 +3823,14 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
}
return result.is_identical_to(isolate->factory()->Object_string())
- ? handle(receiver->class_name())
+ ? handle(receiver->class_name(), isolate)
: result;
}
Handle<Context> JSReceiver::GetCreationContext() {
JSReceiver* receiver = this;
// Externals are JSObjects with null as a constructor.
- DCHECK(!receiver->IsExternal());
+ DCHECK(!receiver->IsExternal(GetIsolate()));
Object* constructor = receiver->map()->GetConstructor();
JSFunction* function;
if (constructor->IsJSFunction()) {
@@ -3729,28 +3846,33 @@ Handle<Context> JSReceiver::GetCreationContext() {
}
return function->has_context()
- ? Handle<Context>(function->context()->native_context())
+ ? Handle<Context>(function->context()->native_context(),
+ receiver->GetIsolate())
: Handle<Context>::null();
}
// static
-Handle<Object> Map::WrapFieldType(Handle<FieldType> type) {
- if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
- return type;
+MaybeObjectHandle Map::WrapFieldType(Handle<FieldType> type) {
+ if (type->IsClass()) {
+ return MaybeObjectHandle::Weak(type->AsClass());
+ }
+ return MaybeObjectHandle(type);
}
// static
-FieldType* Map::UnwrapFieldType(Object* wrapped_type) {
- Object* value = wrapped_type;
- if (value->IsWeakCell()) {
- if (WeakCell::cast(value)->cleared()) return FieldType::None();
- value = WeakCell::cast(value)->value();
+FieldType* Map::UnwrapFieldType(MaybeObject* wrapped_type) {
+ if (wrapped_type->IsClearedWeakHeapObject()) {
+ return FieldType::None();
}
- return FieldType::cast(value);
+ HeapObject* heap_object;
+ if (wrapped_type->ToWeakHeapObject(&heap_object)) {
+ return FieldType::cast(heap_object);
+ }
+ return FieldType::cast(wrapped_type->ToObject());
}
-MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
- Handle<FieldType> type,
+MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name, Handle<FieldType> type,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
@@ -3764,13 +3886,11 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
return MaybeHandle<Map>();
}
- Isolate* isolate = map->GetIsolate();
-
// Compute the new index for new field.
int index = map->NextFreePropertyIndex();
if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
- constness = kMutable;
+ constness = PropertyConstness::kMutable;
representation = Representation::Tagged();
type = FieldType::Any(isolate);
} else {
@@ -3778,18 +3898,18 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
isolate, map->instance_type(), &constness, &representation, &type);
}
- Handle<Object> wrapped_type(WrapFieldType(type));
+ MaybeObjectHandle wrapped_type = WrapFieldType(type);
- DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ constness == PropertyConstness::kMutable);
Descriptor d = Descriptor::DataField(name, index, attributes, constness,
representation, wrapped_type);
- Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
+ Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
new_map->AccountAddedPropertyField();
return new_map;
}
-
-MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
+MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
Handle<Name> name,
Handle<Object> constant,
PropertyAttributes attributes,
@@ -3800,15 +3920,14 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
}
if (FLAG_track_constant_fields) {
- Isolate* isolate = map->GetIsolate();
Representation representation = constant->OptimalRepresentation();
Handle<FieldType> type = constant->OptimalType(isolate, representation);
- return CopyWithField(map, name, type, attributes, kConst, representation,
- flag);
+ return CopyWithField(isolate, map, name, type, attributes,
+ PropertyConstness::kConst, representation, flag);
} else {
// Allocate new instance descriptors with (name, constant) added.
Descriptor d = Descriptor::DataConstant(name, 0, constant, attributes);
- Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
+ Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
return new_map;
}
}
@@ -3945,7 +4064,7 @@ namespace {
// store.
void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Isolate* isolate = object->GetIsolate();
- Handle<Map> old_map(object->map());
+ Handle<Map> old_map(object->map(), isolate);
// In case of a regular transition.
if (new_map->GetBackPointer() == *old_map) {
// If the map does not add named properties, simply set the map.
@@ -3978,7 +4097,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
DCHECK(details.representation().IsDouble());
DCHECK(!new_map->IsUnboxedDoubleField(index));
- Handle<Object> value = isolate->factory()->NewMutableHeapNumber();
+ auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
object->RawFastPropertyAtPut(index, *value);
object->synchronized_set_map(*new_map);
return;
@@ -3987,14 +4106,14 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// This migration is a transition from a map that has run out of property
// space. Extend the backing store.
int grow_by = new_map->UnusedPropertyFields() + 1;
- Handle<PropertyArray> old_storage(object->property_array());
+ Handle<PropertyArray> old_storage(object->property_array(), isolate);
Handle<PropertyArray> new_storage =
isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
// Properly initialize newly added property.
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumber();
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
} else {
value = isolate->factory()->uninitialized_value();
}
@@ -4033,8 +4152,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Handle<FixedArray> inobject_props =
isolate->factory()->NewFixedArray(inobject);
- Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
- Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors(),
+ isolate);
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors(),
+ isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -4056,13 +4177,13 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// must already be prepared for data of certain type.
DCHECK(!details.representation().IsNone());
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumber();
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
} else {
value = isolate->factory()->uninitialized_value();
}
} else {
DCHECK_EQ(kData, old_details.kind());
- value = handle(old_descriptors->GetValue(i), isolate);
+ value = handle(old_descriptors->GetStrongValue(i), isolate);
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
} else {
@@ -4070,9 +4191,11 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
if (object->IsUnboxedDoubleField(index)) {
uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
- value = isolate->factory()->NewHeapNumberFromBits(
- old_bits, representation.IsDouble() ? MUTABLE : IMMUTABLE);
-
+ if (representation.IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
+ } else {
+ value = isolate->factory()->NewHeapNumberFromBits(old_bits);
+ }
} else {
value = handle(object->RawFastPropertyAt(index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
@@ -4100,7 +4223,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK_EQ(kData, details.kind());
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumber();
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
} else {
value = isolate->factory()->uninitialized_value();
}
@@ -4133,7 +4256,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK(value->IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
object->RawFastDoublePropertyAsBitsAtPut(
- index, HeapNumber::cast(value)->value_as_bits());
+ index, MutableHeapNumber::cast(value)->value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object,
@@ -4177,7 +4300,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
- Handle<Map> map(object->map());
+ Handle<Map> map(object->map(), isolate);
// Allocate new content.
int real_size = map->NumberOfOwnDescriptors();
@@ -4191,10 +4314,10 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
Handle<NameDictionary> dictionary =
NameDictionary::New(isolate, property_count);
- Handle<DescriptorArray> descs(map->instance_descriptors());
+ Handle<DescriptorArray> descs(map->instance_descriptors(), isolate);
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- Handle<Name> key(descs->GetKey(i));
+ Handle<Name> key(descs->GetKey(i), isolate);
Handle<Object> value;
if (details.location() == kField) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
@@ -4206,8 +4329,8 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
value = handle(object->RawFastPropertyAt(index), isolate);
if (details.representation().IsDouble()) {
DCHECK(value->IsMutableHeapNumber());
- Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
- value = isolate->factory()->NewHeapNumber(old->value());
+ double old_value = Handle<MutableHeapNumber>::cast(value)->value();
+ value = isolate->factory()->NewHeapNumber(old_value);
}
}
} else {
@@ -4217,12 +4340,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
} else {
DCHECK_EQ(kDescriptor, details.location());
- value = handle(descs->GetValue(i), isolate);
+ value = handle(descs->GetStrongValue(i), isolate);
}
DCHECK(!value.is_null());
PropertyDetails d(details.kind(), details.attributes(),
PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
+ dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
}
// Copy the next enumeration index from instance descriptor.
@@ -4270,7 +4393,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
#ifdef DEBUG
if (FLAG_trace_normalization) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Object properties have been normalized:\n";
object->Print(os);
}
@@ -4297,8 +4420,8 @@ void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
int expected_additional_properties) {
if (object->map() == *new_map) return;
- Handle<Map> old_map(object->map());
- NotifyMapChange(old_map, new_map, new_map->GetIsolate());
+ Handle<Map> old_map(object->map(), object->GetIsolate());
+ NotifyMapChange(old_map, new_map, object->GetIsolate());
if (old_map->is_dictionary_map()) {
// For slow-to-fast migrations JSObject::MigrateSlowToFast()
@@ -4320,8 +4443,9 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
old_map->set_owns_descriptors(false);
DCHECK(old_map->is_abandoned_prototype_map());
// Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionsAccessor(old_map).NumberOfTransitions());
- DCHECK(new_map->GetBackPointer()->IsUndefined(new_map->GetIsolate()));
+ DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
+ .NumberOfTransitions());
+ DCHECK(new_map->GetBackPointer()->IsUndefined());
DCHECK(object->map() != *old_map);
}
} else {
@@ -4339,9 +4463,10 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
void JSObject::ForceSetPrototype(Handle<JSObject> object,
Handle<Object> proto) {
// object.__proto__ = proto;
- Handle<Map> old_map = Handle<Map>(object->map());
- Handle<Map> new_map = Map::Copy(old_map, "ForceSetPrototype");
- Map::SetPrototype(new_map, proto);
+ Handle<Map> old_map = Handle<Map>(object->map(), object->GetIsolate());
+ Handle<Map> new_map =
+ Map::Copy(object->GetIsolate(), old_map, "ForceSetPrototype");
+ Map::SetPrototype(object->GetIsolate(), new_map, proto);
JSObject::MigrateToMap(object, new_map);
}
@@ -4365,36 +4490,35 @@ void DescriptorArray::GeneralizeAllFields() {
details = details.CopyWithRepresentation(Representation::Tagged());
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
- details = details.CopyWithConstness(kMutable);
+ details = details.CopyWithConstness(PropertyConstness::kMutable);
SetValue(i, FieldType::Any());
}
- set(ToDetailsIndex(i), details.AsSmi());
+ set(ToDetailsIndex(i), MaybeObject::FromObject(details.AsSmi()));
}
}
-Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
+Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
ElementsKind elements_kind,
int modify_index, PropertyKind kind,
PropertyAttributes attributes,
const char* reason) {
- Isolate* isolate = map->GetIsolate();
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descriptors =
- DescriptorArray::CopyUpTo(old_descriptors, number_of_own_descriptors);
+ Handle<DescriptorArray> descriptors = DescriptorArray::CopyUpTo(
+ isolate, old_descriptors, number_of_own_descriptors);
descriptors->GeneralizeAllFields();
Handle<LayoutDescriptor> new_layout_descriptor(
LayoutDescriptor::FastPointerLayout(), isolate);
Handle<Map> new_map = CopyReplaceDescriptors(
- map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
+ isolate, map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
// Unless the instance is being migrated, ensure that modify_index is a field.
if (modify_index >= 0) {
PropertyDetails details = descriptors->GetDetails(modify_index);
- if (details.constness() != kMutable || details.location() != kField ||
- details.attributes() != attributes) {
+ if (details.constness() != PropertyConstness::kMutable ||
+ details.location() != kField || details.attributes() != attributes) {
int field_index = details.location() == kField
? details.field_index()
: new_map->NumberOfFields();
@@ -4416,41 +4540,40 @@ Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
map->instance_descriptors()->GetFieldType(modify_index), isolate);
}
map->PrintGeneralization(
- stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
- new_map->NumberOfOwnDescriptors(), details.location() == kDescriptor,
- details.representation(), Representation::Tagged(), field_type,
- MaybeHandle<Object>(), FieldType::Any(isolate),
- MaybeHandle<Object>());
+ isolate, stdout, reason, modify_index,
+ new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
+ details.location() == kDescriptor, details.representation(),
+ Representation::Tagged(), field_type, MaybeHandle<Object>(),
+ FieldType::Any(isolate), MaybeHandle<Object>());
}
}
new_map->set_elements_kind(elements_kind);
return new_map;
}
-void Map::DeprecateTransitionTree() {
+void Map::DeprecateTransitionTree(Isolate* isolate) {
if (is_deprecated()) return;
DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(this, &no_gc);
+ TransitionsAccessor transitions(isolate, this, &no_gc);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
- transitions.GetTarget(i)->DeprecateTransitionTree();
+ transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
}
DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
set_is_deprecated(true);
if (FLAG_trace_maps) {
- LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
+ LOG(isolate, MapEvent("Deprecate", this, nullptr));
}
dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(), DependentCode::kTransitionGroup);
- NotifyLeafMapLayoutChange();
+ isolate, DependentCode::kTransitionGroup);
+ NotifyLeafMapLayoutChange(isolate);
}
// Installs |new_descriptors| over the current instance_descriptors to ensure
// proper sharing of descriptor arrays.
-void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
+void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray* new_descriptors,
LayoutDescriptor* new_layout_descriptor) {
- Isolate* isolate = GetIsolate();
// Don't overwrite the empty descriptor array or initial map's descriptors.
if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
return;
@@ -4472,9 +4595,8 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
set_owns_descriptors(false);
}
-Map* Map::FindRootMap() const {
+Map* Map::FindRootMap(Isolate* isolate) const {
const Map* result = this;
- Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
if (back->IsUndefined(isolate)) {
@@ -4489,11 +4611,10 @@ Map* Map::FindRootMap() const {
}
}
-Map* Map::FindFieldOwner(int descriptor) const {
+Map* Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
const Map* result = this;
- Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
if (back->IsUndefined(isolate)) break;
@@ -4504,18 +4625,18 @@ Map* Map::FindFieldOwner(int descriptor) const {
return const_cast<Map*>(result);
}
-void Map::UpdateFieldType(int descriptor, Handle<Name> name,
+void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
PropertyConstness new_constness,
Representation new_representation,
- Handle<Object> new_wrapped_type) {
- DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
+ MaybeObjectHandle new_wrapped_type) {
+ DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakHeapObject());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
- Zone zone(GetIsolate()->allocator(), ZONE_NAME);
+ Zone zone(isolate->allocator(), ZONE_NAME);
ZoneQueue<Map*> backlog(&zone);
backlog.push(this);
@@ -4523,7 +4644,7 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
Map* current = backlog.front();
backlog.pop();
- TransitionsAccessor transitions(current, &no_allocation);
+ TransitionsAccessor transitions(isolate, current, &no_allocation);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
Map* target = transitions.GetTarget(i);
@@ -4542,8 +4663,9 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
// Skip if already updated the shared descriptor.
if ((FLAG_modify_map_inplace && new_constness != details.constness()) ||
- descriptors->GetValue(descriptor) != *new_wrapped_type) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields, new_constness == kMutable);
+ descriptors->GetFieldType(descriptor) != *new_wrapped_type.object()) {
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ new_constness == PropertyConstness::kMutable);
Descriptor d = Descriptor::DataField(
name, descriptors->GetFieldIndex(descriptor), details.attributes(),
new_constness, new_representation, new_wrapped_type);
@@ -4575,12 +4697,10 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
}
// static
-void Map::GeneralizeField(Handle<Map> map, int modify_index,
+void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
- Isolate* isolate = map->GetIsolate();
-
// Check if we actually need to generalize the field type at all.
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
@@ -4589,7 +4709,7 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
isolate);
- // Return if the current map is general enough to hold requested contness and
+ // Return if the current map is general enough to hold requested constness and
// representation/field type.
if (((FLAG_modify_map_inplace &&
IsGeneralizableTo(new_constness, old_constness)) ||
@@ -4606,7 +4726,7 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
}
// Determine the field owner.
- Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
+ Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
@@ -4619,17 +4739,17 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
}
PropertyDetails details = descriptors->GetDetails(modify_index);
- Handle<Name> name(descriptors->GetKey(modify_index));
+ Handle<Name> name(descriptors->GetKey(modify_index), isolate);
- Handle<Object> wrapped_type(WrapFieldType(new_field_type));
- field_owner->UpdateFieldType(modify_index, name, new_constness,
+ MaybeObjectHandle wrapped_type(WrapFieldType(new_field_type));
+ field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kFieldOwnerGroup);
if (FLAG_trace_generalization) {
map->PrintGeneralization(
- stdout, "field type generalization", modify_index,
+ isolate, stdout, "field type generalization", modify_index,
map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
details.representation(), details.representation(), old_field_type,
MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
@@ -4638,37 +4758,38 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
// TODO(ishell): remove.
// static
-Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
- PropertyKind new_kind,
+Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map,
+ int modify_index, PropertyKind new_kind,
PropertyAttributes new_attributes,
Representation new_representation,
Handle<FieldType> new_field_type) {
DCHECK_EQ(kData, new_kind); // Only kData case is supported.
- MapUpdater mu(map->GetIsolate(), map);
- return mu.ReconfigureToDataField(modify_index, new_attributes, kConst,
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(modify_index, new_attributes,
+ PropertyConstness::kConst,
new_representation, new_field_type);
}
// TODO(ishell): remove.
// static
-Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
+Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
ElementsKind new_elements_kind) {
- MapUpdater mu(map->GetIsolate(), map);
+ MapUpdater mu(isolate, map);
return mu.ReconfigureElementsKind(new_elements_kind);
}
// Generalize all fields and update the transition tree.
-Handle<Map> Map::GeneralizeAllFields(Handle<Map> map) {
- Isolate* isolate = map->GetIsolate();
+Handle<Map> Map::GeneralizeAllFields(Isolate* isolate, Handle<Map> map) {
Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
MapUpdater mu(isolate, map);
- map = mu.ReconfigureToDataField(i, details.attributes(), kMutable,
+ map = mu.ReconfigureToDataField(i, details.attributes(),
+ PropertyConstness::kMutable,
Representation::Tagged(), any_type);
}
}
@@ -4677,14 +4798,14 @@ Handle<Map> Map::GeneralizeAllFields(Handle<Map> map) {
// static
-MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
+MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(old_map->GetIsolate());
+ DisallowDeoptimization no_deoptimization(isolate);
if (!old_map->is_deprecated()) return old_map;
// Check the state of the root map.
- Map* root_map = old_map->FindRootMap();
+ Map* root_map = old_map->FindRootMap(isolate);
if (root_map->is_deprecated()) {
JSFunction* constructor = JSFunction::cast(root_map->GetConstructor());
DCHECK(constructor->has_initial_map());
@@ -4693,7 +4814,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
old_map->elements_kind()) {
return MaybeHandle<Map>();
}
- return handle(constructor->initial_map());
+ return handle(constructor->initial_map(), constructor->GetIsolate());
}
if (!old_map->EquivalentToForTransition(root_map)) return MaybeHandle<Map>();
@@ -4701,18 +4822,18 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
ElementsKind to_kind = old_map->elements_kind();
if (from_kind != to_kind) {
// Try to follow existing elements kind transitions.
- root_map = root_map->LookupElementsTransitionMap(to_kind);
+ root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
if (root_map == nullptr) return MaybeHandle<Map>();
// From here on, use the map with correct elements kind as root map.
}
- Map* new_map = root_map->TryReplayPropertyTransitions(*old_map);
+ Map* new_map = root_map->TryReplayPropertyTransitions(isolate, *old_map);
if (new_map == nullptr) return MaybeHandle<Map>();
- return handle(new_map);
+ return handle(new_map, isolate);
}
-Map* Map::TryReplayPropertyTransitions(Map* old_map) {
+Map* Map::TryReplayPropertyTransitions(Isolate* isolate, Map* old_map) {
DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(GetIsolate());
+ DisallowDeoptimization no_deoptimization(isolate);
int root_nof = NumberOfOwnDescriptors();
@@ -4723,7 +4844,7 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
for (int i = root_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
Map* transition =
- TransitionsAccessor(new_map, &no_allocation)
+ TransitionsAccessor(isolate, new_map, &no_allocation)
.SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
old_details.attributes());
if (transition == nullptr) return nullptr;
@@ -4758,7 +4879,7 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
} else {
DCHECK_EQ(kDescriptor, old_details.location());
DCHECK(!FLAG_track_constant_fields);
- Object* old_value = old_descriptors->GetValue(i);
+ Object* old_value = old_descriptors->GetStrongValue(i);
if (!new_type->NowContains(old_value)) {
return nullptr;
}
@@ -4774,9 +4895,9 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
}
} else {
DCHECK_EQ(kDescriptor, new_details.location());
- Object* old_value = old_descriptors->GetValue(i);
- Object* new_value = new_descriptors->GetValue(i);
- if (old_details.location() == kField || old_value != new_value) {
+ if (old_details.location() == kField ||
+ old_descriptors->GetStrongValue(i) !=
+ new_descriptors->GetStrongValue(i)) {
return nullptr;
}
}
@@ -4787,9 +4908,9 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
// static
-Handle<Map> Map::Update(Handle<Map> map) {
+Handle<Map> Map::Update(Isolate* isolate, Handle<Map> map) {
if (!map->is_deprecated()) return map;
- MapUpdater mu(map->GetIsolate(), map);
+ MapUpdater mu(isolate, map);
return mu.Update();
}
@@ -4801,11 +4922,11 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
should_throw, value);
}
-MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
+MaybeHandle<Object> Object::SetProperty(Isolate* isolate, Handle<Object> object,
Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
- LookupIterator it(object, name);
+ LookupIterator it(isolate, object, name);
MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
return value;
}
@@ -4873,9 +4994,33 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
}
return SetPropertyWithAccessor(it, value, should_throw);
}
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- // TODO(verwaest): We should throw an exception if holder is receiver.
+ case LookupIterator::INTEGER_INDEXED_EXOTIC: {
+ // IntegerIndexedElementSet converts value to a Number/BigInt prior to
+ // the bounds check. The bounds check has already happened here, but
+ // perform the possibly effectful ToNumber (or ToBigInt) operation
+ // anyways.
+ auto holder = it->GetHolder<JSTypedArray>();
+ Handle<Object> throwaway_value;
+ if (holder->type() == kExternalBigInt64Array ||
+ holder->type() == kExternalBigUint64Array) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), throwaway_value,
+ BigInt::FromObject(it->isolate(), value), Nothing<bool>());
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), throwaway_value,
+ Object::ToNumber(it->isolate(), value), Nothing<bool>());
+ }
+
+ // FIXME: Throw a TypeError if the holder is neutered here
+ // (IntegerIndexedElementSpec step 5).
+
+ // TODO(verwaest): Per spec, we should return false here (steps 6-9
+ // in IntegerIndexedElementSpec), resulting in an exception being thrown
+ // on OOB accesses in strict code. Historically, v8 has not done made
+ // this change due to uncertainty about web compat. (v8:4901)
return Just(true);
+ }
case LookupIterator::DATA:
if (it->IsReadOnly()) {
@@ -4950,7 +5095,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator own_lookup =
it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
- : LookupIterator(receiver, it->name(), c);
+ : LookupIterator(isolate, receiver, it->name(), c);
for (; own_lookup.IsFound(); own_lookup.Next()) {
switch (own_lookup.state()) {
@@ -5079,8 +5224,9 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
// TODO(neis): According to the spec, this should throw a TypeError.
}
} else if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
+ Object::ToNumber(it->isolate(), value),
+ Nothing<bool>());
// We have to recheck the length. However, it can only change if the
// underlying buffer was neutered, so just check that.
if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
@@ -5099,7 +5245,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->HeapObjectVerify();
+ receiver->HeapObjectVerify(it->isolate());
}
#endif
return Just(true);
@@ -5146,7 +5292,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
- RETURN_FAILURE(array->GetIsolate(), should_throw,
+ RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
isolate->factory()->length_string(),
Object::TypeOf(isolate, array), array));
@@ -5163,10 +5309,9 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
}
Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
- Maybe<bool> result = JSObject::AddDataElement(
- receiver_obj, it->index(), value, attributes, should_throw);
+ JSObject::AddDataElement(receiver_obj, it->index(), value, attributes);
JSObject::ValidateElements(*receiver_obj);
- return result;
+ return Just(true);
} else {
it->UpdateProtector();
// Migrate to the most up-to-date map that will be able to store |value|
@@ -5181,7 +5326,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->HeapObjectVerify();
+ receiver->HeapObjectVerify(isolate);
}
#endif
}
@@ -5189,17 +5334,16 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
return Just(true);
}
-
-void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
+void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Only supports adding slack to owned descriptors.
DCHECK(map->owns_descriptors());
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
int old_size = map->NumberOfOwnDescriptors();
if (slack <= descriptors->NumberOfSlackDescriptors()) return;
- Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- descriptors, old_size, slack);
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(isolate, descriptors, old_size, slack);
DisallowHeapAllocation no_allocation;
// The descriptors are still the same, so keep the layout descriptor.
@@ -5217,7 +5361,6 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
// will be lazily replaced by the extended cache when needed.
new_descriptors->CopyEnumCacheFrom(*descriptors);
- Isolate* isolate = map->GetIsolate();
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
@@ -5234,8 +5377,8 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
// static
-Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
- Isolate* isolate = prototype->GetIsolate();
+Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype) {
Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
@@ -5253,19 +5396,19 @@ Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
if (info->HasObjectCreateMap()) {
map = handle(info->ObjectCreateMap(), isolate);
} else {
- map = Map::CopyInitialMap(map);
- Map::SetPrototype(map, prototype);
+ map = Map::CopyInitialMap(isolate, map);
+ Map::SetPrototype(isolate, map, prototype);
PrototypeInfo::SetObjectCreateMap(info, map);
}
return map;
}
- return Map::TransitionToPrototype(map, prototype);
+ return Map::TransitionToPrototype(isolate, map, prototype);
}
// static
-MaybeHandle<Map> Map::TryGetObjectCreateMap(Handle<HeapObject> prototype) {
- Isolate* isolate = prototype->GetIsolate();
+MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype) {
Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
@@ -5282,7 +5425,8 @@ MaybeHandle<Map> Map::TryGetObjectCreateMap(Handle<HeapObject> prototype) {
}
template <class T>
-static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
+static int AppendUniqueCallbacks(Isolate* isolate,
+ Handle<TemplateList> callbacks,
Handle<typename T::Array> array,
int valid_descriptors) {
int nof_callbacks = callbacks->length();
@@ -5291,8 +5435,8 @@ static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
- Handle<Name> key(Name::cast(entry->name()));
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)), isolate);
+ Handle<Name> key(Name::cast(entry->name()), isolate);
DCHECK(key->IsUniqueName());
// Check if a descriptor with this name already exists before writing.
if (!T::Contains(key, entry, valid_descriptors, array)) {
@@ -5324,13 +5468,12 @@ struct FixedArrayAppender {
}
};
-
-int AccessorInfo::AppendUnique(Handle<Object> descriptors,
+int AccessorInfo::AppendUnique(Isolate* isolate, Handle<Object> descriptors,
Handle<FixedArray> array,
int valid_descriptors) {
Handle<TemplateList> callbacks = Handle<TemplateList>::cast(descriptors);
DCHECK_GE(array->length(), callbacks->length() + valid_descriptors);
- return AppendUniqueCallbacks<FixedArrayAppender>(callbacks, array,
+ return AppendUniqueCallbacks<FixedArrayAppender>(isolate, callbacks, array,
valid_descriptors);
}
@@ -5342,9 +5485,10 @@ static bool ContainsMap(MapHandles const& maps, Map* map) {
return false;
}
-Map* Map::FindElementsKindTransitionedMap(MapHandles const& candidates) {
+Map* Map::FindElementsKindTransitionedMap(Isolate* isolate,
+ MapHandles const& candidates) {
DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(GetIsolate());
+ DisallowDeoptimization no_deoptimization(isolate);
if (is_prototype_map()) return nullptr;
@@ -5354,9 +5498,9 @@ Map* Map::FindElementsKindTransitionedMap(MapHandles const& candidates) {
Map* transition = nullptr;
if (IsTransitionableFastElementsKind(kind)) {
// Check the state of the root map.
- Map* root_map = FindRootMap();
+ Map* root_map = FindRootMap(isolate);
if (!EquivalentToForElementsKindTransition(root_map)) return nullptr;
- root_map = root_map->LookupElementsTransitionMap(kind);
+ root_map = root_map->LookupElementsTransitionMap(isolate, kind);
DCHECK_NOT_NULL(root_map);
// Starting from the next existing elements kind transition try to
// replay the property transitions that does not involve instance rewriting
@@ -5364,7 +5508,7 @@ Map* Map::FindElementsKindTransitionedMap(MapHandles const& candidates) {
for (root_map = root_map->ElementsTransitionMap();
root_map != nullptr && root_map->has_fast_elements();
root_map = root_map->ElementsTransitionMap()) {
- Map* current = root_map->TryReplayPropertyTransitions(this);
+ Map* current = root_map->TryReplayPropertyTransitions(isolate, this);
if (current == nullptr) continue;
if (InstancesNeedRewriting(current)) continue;
@@ -5378,10 +5522,10 @@ Map* Map::FindElementsKindTransitionedMap(MapHandles const& candidates) {
return transition;
}
-
-static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
+static Map* FindClosestElementsTransition(Isolate* isolate, Map* map,
+ ElementsKind to_kind) {
// Ensure we are requested to search elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+ DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
Map* current_map = map;
@@ -5397,15 +5541,13 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
return current_map;
}
-
-Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
- Map* to_map = FindClosestElementsTransition(this, to_kind);
+Map* Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
+ Map* to_map = FindClosestElementsTransition(isolate, this, to_kind);
if (to_map->elements_kind() == to_kind) return to_map;
return nullptr;
}
-bool Map::IsMapInArrayPrototypeChain() const {
- Isolate* isolate = GetIsolate();
+bool Map::IsMapInArrayPrototypeChain(Isolate* isolate) const {
if (isolate->initial_array_prototype()->map() == this) {
return true;
}
@@ -5417,19 +5559,17 @@ bool Map::IsMapInArrayPrototypeChain() const {
return false;
}
-
-Handle<WeakCell> Map::WeakCellForMap(Handle<Map> map) {
- Isolate* isolate = map->GetIsolate();
+Handle<WeakCell> Map::WeakCellForMap(Isolate* isolate, Handle<Map> map) {
if (map->weak_cell_cache()->IsWeakCell()) {
- return Handle<WeakCell>(WeakCell::cast(map->weak_cell_cache()));
+ return Handle<WeakCell>(WeakCell::cast(map->weak_cell_cache()), isolate);
}
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(map);
map->set_weak_cell_cache(*weak_cell);
return weak_cell;
}
-
-static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
+static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
+ Handle<Map> map,
ElementsKind to_kind) {
DCHECK(IsTransitionElementsKind(map->elements_kind()));
@@ -5444,7 +5584,7 @@ static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
if (IsFastElementsKind(kind)) {
while (kind != to_kind && !IsTerminalElementsKind(kind)) {
kind = GetNextTransitionElementsKind(kind);
- current_map = Map::CopyAsElementsKind(current_map, kind, flag);
+ current_map = Map::CopyAsElementsKind(isolate, current_map, kind, flag);
}
}
}
@@ -5452,30 +5592,28 @@ static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
// In case we are exiting the fast elements kind system, just add the map in
// the end.
if (kind != to_kind) {
- current_map = Map::CopyAsElementsKind(current_map, to_kind, flag);
+ current_map = Map::CopyAsElementsKind(isolate, current_map, to_kind, flag);
}
DCHECK(current_map->elements_kind() == to_kind);
return current_map;
}
-
-Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
+Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind to_kind) {
ElementsKind from_kind = map->elements_kind();
if (from_kind == to_kind) return map;
- Isolate* isolate = map->GetIsolate();
Context* native_context = isolate->context()->native_context();
if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
if (*map == native_context->fast_aliased_arguments_map()) {
DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->slow_aliased_arguments_map());
+ return handle(native_context->slow_aliased_arguments_map(), isolate);
}
} else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
if (*map == native_context->slow_aliased_arguments_map()) {
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->fast_aliased_arguments_map());
+ return handle(native_context->fast_aliased_arguments_map(), isolate);
}
} else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
@@ -5495,7 +5633,7 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
to_kind == GetPackedElementsKind(from_kind) &&
map->GetBackPointer()->IsMap() &&
Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
- return handle(Map::cast(map->GetBackPointer()));
+ return handle(Map::cast(map->GetBackPointer()), isolate);
}
bool allow_store_transition = IsTransitionElementsKind(from_kind);
@@ -5507,29 +5645,31 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
}
if (!allow_store_transition) {
- return Map::CopyAsElementsKind(map, to_kind, OMIT_TRANSITION);
+ return Map::CopyAsElementsKind(isolate, map, to_kind, OMIT_TRANSITION);
}
- return Map::ReconfigureElementsKind(map, to_kind);
+ return Map::ReconfigureElementsKind(isolate, map, to_kind);
}
// static
-Handle<Map> Map::AsElementsKind(Handle<Map> map, ElementsKind kind) {
- Handle<Map> closest_map(FindClosestElementsTransition(*map, kind));
+Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind) {
+ Handle<Map> closest_map(FindClosestElementsTransition(isolate, *map, kind),
+ isolate);
if (closest_map->elements_kind() == kind) {
return closest_map;
}
- return AddMissingElementsTransitions(closest_map, kind);
+ return AddMissingElementsTransitions(isolate, closest_map, kind);
}
Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind to_kind) {
- Handle<Map> map(object->map());
- return Map::TransitionElementsTo(map, to_kind);
+ Handle<Map> map(object->map(), object->GetIsolate());
+ return Map::TransitionElementsTo(object->GetIsolate(), map, to_kind);
}
@@ -5538,9 +5678,9 @@ void JSProxy::Revoke(Handle<JSProxy> proxy) {
// ES#sec-proxy-revocation-functions
if (!proxy->IsRevoked()) {
// 5. Set p.[[ProxyTarget]] to null.
- proxy->set_target(isolate->heap()->null_value());
+ proxy->set_target(ReadOnlyRoots(isolate).null_value());
// 6. Set p.[[ProxyHandler]] to null.
- proxy->set_handler(isolate->heap()->null_value());
+ proxy->set_handler(ReadOnlyRoots(isolate).null_value());
}
DCHECK(proxy->IsRevoked());
}
@@ -5601,7 +5741,7 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
isolate, trap_result_obj,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
- bool boolean_trap_result = trap_result_obj->BooleanValue();
+ bool boolean_trap_result = trap_result_obj->BooleanValue(isolate);
// 9. If booleanTrapResult is false, then:
if (!boolean_trap_result) {
MAYBE_RETURN(JSProxy::CheckHasTrap(isolate, name, target), Nothing<bool>());
@@ -5674,7 +5814,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
isolate, trap_result,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
- if (!trap_result->BooleanValue()) {
+ if (!trap_result->BooleanValue(isolate)) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
trap_name, name));
@@ -5722,7 +5862,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
isolate, trap_result,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
- if (!trap_result->BooleanValue()) {
+ if (!trap_result->BooleanValue(isolate)) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
trap_name, name));
@@ -5775,7 +5915,8 @@ MaybeHandle<Context> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
THROW_NEW_ERROR(proxy->GetIsolate(),
NewTypeError(MessageTemplate::kProxyRevoked), Context);
}
- Handle<JSReceiver> target(JSReceiver::cast(proxy->target()));
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()),
+ proxy->GetIsolate());
return JSReceiver::GetFunctionRealm(target);
}
@@ -5785,7 +5926,7 @@ MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
Handle<JSBoundFunction> function) {
DCHECK(function->map()->is_constructor());
return JSReceiver::GetFunctionRealm(
- handle(function->bound_target_function()));
+ handle(function->bound_target_function(), function->GetIsolate()));
}
// static
@@ -5871,7 +6012,7 @@ Maybe<int> JSFunction::GetLength(Isolate* isolate,
// static
Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
DCHECK(function->map()->is_constructor());
- return handle(function->context()->native_context());
+ return handle(function->context()->native_context(), function->GetIsolate());
}
@@ -5927,7 +6068,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
} else {
TransitionElementsKind(object, to_kind);
}
- map = Map::ReconfigureElementsKind(map, to_kind);
+ map = Map::ReconfigureElementsKind(object->GetIsolate(), map, to_kind);
}
int number_of_fields = map->NumberOfFields();
int inobject = map->GetInObjectProperties();
@@ -5940,7 +6081,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
if (!FLAG_unbox_double_fields || external > 0) {
Isolate* isolate = object->GetIsolate();
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
Handle<FixedArray> storage;
if (!FLAG_unbox_double_fields) {
storage = isolate->factory()->NewFixedArray(inobject);
@@ -5955,7 +6096,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
if (!representation.IsDouble()) continue;
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (map->IsUnboxedDoubleField(index)) continue;
- Handle<HeapNumber> box = isolate->factory()->NewMutableHeapNumber();
+ auto box = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
if (index.is_inobject()) {
storage->set(index.property_index(), *box);
} else {
@@ -5978,8 +6119,8 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
void JSObject::MigrateInstance(Handle<JSObject> object) {
- Handle<Map> original_map(object->map());
- Handle<Map> map = Map::Update(original_map);
+ Handle<Map> original_map(object->map(), object->GetIsolate());
+ Handle<Map> map = Map::Update(object->GetIsolate(), original_map);
map->set_is_migration_target(true);
MigrateToMap(object, map);
if (FLAG_trace_migration) {
@@ -5987,7 +6128,7 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- object->JSObjectVerify();
+ object->JSObjectVerify(object->GetIsolate());
}
#endif
}
@@ -5999,7 +6140,7 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
DisallowDeoptimization no_deoptimization(isolate);
Handle<Map> original_map(object->map(), isolate);
Handle<Map> new_map;
- if (!Map::TryUpdate(original_map).ToHandle(&new_map)) {
+ if (!Map::TryUpdate(isolate, original_map).ToHandle(&new_map)) {
return false;
}
JSObject::MigrateToMap(object, new_map);
@@ -6008,17 +6149,17 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- object->JSObjectVerify();
+ object->JSObjectVerify(isolate);
}
#endif
return true;
}
-
-void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
- Handle<Object> value,
+void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate, object, name, object,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
#ifdef DEBUG
uint32_t index;
@@ -6139,7 +6280,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(!value->IsTheHole(object->GetIsolate()));
+ DCHECK(!value->IsTheHole());
LookupIterator it(object, name, object, LookupIterator::OWN);
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -6220,7 +6361,7 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
return MaybeHandle<Map>();
}
- return handle(normalized_map);
+ return handle(normalized_map, GetIsolate());
}
void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map,
@@ -6246,8 +6387,8 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
const char* reason) {
if (!object->HasFastProperties()) return;
- Handle<Map> map(object->map());
- Handle<Map> new_map = Map::Normalize(map, mode, reason);
+ Handle<Map> map(object->map(), object->GetIsolate());
+ Handle<Map> new_map = Map::Normalize(object->GetIsolate(), map, mode, reason);
MigrateToMap(object, new_map, expected_additional_properties);
}
@@ -6260,7 +6401,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
- Handle<NameDictionary> dictionary(object->property_dictionary());
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
// Make sure we preserve dictionary representation if there are too many
// descriptors.
@@ -6268,15 +6409,16 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
if (number_of_elements > kMaxNumberOfDescriptors) return;
Handle<FixedArray> iteration_order =
- NameDictionary::IterationIndices(dictionary);
+ NameDictionary::IterationIndices(isolate, dictionary);
int instance_descriptor_length = iteration_order->length();
int number_of_fields = 0;
// Compute the length of the instance descriptor.
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < instance_descriptor_length; i++) {
int index = Smi::ToInt(iteration_order->get(i));
- DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
+ DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(index)));
PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
@@ -6296,7 +6438,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int inobject_props = old_map->GetInObjectProperties();
// Allocate new map.
- Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
+ Handle<Map> new_map = Map::CopyDropDescriptors(isolate, old_map);
if (new_map->has_named_interceptor() || new_map->is_access_check_needed()) {
// Force certain slow paths when API interceptors are used, or if an access
// check is required.
@@ -6316,7 +6458,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Transform the object.
new_map->SetInObjectUnusedPropertyFields(inobject_props);
object->synchronized_set_map(*new_map);
- object->SetProperties(isolate->heap()->empty_fixed_array());
+ object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
// Check that it really works.
DCHECK(object->HasFastProperties());
return;
@@ -6360,7 +6502,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyDetails details = dictionary->DetailsAt(index);
DCHECK_EQ(kField, details.location());
- DCHECK_EQ(kMutable, details.constness());
+ DCHECK_EQ(PropertyConstness::kMutable, details.constness());
Descriptor d;
if (details.kind() == kData) {
@@ -6372,12 +6514,13 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// transitionable.
PropertyConstness constness =
FLAG_track_constant_fields && !is_transitionable_elements_kind
- ? kConst
- : kMutable;
+ ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
d = Descriptor::DataField(
key, current_offset, details.attributes(), constness,
// TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged(), FieldType::Any(isolate));
+ Representation::Tagged(),
+ MaybeObjectHandle(FieldType::Any(isolate)));
}
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -6402,7 +6545,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
descriptors->Sort();
Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
- new_map, descriptors, descriptors->number_of_descriptors());
+ isolate, new_map, descriptors, descriptors->number_of_descriptors());
DisallowHeapAllocation no_gc;
new_map->InitializeDescriptors(*descriptors, *layout_descriptor);
@@ -6444,7 +6587,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
elements = SloppyArgumentsElements::cast(elements)->arguments();
}
- if (elements->IsDictionary()) {
+ if (elements->IsNumberDictionary()) {
return handle(NumberDictionary::cast(elements), isolate);
}
}
@@ -6477,7 +6620,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
#ifdef DEBUG
if (FLAG_trace_normalization) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Object elements have been normalized:\n";
object->Print(os);
}
@@ -6491,13 +6634,15 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
namespace {
-Object* SetHashAndUpdateProperties(HeapObject* properties, int hash) {
+Object* SetHashAndUpdateProperties(Isolate* isolate, HeapObject* properties,
+ int hash) {
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
DCHECK(PropertyArray::HashField::is_valid(hash));
- Heap* heap = properties->GetHeap();
- if (properties == heap->empty_fixed_array() ||
- properties == heap->empty_property_array() ||
+ Heap* heap = isolate->heap();
+ ReadOnlyRoots roots(heap);
+ if (properties == roots.empty_fixed_array() ||
+ properties == roots.empty_property_array() ||
properties == heap->empty_property_dictionary()) {
return Smi::FromInt(hash);
}
@@ -6508,7 +6653,7 @@ Object* SetHashAndUpdateProperties(HeapObject* properties, int hash) {
return properties;
}
- DCHECK(properties->IsDictionary());
+ DCHECK(properties->IsNameDictionary());
NameDictionary::cast(properties)->SetHash(hash);
return properties;
}
@@ -6533,7 +6678,7 @@ int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
}
#ifdef DEBUG
- FixedArray* empty_fixed_array = isolate->heap()->empty_fixed_array();
+ FixedArray* empty_fixed_array = ReadOnlyRoots(isolate).empty_fixed_array();
FixedArray* empty_property_dictionary =
isolate->heap()->empty_property_dictionary();
DCHECK(properties == empty_fixed_array ||
@@ -6551,23 +6696,23 @@ void JSReceiver::SetIdentityHash(int hash) {
HeapObject* existing_properties = HeapObject::cast(raw_properties_or_hash());
Object* new_properties =
- SetHashAndUpdateProperties(existing_properties, hash);
+ SetHashAndUpdateProperties(GetIsolate(), existing_properties, hash);
set_raw_properties_or_hash(new_properties);
}
void JSReceiver::SetProperties(HeapObject* properties) {
DCHECK_IMPLIES(properties->IsPropertyArray() &&
PropertyArray::cast(properties)->length() == 0,
- properties == properties->GetHeap()->empty_property_array());
+ properties == GetReadOnlyRoots().empty_property_array());
DisallowHeapAllocation no_gc;
- Isolate* isolate = properties->GetIsolate();
+ Isolate* isolate = GetIsolate();
int hash = GetIdentityHashHelper(isolate, this);
Object* new_properties = properties;
// TODO(cbruni): Make GetIdentityHashHelper return a bool so that we
// don't have to manually compare against kNoHashSentinel.
if (hash != PropertyArray::kNoHashSentinel) {
- new_properties = SetHashAndUpdateProperties(properties, hash);
+ new_properties = SetHashAndUpdateProperties(isolate, properties, hash);
}
set_raw_properties_or_hash(new_properties);
@@ -6578,7 +6723,7 @@ Object* JSReceiver::GetIdentityHash(Isolate* isolate) {
int hash = GetIdentityHashHelper(isolate, this);
if (hash == PropertyArray::kNoHashSentinel) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
return Smi::FromInt(hash);
@@ -6649,18 +6794,18 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary());
+ JSGlobalObject::cast(*object)->global_dictionary(), isolate);
DCHECK_NE(GlobalDictionary::kNotFound, entry);
- auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
- cell->set_value(isolate->heap()->the_hole_value());
+ auto cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
+ cell->set_value(ReadOnlyRoots(isolate).the_hole_value());
cell->set_property_details(
PropertyDetails::Empty(PropertyCellType::kUninitialized));
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary());
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
DCHECK_NE(NameDictionary::kNotFound, entry);
- dictionary = NameDictionary::DeleteEntry(dictionary, entry);
+ dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
object->SetProperties(*dictionary);
}
if (object->map()->is_prototype_map()) {
@@ -6762,7 +6907,7 @@ Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, object, LookupIterator::OWN);
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
return DeleteProperty(&it, language_mode);
}
@@ -6785,13 +6930,13 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
// 5. ReturnIfAbrupt(desc).
PropertyDescriptor desc;
if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
// 6. Let success be DefinePropertyOrThrow(O,key, desc).
Maybe<bool> success = DefineOwnProperty(
isolate, Handle<JSReceiver>::cast(object), key, &desc, kThrowOnError);
// 7. ReturnIfAbrupt(success).
- MAYBE_RETURN(success, isolate->heap()->exception());
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
CHECK(success.FromJust());
// 8. Return O.
return *object;
@@ -7317,7 +7462,7 @@ Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
// 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
// 2. If P is "length", then:
// TODO(jkummerow): Check if we need slow string comparison.
- if (*name == isolate->heap()->length_string()) {
+ if (*name == ReadOnlyRoots(isolate).length_string()) {
// 2a. Return ArraySetLength(A, Desc).
return ArraySetLength(isolate, o, desc, should_throw);
}
@@ -7394,7 +7539,7 @@ bool JSArray::AnythingToArrayLength(Isolate* isolate,
}
// 5. Let numberLen be ToNumber(Desc.[[Value]]).
Handle<Object> number_v;
- if (!Object::ToNumber(length_object).ToHandle(&number_v)) {
+ if (!Object::ToNumber(isolate, length_object).ToHandle(&number_v)) {
// 6. ReturnIfAbrupt(newLen).
return false;
}
@@ -7552,7 +7697,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
// 10. If booleanTrapResult is false, return false.
- if (!trap_result_obj->BooleanValue()) {
+ if (!trap_result_obj->BooleanValue(isolate)) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
trap_name, property_name));
@@ -7637,10 +7782,10 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
- Handle<NameDictionary> dict(proxy->property_dictionary());
+ Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
Handle<NameDictionary> result =
- NameDictionary::Add(dict, private_name, value, details);
+ NameDictionary::Add(isolate, dict, private_name, value, details);
if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
return Just(true);
}
@@ -7672,42 +7817,41 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
}
}
- if (it->state() != LookupIterator::INTERCEPTOR) return Just(false);
-
- Isolate* isolate = it->isolate();
- Handle<InterceptorInfo> interceptor = it->GetInterceptor();
- if (interceptor->descriptor()->IsUndefined(isolate)) return Just(false);
-
- Handle<Object> result;
- Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (it->state() == LookupIterator::INTERCEPTOR) {
+ Isolate* isolate = it->isolate();
+ Handle<InterceptorInfo> interceptor = it->GetInterceptor();
+ if (!interceptor->descriptor()->IsUndefined(isolate)) {
+ Handle<Object> result;
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, kDontThrow);
- if (it->IsElement()) {
- result = args.CallIndexedDescriptor(interceptor, it->index());
- } else {
- result = args.CallNamedDescriptor(interceptor, it->name());
- }
- if (!result.is_null()) {
- // Request successfully intercepted, try to set the property
- // descriptor.
- Utils::ApiCheck(
- PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
- it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
- : "v8::NamedPropertyDescriptorCallback",
- "Invalid property descriptor.");
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, kDontThrow);
+ if (it->IsElement()) {
+ result = args.CallIndexedDescriptor(interceptor, it->index());
+ } else {
+ result = args.CallNamedDescriptor(interceptor, it->name());
+ }
+ if (!result.is_null()) {
+ // Request successfully intercepted, try to set the property
+ // descriptor.
+ Utils::ApiCheck(
+ PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
+ it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
+ : "v8::NamedPropertyDescriptorCallback",
+ "Invalid property descriptor.");
- return Just(true);
+ return Just(true);
+ }
+ it->Next();
+ }
}
-
- it->Next();
return Just(false);
}
} // namespace
@@ -7761,9 +7905,11 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(it->GetAccessors());
// 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
- desc->set_get(AccessorPair::GetComponent(accessors, ACCESSOR_GETTER));
+ desc->set_get(
+ AccessorPair::GetComponent(isolate, accessors, ACCESSOR_GETTER));
// 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
- desc->set_set(AccessorPair::GetComponent(accessors, ACCESSOR_SETTER));
+ desc->set_set(
+ AccessorPair::GetComponent(isolate, accessors, ACCESSOR_SETTER));
}
// 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
@@ -7897,7 +8043,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- Isolate* isolate = elements->GetIsolate();
+ Isolate* isolate = GetIsolate();
if (IsObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
int length = IsJSArray() ? Smi::ToInt(JSArray::cast(this)->length())
: elements->length();
@@ -7974,7 +8120,8 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the arguments.
FixedArray* arguments = elements->arguments();
- kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : HOLEY_ELEMENTS;
+ kind = arguments->IsNumberDictionary() ? DICTIONARY_ELEMENTS
+ : HOLEY_ELEMENTS;
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
@@ -8104,14 +8251,15 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
namespace {
template <typename Dictionary>
-bool TestDictionaryPropertiesIntegrityLevel(Dictionary* dict, Isolate* isolate,
+bool TestDictionaryPropertiesIntegrityLevel(Dictionary* dict,
+ ReadOnlyRoots roots,
PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
uint32_t capacity = dict->Capacity();
for (uint32_t i = 0; i < capacity; i++) {
Object* key;
- if (!dict->ToKey(isolate, i, &key)) continue;
+ if (!dict->ToKey(roots, i, &key)) continue;
if (key->FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dict->DetailsAt(i);
if (details.IsConfigurable()) return false;
@@ -8147,8 +8295,8 @@ bool TestPropertiesIntegrityLevel(JSObject* object, PropertyAttributes level) {
return TestFastPropertiesIntegrityLevel(object->map(), level);
}
- return TestDictionaryPropertiesIntegrityLevel(object->property_dictionary(),
- object->GetIsolate(), level);
+ return TestDictionaryPropertiesIntegrityLevel(
+ object->property_dictionary(), object->GetReadOnlyRoots(), level);
}
bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
@@ -8158,7 +8306,7 @@ bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
if (IsDictionaryElementsKind(kind)) {
return TestDictionaryPropertiesIntegrityLevel(
- NumberDictionary::cast(object->elements()), object->GetIsolate(),
+ NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
level);
}
@@ -8268,7 +8416,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
isolate, trap_result,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
- if (!trap_result->BooleanValue()) {
+ if (!trap_result->BooleanValue(isolate)) {
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
@@ -8295,7 +8443,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
}
if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
RETURN_FAILURE(isolate, should_throw,
@@ -8331,7 +8479,8 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
// Do a map transition, other objects with this map may still
// be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Handle<Map> new_map = Map::Copy(handle(object->map()), "PreventExtensions");
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(object->map(), isolate), "PreventExtensions");
new_map->set_is_extensible(false);
JSObject::MigrateToMap(object, new_map);
@@ -8380,7 +8529,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
// Enforce the invariant.
Maybe<bool> target_result = JSReceiver::IsExtensible(target);
MAYBE_RETURN(target_result, Nothing<bool>());
- if (target_result.FromJust() != trap_result->BooleanValue()) {
+ if (target_result.FromJust() != trap_result->BooleanValue(isolate)) {
isolate->Throw(
*factory->NewTypeError(MessageTemplate::kProxyIsExtensibleInconsistent,
factory->ToBoolean(target_result.FromJust())));
@@ -8393,7 +8542,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
bool JSObject::IsExtensible(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
return true;
}
if (object->IsJSGlobalProxy()) {
@@ -8408,13 +8557,13 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
namespace {
template <typename Dictionary>
-void ApplyAttributesToDictionary(Isolate* isolate,
+void ApplyAttributesToDictionary(Isolate* isolate, ReadOnlyRoots roots,
Handle<Dictionary> dictionary,
const PropertyAttributes attributes) {
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!dictionary->ToKey(isolate, i, &k)) continue;
+ if (!dictionary->ToKey(roots, i, &k)) continue;
if (k->FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
@@ -8424,7 +8573,7 @@ void ApplyAttributesToDictionary(Isolate* isolate,
if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
}
details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
- dictionary->DetailsAtPut(i, details);
+ dictionary->DetailsAtPut(isolate, i, details);
}
}
@@ -8442,7 +8591,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
RETURN_FAILURE(isolate, should_throw,
@@ -8501,7 +8650,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
Handle<Map> old_map(object->map(), isolate);
- TransitionsAccessor transitions(old_map);
+ TransitionsAccessor transitions(isolate, old_map);
Map* transition = transitions.SearchSpecial(*transition_marker);
if (transition != nullptr) {
Handle<Map> transition_map(transition, isolate);
@@ -8513,7 +8662,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
} else if (transitions.CanHaveMoreTransitions()) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
- old_map, attrs, transition_marker, "CopyForPreventExtensions");
+ isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
JSObject::MigrateToMap(object, new_map);
} else {
DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
@@ -8523,8 +8672,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
// Create a new map, since other objects with this map may be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Handle<Map> new_map =
- Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
+ Handle<Map> new_map = Map::Copy(isolate, handle(object->map(), isolate),
+ "SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
if (!new_element_dictionary.is_null()) {
ElementsKind new_kind =
@@ -8536,14 +8685,15 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
JSObject::MigrateToMap(object, new_map);
if (attrs != NONE) {
+ ReadOnlyRoots roots(isolate);
if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
JSGlobalObject::cast(*object)->global_dictionary(), isolate);
- ApplyAttributesToDictionary(isolate, dictionary, attrs);
+ ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(),
isolate);
- ApplyAttributesToDictionary(isolate, dictionary, attrs);
+ ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
}
}
}
@@ -8566,12 +8716,14 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
object->set_elements(*new_element_dictionary);
}
- if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ if (object->elements() !=
+ ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
Handle<NumberDictionary> dictionary(object->element_dictionary(), isolate);
// Make sure we never go back to the fast case
object->RequireSlowElements(*dictionary);
if (attrs != NONE) {
- ApplyAttributesToDictionary(isolate, dictionary, attrs);
+ ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate), dictionary,
+ attrs);
}
}
@@ -8636,7 +8788,8 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
for (Handle<String> name : method_names) {
Handle<Object> method;
ASSIGN_RETURN_ON_EXCEPTION(isolate, method,
- JSReceiver::GetProperty(receiver, name), Object);
+ JSReceiver::GetProperty(isolate, receiver, name),
+ Object);
if (method->IsCallable()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -8761,7 +8914,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!map->IsJSObjectMap()) return Just(false);
if (!map->OnlyHasSimpleProperties()) return Just(false);
- Handle<JSObject> object(JSObject::cast(*receiver));
+ Handle<JSObject> object(JSObject::cast(*receiver), isolate);
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -8771,7 +8924,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
number_of_own_descriptors + number_of_own_elements);
int count = 0;
- if (object->elements() != isolate->heap()->empty_fixed_array()) {
+ if (object->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
MAYBE_RETURN(object->GetElementsAccessor()->CollectValuesOrEntries(
isolate, object, values_or_entries, get_entries, &count,
ENUMERABLE_STRINGS),
@@ -8791,7 +8944,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetValue(index), isolate);
+ prop_value = handle(descriptors->GetStrongValue(index), isolate);
} else {
Representation representation = details.representation();
FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
@@ -8800,14 +8953,16 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
}
} else {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, JSReceiver::GetProperty(object, next_key),
+ isolate, prop_value,
+ JSReceiver::GetProperty(isolate, object, next_key),
Nothing<bool>());
stable = object->map() == *map;
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
// the object has a simple shape, and that the key is a name.
- LookupIterator it(object, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate, object, next_key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
if (!it.IsFound()) continue;
DCHECK(it.state() == LookupIterator::DATA ||
it.state() == LookupIterator::ACCESSOR);
@@ -8824,8 +8979,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
count++;
}
- if (count < values_or_entries->length()) values_or_entries->Shrink(count);
- *result = values_or_entries;
+ DCHECK_LE(count, values_or_entries->length());
+ *result = FixedArray::ShrinkOrEmpty(isolate, values_or_entries, count);
return Just(true);
}
@@ -8868,7 +9023,7 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, JSReceiver::GetPropertyOrElement(object, key),
+ isolate, value, JSReceiver::GetPropertyOrElement(isolate, object, key),
MaybeHandle<FixedArray>());
if (get_entries) {
@@ -8883,8 +9038,8 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
values_or_entries->set(length, *value);
length++;
}
- if (length < values_or_entries->length()) values_or_entries->Shrink(length);
- return values_or_entries;
+ DCHECK_LE(length, values_or_entries->length());
+ return FixedArray::ShrinkOrEmpty(isolate, values_or_entries, length);
}
MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
@@ -8913,12 +9068,12 @@ Handle<FixedArray> JSReceiver::GetOwnElementIndices(Isolate* isolate,
return keys;
}
-bool Map::DictionaryElementsInPrototypeChainOnly() {
+bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
if (IsDictionaryElementsKind(elements_kind())) {
return false;
}
- for (PrototypeIterator iter(this); !iter.IsAtEnd(); iter.Advance()) {
+ for (PrototypeIterator iter(isolate, this); !iter.IsAtEnd(); iter.Advance()) {
// Be conservative, don't walk into proxies.
if (iter.GetCurrent()->IsJSProxy()) return true;
// String wrappers have non-configurable, non-writable elements.
@@ -9060,13 +9215,13 @@ Object* JSObject::SlowReverseLookup(Object* value) {
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
- if (descs->GetValue(i) == value) {
+ if (descs->GetStrongValue(i) == value) {
return descs->GetKey(i);
}
}
}
}
- return GetHeap()->undefined_value();
+ return GetReadOnlyRoots().undefined_value();
} else if (IsJSGlobalObject()) {
return JSGlobalObject::cast(this)->global_dictionary()->SlowReverseLookup(
value);
@@ -9075,14 +9230,13 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
-Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size,
+Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
int inobject_properties) {
- Isolate* isolate = map->GetIsolate();
Handle<Map> result = isolate->factory()->NewMap(
map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
inobject_properties);
Handle<Object> prototype(map->prototype(), isolate);
- Map::SetPrototype(result, prototype);
+ Map::SetPrototype(isolate, result, prototype);
result->set_constructor_or_backpointer(map->GetConstructor());
result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
@@ -9099,12 +9253,10 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size,
return result;
}
-
-Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
- const char* reason) {
+Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode, const char* reason) {
DCHECK(!fast_map->is_dictionary_map());
- Isolate* isolate = fast_map->GetIsolate();
Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
isolate);
bool use_cache =
@@ -9115,14 +9267,14 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
Handle<Map> new_map;
if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) new_map->DictionaryMapVerify();
+ if (FLAG_verify_heap) new_map->DictionaryMapVerify(isolate);
#endif
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ICs which can be
// applied to the shared map, dependent code and weak cell cache.
- Handle<Map> fresh = Map::CopyNormalized(fast_map, mode);
+ Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
if (new_map->is_prototype_map()) {
// For prototype maps, the PrototypeInfo is not copied.
@@ -9152,9 +9304,9 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
}
#endif
} else {
- new_map = Map::CopyNormalized(fast_map, mode);
+ new_map = Map::CopyNormalized(isolate, fast_map, mode);
if (use_cache) {
- Handle<WeakCell> cell = Map::WeakCellForMap(new_map);
+ Handle<WeakCell> cell = Map::WeakCellForMap(isolate, new_map);
cache->Set(fast_map, new_map, cell);
isolate->counters()->maps_normalized()->Increment();
}
@@ -9162,12 +9314,11 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
}
}
- fast_map->NotifyLeafMapLayoutChange();
+ fast_map->NotifyLeafMapLayoutChange(isolate);
return new_map;
}
-
-Handle<Map> Map::CopyNormalized(Handle<Map> map,
+Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode) {
int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
@@ -9175,7 +9326,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
}
Handle<Map> result = RawCopy(
- map, new_instance_size,
+ isolate, map, new_instance_size,
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
// Clear the unused_property_fields explicitly as this field should not
// be accessed for normalized maps.
@@ -9186,7 +9337,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
result->set_construction_counter(kNoSlackTracking);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) result->DictionaryMapVerify();
+ if (FLAG_verify_heap) result->DictionaryMapVerify(isolate);
#endif
return result;
@@ -9198,16 +9349,15 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
// memory on the map transition tree.
// static
-Handle<Map> Map::TransitionToImmutableProto(Handle<Map> map) {
- Handle<Map> new_map = Map::Copy(map, "ImmutablePrototype");
+Handle<Map> Map::TransitionToImmutableProto(Isolate* isolate, Handle<Map> map) {
+ Handle<Map> new_map = Map::Copy(isolate, map, "ImmutablePrototype");
new_map->set_is_immutable_proto(true);
return new_map;
}
namespace {
-void EnsureInitialMap(Handle<Map> map) {
+void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
#ifdef DEBUG
- Isolate* isolate = map->GetIsolate();
// Strict function maps have Function as a constructor but the
// Function's initial map is a sloppy function map. Same holds for
// GeneratorFunction / AsyncFunction and its initial map.
@@ -9234,18 +9384,19 @@ void EnsureInitialMap(Handle<Map> map) {
} // namespace
// static
-Handle<Map> Map::CopyInitialMapNormalized(Handle<Map> map,
+Handle<Map> Map::CopyInitialMapNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode) {
- EnsureInitialMap(map);
- return CopyNormalized(map, mode);
+ EnsureInitialMap(isolate, map);
+ return CopyNormalized(isolate, map, mode);
}
// static
-Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
- int inobject_properties,
+Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
+ int instance_size, int inobject_properties,
int unused_property_fields) {
- EnsureInitialMap(map);
- Handle<Map> result = RawCopy(map, instance_size, inobject_properties);
+ EnsureInitialMap(isolate, map);
+ Handle<Map> result =
+ RawCopy(isolate, map, instance_size, inobject_properties);
// Please note instance_type and instance_size are set when allocated.
result->SetInObjectUnusedPropertyFields(unused_property_fields);
@@ -9264,22 +9415,20 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
return result;
}
-
-Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
+Handle<Map> Map::CopyDropDescriptors(Isolate* isolate, Handle<Map> map) {
Handle<Map> result =
- RawCopy(map, map->instance_size(),
+ RawCopy(isolate, map, map->instance_size(),
map->IsJSObjectMap() ? map->GetInObjectProperties() : 0);
// Please note instance_type and instance_size are set when allocated.
if (map->IsJSObjectMap()) {
result->CopyUnusedPropertyFields(*map);
}
- map->NotifyLeafMapLayoutChange();
+ map->NotifyLeafMapLayoutChange(isolate);
return result;
}
-
-Handle<Map> Map::ShareDescriptor(Handle<Map> map,
+Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor) {
// Sanity check. This path is only to be taken if the map owns its descriptor
@@ -9288,7 +9437,7 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
DCHECK_EQ(map->NumberOfOwnDescriptors(),
map->instance_descriptors()->number_of_descriptors());
- Handle<Map> result = CopyDropDescriptors(map);
+ Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
// Properly mark the {result} if the {name} is an "interesting symbol".
@@ -9300,18 +9449,19 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
if (descriptors->NumberOfSlackDescriptors() == 0) {
int old_size = descriptors->number_of_descriptors();
if (old_size == 0) {
- descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1);
+ descriptors = DescriptorArray::Allocate(isolate, 0, 1);
} else {
int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
- EnsureDescriptorSlack(map, slack);
- descriptors = handle(map->instance_descriptors());
+ EnsureDescriptorSlack(isolate, map, slack);
+ descriptors = handle(map->instance_descriptors(), isolate);
}
}
Handle<LayoutDescriptor> layout_descriptor =
FLAG_unbox_double_fields
- ? LayoutDescriptor::ShareAppend(map, descriptor->GetDetails())
- : handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
+ ? LayoutDescriptor::ShareAppend(isolate, map,
+ descriptor->GetDetails())
+ : handle(LayoutDescriptor::FastPointerLayout(), isolate);
{
DisallowHeapAllocation no_gc;
@@ -9320,14 +9470,14 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
}
DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
- ConnectTransition(map, result, name, SIMPLE_PROPERTY_TRANSITION);
+ ConnectTransition(isolate, map, result, name, SIMPLE_PROPERTY_TRANSITION);
return result;
}
-void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
- Handle<Name> name, SimpleTransitionFlag flag) {
- Isolate* isolate = parent->GetIsolate();
+void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
+ Handle<Map> child, Handle<Name> name,
+ SimpleTransitionFlag flag) {
DCHECK_IMPLIES(name->IsInterestingSymbol(),
child->may_have_interesting_symbols());
DCHECK_IMPLIES(parent->may_have_interesting_symbols(),
@@ -9357,22 +9507,21 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
LOG(isolate, MapEvent("Transition", *parent, *child, "prototype", *name));
}
} else {
- TransitionsAccessor(parent).Insert(name, child, flag);
+ TransitionsAccessor(isolate, parent).Insert(name, child, flag);
if (FLAG_trace_maps) {
LOG(isolate, MapEvent("Transition", *parent, *child, "", *name));
}
}
}
-
Handle<Map> Map::CopyReplaceDescriptors(
- Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
MaybeHandle<Name> maybe_name, const char* reason,
SimpleTransitionFlag simple_flag) {
DCHECK(descriptors->IsSortedNoDuplicates());
- Handle<Map> result = CopyDropDescriptors(map);
+ Handle<Map> result = CopyDropDescriptors(isolate, map);
// Properly mark the {result} if the {name} is an "interesting symbol".
Handle<Name> name;
@@ -9382,11 +9531,11 @@ Handle<Map> Map::CopyReplaceDescriptors(
if (!map->is_prototype_map()) {
if (flag == INSERT_TRANSITION &&
- TransitionsAccessor(map).CanHaveMoreTransitions()) {
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
result->InitializeDescriptors(*descriptors, *layout_descriptor);
DCHECK(!maybe_name.is_null());
- ConnectTransition(map, result, name, simple_flag);
+ ConnectTransition(isolate, map, result, name, simple_flag);
} else {
descriptors->GeneralizeAllFields();
result->InitializeDescriptors(*descriptors,
@@ -9399,9 +9548,9 @@ Handle<Map> Map::CopyReplaceDescriptors(
// Mirror conditions above that did not call ConnectTransition().
(map->is_prototype_map() ||
!(flag == INSERT_TRANSITION &&
- TransitionsAccessor(map).CanHaveMoreTransitions()))) {
- LOG(map->GetIsolate(), MapEvent("ReplaceDescriptors", *map, *result, reason,
- maybe_name.is_null() ? nullptr : *name));
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()))) {
+ LOG(isolate, MapEvent("ReplaceDescriptors", *map, *result, reason,
+ maybe_name.is_null() ? nullptr : *name));
}
return result;
}
@@ -9412,7 +9561,8 @@ Handle<Map> Map::CopyReplaceDescriptors(
// The way how it is done is tricky because of GC and special descriptors
// marking logic.
Handle<Map> Map::AddMissingTransitions(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> split_map,
+ Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
int split_nof = split_map->NumberOfOwnDescriptors();
@@ -9428,7 +9578,7 @@ Handle<Map> Map::AddMissingTransitions(
// Also the last map might have interesting symbols, we temporarily set
// the flag and clear it right before the descriptors are installed. This
// makes heap verification happy and ensures the flag ends up accurate.
- Handle<Map> last_map = CopyDropDescriptors(split_map);
+ Handle<Map> last_map = CopyDropDescriptors(isolate, split_map);
last_map->InitializeDescriptors(*descriptors, *full_layout_descriptor);
last_map->SetInObjectUnusedPropertyFields(0);
last_map->set_may_have_interesting_symbols(true);
@@ -9440,13 +9590,14 @@ Handle<Map> Map::AddMissingTransitions(
// case for all the intermediate maps we create here.
Handle<Map> map = split_map;
for (int i = split_nof; i < nof_descriptors - 1; ++i) {
- Handle<Map> new_map = CopyDropDescriptors(map);
- InstallDescriptors(map, new_map, i, descriptors, full_layout_descriptor);
+ Handle<Map> new_map = CopyDropDescriptors(isolate, map);
+ InstallDescriptors(isolate, map, new_map, i, descriptors,
+ full_layout_descriptor);
map = new_map;
}
- map->NotifyLeafMapLayoutChange();
+ map->NotifyLeafMapLayoutChange(isolate);
last_map->set_may_have_interesting_symbols(false);
- InstallDescriptors(map, last_map, nof_descriptors - 1, descriptors,
+ InstallDescriptors(isolate, map, last_map, nof_descriptors - 1, descriptors,
full_layout_descriptor);
return last_map;
}
@@ -9454,8 +9605,8 @@ Handle<Map> Map::AddMissingTransitions(
// Since this method is used to rewrite an existing transition tree, it can
// always insert transitions without checking.
-void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
- int new_descriptor,
+void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
+ Handle<Map> child, int new_descriptor,
Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
@@ -9470,7 +9621,7 @@ void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
if (FLAG_unbox_double_fields) {
Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::AppendIfFastOrUseFull(parent, details,
+ LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
full_layout_descriptor);
child->set_layout_descriptor(*layout_descriptor);
#ifdef VERIFY_HEAP
@@ -9484,16 +9635,15 @@ void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
child->set_visitor_id(Map::GetVisitorId(*child));
}
- Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
+ Handle<Name> name = handle(descriptors->GetKey(new_descriptor), isolate);
if (parent->may_have_interesting_symbols() || name->IsInterestingSymbol()) {
child->set_may_have_interesting_symbols(true);
}
- ConnectTransition(parent, child, name, SIMPLE_PROPERTY_TRANSITION);
+ ConnectTransition(isolate, parent, child, name, SIMPLE_PROPERTY_TRANSITION);
}
-
-Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
- TransitionFlag flag) {
+Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind, TransitionFlag flag) {
// Only certain objects are allowed to have non-terminal fast transitional
// elements kinds.
DCHECK(map->IsJSObjectMap());
@@ -9504,7 +9654,7 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
Map* maybe_elements_transition_map = nullptr;
if (flag == INSERT_TRANSITION) {
// Ensure we are requested to add elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+ DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
maybe_elements_transition_map = map->ElementsTransitionMap();
@@ -9517,70 +9667,70 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
DCHECK(kind != map->elements_kind());
}
- bool insert_transition = flag == INSERT_TRANSITION &&
- TransitionsAccessor(map).CanHaveMoreTransitions() &&
- maybe_elements_transition_map == nullptr;
+ bool insert_transition =
+ flag == INSERT_TRANSITION &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions() &&
+ maybe_elements_transition_map == nullptr;
if (insert_transition) {
- Handle<Map> new_map = CopyForTransition(map, "CopyAsElementsKind");
+ Handle<Map> new_map = CopyForTransition(isolate, map, "CopyAsElementsKind");
new_map->set_elements_kind(kind);
- Isolate* isolate = map->GetIsolate();
Handle<Name> name = isolate->factory()->elements_transition_symbol();
- ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
+ ConnectTransition(isolate, map, new_map, name, SPECIAL_TRANSITION);
return new_map;
}
// Create a new free-floating map only if we are not allowed to store it.
- Handle<Map> new_map = Copy(map, "CopyAsElementsKind");
+ Handle<Map> new_map = Copy(isolate, map, "CopyAsElementsKind");
new_map->set_elements_kind(kind);
return new_map;
}
-Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
+Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
Handle<SharedFunctionInfo> shared_info) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
// Initial map for sloppy mode function is stored in the function
// constructor. Initial maps for strict mode are cached as special transitions
// using |strict_function_transition_symbol| as a key.
if (is_sloppy(shared_info->language_mode())) return initial_map;
- Isolate* isolate = initial_map->GetIsolate();
- Handle<Map> function_map(Map::cast(
- isolate->native_context()->get(shared_info->function_map_index())));
+ Handle<Map> function_map(Map::cast(isolate->native_context()->get(
+ shared_info->function_map_index())),
+ isolate);
STATIC_ASSERT(LanguageModeSize == 2);
DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
Handle<Symbol> transition_symbol =
isolate->factory()->strict_function_transition_symbol();
- Map* maybe_transition =
- TransitionsAccessor(initial_map).SearchSpecial(*transition_symbol);
+ Map* maybe_transition = TransitionsAccessor(isolate, initial_map)
+ .SearchSpecial(*transition_symbol);
if (maybe_transition != nullptr) {
return handle(maybe_transition, isolate);
}
- initial_map->NotifyLeafMapLayoutChange();
+ initial_map->NotifyLeafMapLayoutChange(isolate);
// Create new map taking descriptors from the |function_map| and all
// the other details from the |initial_map|.
Handle<Map> map =
- Map::CopyInitialMap(function_map, initial_map->instance_size(),
+ Map::CopyInitialMap(isolate, function_map, initial_map->instance_size(),
initial_map->GetInObjectProperties(),
initial_map->UnusedPropertyFields());
map->SetConstructor(initial_map->GetConstructor());
map->set_prototype(initial_map->prototype());
map->set_construction_counter(initial_map->construction_counter());
- if (TransitionsAccessor(initial_map).CanHaveMoreTransitions()) {
- Map::ConnectTransition(initial_map, map, transition_symbol,
+ if (TransitionsAccessor(isolate, initial_map).CanHaveMoreTransitions()) {
+ Map::ConnectTransition(isolate, initial_map, map, transition_symbol,
SPECIAL_TRANSITION);
}
return map;
}
-
-Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
+Handle<Map> Map::CopyForTransition(Isolate* isolate, Handle<Map> map,
+ const char* reason) {
DCHECK(!map->is_prototype_map());
- Handle<Map> new_map = CopyDropDescriptors(map);
+ Handle<Map> new_map = CopyDropDescriptors(isolate, map);
if (map->owns_descriptors()) {
// In case the map owned its own descriptors, share the descriptors and
@@ -9591,39 +9741,38 @@ Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
} else {
// In case the map did not own its own descriptors, a split is forced by
// copying the map; creating a new descriptor array cell.
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ isolate, descriptors, number_of_own_descriptors);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- map->GetIsolate());
+ isolate);
new_map->InitializeDescriptors(*new_descriptors, *new_layout_descriptor);
}
if (FLAG_trace_maps) {
- LOG(map->GetIsolate(),
- MapEvent("CopyForTransition", *map, *new_map, reason));
+ LOG(isolate, MapEvent("CopyForTransition", *map, *new_map, reason));
}
return new_map;
}
-
-Handle<Map> Map::Copy(Handle<Map> map, const char* reason) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ isolate, descriptors, number_of_own_descriptors);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- map->GetIsolate());
- return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
- OMIT_TRANSITION, MaybeHandle<Name>(), reason,
- SPECIAL_TRANSITION);
+ isolate);
+ return CopyReplaceDescriptors(
+ isolate, map, new_descriptors, new_layout_descriptor, OMIT_TRANSITION,
+ MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
}
Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
Handle<Map> copy =
- Copy(handle(isolate->object_function()->initial_map()), "MapCreate");
+ Copy(isolate, handle(isolate->object_function()->initial_map(), isolate),
+ "MapCreate");
// Check that we do not overflow the instance size when adding the extra
// inobject properties. If the instance size overflows, we allocate as many
@@ -9644,20 +9793,18 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
return copy;
}
-
-Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
+Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
PropertyAttributes attrs_to_add,
Handle<Symbol> transition_marker,
const char* reason) {
int num_descriptors = map->NumberOfOwnDescriptors();
- Isolate* isolate = map->GetIsolate();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
- handle(map->instance_descriptors(), isolate), num_descriptors,
+ isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
attrs_to_add);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
Handle<Map> new_map = CopyReplaceDescriptors(
- map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
+ isolate, map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
transition_marker, reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
@@ -9686,12 +9833,12 @@ bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
} else {
DCHECK_EQ(kDescriptor, details.location());
- DCHECK_EQ(kConst, details.constness());
+ DCHECK_EQ(PropertyConstness::kConst, details.constness());
if (details.kind() == kData) {
DCHECK(!FLAG_track_constant_fields);
- DCHECK(descriptors->GetValue(descriptor) != value ||
+ DCHECK(descriptors->GetStrongValue(descriptor) != value ||
value->FitsRepresentation(details.representation()));
- return descriptors->GetValue(descriptor) == value;
+ return descriptors->GetStrongValue(descriptor) == value;
} else {
DCHECK_EQ(kAccessor, details.kind());
return false;
@@ -9700,7 +9847,8 @@ bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
UNREACHABLE();
}
-Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
+Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
+ int descriptor,
PropertyConstness constness,
Handle<Object> value) {
if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
@@ -9708,7 +9856,6 @@ Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
return map;
}
- Isolate* isolate = map->GetIsolate();
PropertyAttributes attributes =
map->instance_descriptors()->GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation();
@@ -9722,61 +9869,65 @@ Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
} // namespace
// static
-Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
+Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
+ int descriptor,
PropertyConstness constness,
Handle<Object> value) {
// Dictionaries can store any property value.
DCHECK(!map->is_dictionary_map());
// Update to the newest map before storing the property.
- return UpdateDescriptorForValue(Update(map), descriptor, constness, value);
+ return UpdateDescriptorForValue(isolate, Update(isolate, map), descriptor,
+ constness, value);
}
-Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
+Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
PropertyConstness constness,
StoreFromKeyed store_mode) {
RuntimeCallTimerScope stats_scope(
- *map, map->is_prototype_map()
- ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
- : RuntimeCallCounterId::kMap_TransitionToDataProperty);
+ isolate, *map,
+ map->is_prototype_map()
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
+ : RuntimeCallCounterId::kMap_TransitionToDataProperty);
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
// Migrate to the newest map before storing the property.
- map = Update(map);
+ map = Update(isolate, map);
- Map* maybe_transition =
- TransitionsAccessor(map).SearchTransition(*name, kData, attributes);
+ Map* maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kData, attributes);
if (maybe_transition != nullptr) {
- Handle<Map> transition(maybe_transition);
+ Handle<Map> transition(maybe_transition, isolate);
int descriptor = transition->LastAdded();
DCHECK_EQ(attributes, transition->instance_descriptors()
->GetDetails(descriptor)
.attributes());
- return UpdateDescriptorForValue(transition, descriptor, constness, value);
+ return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
+ value);
}
TransitionFlag flag = INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!map->TooManyFastProperties(store_mode)) {
if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- maybe_map = Map::CopyWithConstant(map, name, value, attributes, flag);
+ maybe_map =
+ Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
} else {
- Isolate* isolate = name->GetIsolate();
Representation representation = value->OptimalRepresentation();
Handle<FieldType> type = value->OptimalType(isolate, representation);
- maybe_map = Map::CopyWithField(map, name, type, attributes, constness,
- representation, flag);
+ maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
+ constness, representation, flag);
}
}
Handle<Map> result;
if (!maybe_map.ToHandle(&result)) {
- Isolate* isolate = name->GetIsolate();
const char* reason = "TooManyFastProperties";
#if V8_TRACE_MAPS
std::unique_ptr<ScopedVector<char>> buffer;
@@ -9797,8 +9948,9 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
DCHECK_NE(*constructor,
constructor->context()->native_context()->object_function());
Handle<Map> initial_map(constructor->initial_map(), isolate);
- result = Map::Normalize(initial_map, CLEAR_INOBJECT_PROPERTIES, reason);
- initial_map->DeprecateTransitionTree();
+ result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
+ reason);
+ initial_map->DeprecateTransitionTree(isolate);
Handle<Object> prototype(result->prototype(), isolate);
JSFunction::SetInitialMap(constructor, result, prototype);
@@ -9807,19 +9959,19 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
isolate, DependentCode::kInitialMapChangedGroup);
if (!result->EquivalentToForNormalization(*map,
CLEAR_INOBJECT_PROPERTIES)) {
- result = Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, reason);
+ result =
+ Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
}
} else {
- result = Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, reason);
+ result = Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
}
}
return result;
}
-
-Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
- PropertyKind kind,
+Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
+ int descriptor, PropertyKind kind,
PropertyAttributes attributes) {
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
@@ -9827,17 +9979,15 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
if (!map->GetBackPointer()->IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
// back pointers.
- return CopyGeneralizeAllFields(map, map->elements_kind(), descriptor, kind,
- attributes,
+ return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
+ descriptor, kind, attributes,
"GenAll_AttributesMismatchProtoMap");
}
if (FLAG_trace_generalization) {
- map->PrintReconfiguration(stdout, descriptor, kind, attributes);
+ map->PrintReconfiguration(isolate, stdout, descriptor, kind, attributes);
}
- Isolate* isolate = map->GetIsolate();
-
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
@@ -9865,14 +10015,14 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
if (map->is_dictionary_map()) return map;
// Migrate to the newest map before transitioning to the new property.
- map = Update(map);
+ map = Update(isolate, map);
PropertyNormalizationMode mode = map->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- Map* maybe_transition =
- TransitionsAccessor(map).SearchTransition(*name, kAccessor, attributes);
+ Map* maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kAccessor, attributes);
if (maybe_transition != nullptr) {
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray* descriptors = transition->instance_descriptors();
@@ -9882,14 +10032,16 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
- Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
+ Handle<Object> maybe_pair(descriptors->GetStrongValue(descriptor), isolate);
if (!maybe_pair->IsAccessorPair()) {
- return Map::Normalize(map, mode, "TransitionToAccessorFromNonPair");
+ return Map::Normalize(isolate, map, mode,
+ "TransitionToAccessorFromNonPair");
}
Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
if (!pair->Equals(*getter, *setter)) {
- return Map::Normalize(map, mode, "TransitionToDifferentAccessor");
+ return Map::Normalize(isolate, map, mode,
+ "TransitionToDifferentAccessor");
}
return transition;
@@ -9899,20 +10051,22 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
DescriptorArray* old_descriptors = map->instance_descriptors();
if (descriptor != DescriptorArray::kNotFound) {
if (descriptor != map->LastAdded()) {
- return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
+ return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
}
PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
if (old_details.kind() != kAccessor) {
- return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
+ return Map::Normalize(isolate, map, mode,
+ "AccessorsOverwritingNonAccessors");
}
if (old_details.attributes() != attributes) {
- return Map::Normalize(map, mode, "AccessorsWithAttributes");
+ return Map::Normalize(isolate, map, mode, "AccessorsWithAttributes");
}
- Handle<Object> maybe_pair(old_descriptors->GetValue(descriptor), isolate);
+ Handle<Object> maybe_pair(old_descriptors->GetStrongValue(descriptor),
+ isolate);
if (!maybe_pair->IsAccessorPair()) {
- return Map::Normalize(map, mode, "AccessorsOverwritingNonPair");
+ return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonPair");
}
Handle<AccessorPair> current_pair = Handle<AccessorPair>::cast(maybe_pair);
@@ -9930,13 +10084,15 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
overwriting_accessor = true;
}
if (overwriting_accessor) {
- return Map::Normalize(map, mode, "AccessorsOverwritingAccessors");
+ return Map::Normalize(isolate, map, mode,
+ "AccessorsOverwritingAccessors");
}
- pair = AccessorPair::Copy(Handle<AccessorPair>::cast(maybe_pair));
+ pair = AccessorPair::Copy(isolate, Handle<AccessorPair>::cast(maybe_pair));
} else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
- return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, "TooManyAccessors");
+ return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
+ "TooManyAccessors");
} else {
pair = isolate->factory()->NewAccessorPair();
}
@@ -9945,92 +10101,90 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
TransitionFlag flag = INSERT_TRANSITION;
Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
- return Map::CopyInsertDescriptor(map, &d, flag);
+ return Map::CopyInsertDescriptor(isolate, map, &d, flag);
}
-
-Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
+Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
// Share descriptors only if map owns descriptors and it not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
- !map->GetBackPointer()->IsUndefined(map->GetIsolate()) &&
- TransitionsAccessor(map).CanHaveMoreTransitions()) {
- return ShareDescriptor(map, descriptors, descriptor);
+ !map->GetBackPointer()->IsUndefined(isolate) &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
+ return ShareDescriptor(isolate, map, descriptors, descriptor);
}
int nof = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpTo(descriptors, nof, 1);
+ DescriptorArray::CopyUpTo(isolate, descriptors, nof, 1);
new_descriptors->Append(descriptor);
Handle<LayoutDescriptor> new_layout_descriptor =
FLAG_unbox_double_fields
- ? LayoutDescriptor::New(map, new_descriptors, nof + 1)
- : handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
+ ? LayoutDescriptor::New(isolate, map, new_descriptors, nof + 1)
+ : handle(LayoutDescriptor::FastPointerLayout(), isolate);
- return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
- flag, descriptor->GetKey(), "CopyAddDescriptor",
- SIMPLE_PROPERTY_TRANSITION);
+ return CopyReplaceDescriptors(
+ isolate, map, new_descriptors, new_layout_descriptor, flag,
+ descriptor->GetKey(), "CopyAddDescriptor", SIMPLE_PROPERTY_TRANSITION);
}
-
-Handle<Map> Map::CopyInsertDescriptor(Handle<Map> map,
+Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors());
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
// We replace the key if it is already present.
- int index = old_descriptors->SearchWithCache(map->GetIsolate(),
- *descriptor->GetKey(), *map);
+ int index =
+ old_descriptors->SearchWithCache(isolate, *descriptor->GetKey(), *map);
if (index != DescriptorArray::kNotFound) {
- return CopyReplaceDescriptor(map, old_descriptors, descriptor, index, flag);
+ return CopyReplaceDescriptor(isolate, map, old_descriptors, descriptor,
+ index, flag);
}
- return CopyAddDescriptor(map, descriptor, flag);
+ return CopyAddDescriptor(isolate, map, descriptor, flag);
}
-
-Handle<DescriptorArray> DescriptorArray::CopyUpTo(
- Handle<DescriptorArray> desc,
- int enumeration_index,
- int slack) {
- return DescriptorArray::CopyUpToAddAttributes(
- desc, enumeration_index, NONE, slack);
+Handle<DescriptorArray> DescriptorArray::CopyUpTo(Isolate* isolate,
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ int slack) {
+ return DescriptorArray::CopyUpToAddAttributes(isolate, desc,
+ enumeration_index, NONE, slack);
}
-
Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
- Handle<DescriptorArray> desc,
- int enumeration_index,
- PropertyAttributes attributes,
- int slack) {
+ Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
+ PropertyAttributes attributes, int slack) {
if (enumeration_index + slack == 0) {
- return desc->GetIsolate()->factory()->empty_descriptor_array();
+ return isolate->factory()->empty_descriptor_array();
}
int size = enumeration_index;
Handle<DescriptorArray> descriptors =
- DescriptorArray::Allocate(desc->GetIsolate(), size, slack);
+ DescriptorArray::Allocate(isolate, size, slack);
if (attributes != NONE) {
for (int i = 0; i < size; ++i) {
- Object* value = desc->GetValue(i);
+ MaybeObject* value_or_field_type = desc->GetValue(i);
Name* key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
// Bulk attribute changes never affect private properties.
if (!key->IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if (details.kind() != kAccessor || !value->IsAccessorPair()) {
+ HeapObject* heap_object;
+ if (details.kind() != kAccessor ||
+ !(value_or_field_type->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsAccessorPair())) {
mask |= READ_ONLY;
}
details = details.CopyAddAttributes(
static_cast<PropertyAttributes>(attributes & mask));
}
- descriptors->Set(i, key, value, details);
+ descriptors->Set(i, key, value_or_field_type, details);
}
} else {
for (int i = 0; i < size; ++i) {
@@ -10060,8 +10214,7 @@ bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
return true;
}
-
-Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
+Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor,
int insertion_index,
@@ -10074,22 +10227,23 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- descriptors, map->NumberOfOwnDescriptors());
+ isolate, descriptors, map->NumberOfOwnDescriptors());
new_descriptors->Replace(insertion_index, descriptor);
Handle<LayoutDescriptor> new_layout_descriptor = LayoutDescriptor::New(
- map, new_descriptors, new_descriptors->number_of_descriptors());
+ isolate, map, new_descriptors, new_descriptors->number_of_descriptors());
SimpleTransitionFlag simple_flag =
(insertion_index == descriptors->number_of_descriptors() - 1)
? SIMPLE_PROPERTY_TRANSITION
: PROPERTY_TRANSITION;
- return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
- flag, key, "CopyReplaceDescriptor",
- simple_flag);
+ return CopyReplaceDescriptors(isolate, map, new_descriptors,
+ new_layout_descriptor, flag, key,
+ "CopyReplaceDescriptor", simple_flag);
}
-Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
+Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
+ Handle<FixedArray> array, int index,
Handle<Object> value,
PretenureFlag pretenure) {
if (index < array->length()) {
@@ -10101,8 +10255,7 @@ Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
capacity = JSObject::NewElementsCapacity(capacity);
} while (capacity <= index);
Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity,
- pretenure);
+ isolate->factory()->NewUninitializedFixedArray(capacity, pretenure);
array->CopyTo(0, *new_array, 0, array->length());
new_array->FillWithHoles(array->length(), new_array->length());
new_array->set(index, *value);
@@ -10123,16 +10276,30 @@ bool FixedArray::ContainsSortedNumbers() {
return true;
}
-void FixedArray::Shrink(int new_length) {
- DCHECK(0 <= new_length && new_length <= length());
+Handle<FixedArray> FixedArray::ShrinkOrEmpty(Isolate* isolate,
+ Handle<FixedArray> array,
+ int new_length) {
+ if (new_length == 0) {
+ return array->GetReadOnlyRoots().empty_fixed_array_handle();
+ } else {
+ array->Shrink(isolate, new_length);
+ return array;
+ }
+}
+
+void FixedArray::Shrink(Isolate* isolate, int new_length) {
+ DCHECK(0 < new_length && new_length <= length());
if (new_length < length()) {
- GetHeap()->RightTrimFixedArray(this, length() - new_length);
+ isolate->heap()->RightTrimFixedArray(this, length() - new_length);
}
}
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos,
int len) const {
DisallowHeapAllocation no_gc;
+ // Return early if len == 0 so that we don't try to read the write barrier off
+ // a canonical read-only empty fixed array.
+ if (len == 0) return;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
@@ -10149,20 +10316,14 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
}
#endif
-void WeakFixedArray::Shrink(int new_length) {
- DCHECK(0 <= new_length && new_length <= length());
- if (new_length < length()) {
- GetHeap()->RightTrimWeakFixedArray(this, length() - new_length);
- }
-}
-
// static
-void FixedArrayOfWeakCells::Set(Handle<FixedArrayOfWeakCells> array, int index,
+void FixedArrayOfWeakCells::Set(Isolate* isolate,
+ Handle<FixedArrayOfWeakCells> array, int index,
Handle<HeapObject> value) {
DCHECK(array->IsEmptySlot(index)); // Don't overwrite anything.
Handle<WeakCell> cell =
- value->IsMap() ? Map::WeakCellForMap(Handle<Map>::cast(value))
- : array->GetIsolate()->factory()->NewWeakCell(value);
+ value->IsMap() ? Map::WeakCellForMap(isolate, Handle<Map>::cast(value))
+ : isolate->factory()->NewWeakCell(value);
Handle<FixedArray>::cast(array)->set(index + kFirstIndex, *cell);
array->set_last_used_index(index);
}
@@ -10170,11 +10331,11 @@ void FixedArrayOfWeakCells::Set(Handle<FixedArrayOfWeakCells> array, int index,
// static
Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Add(
- Handle<Object> maybe_array, Handle<HeapObject> value, int* assigned_index) {
+ Isolate* isolate, Handle<Object> maybe_array, Handle<HeapObject> value,
+ int* assigned_index) {
Handle<FixedArrayOfWeakCells> array =
(maybe_array.is_null() || !maybe_array->IsFixedArrayOfWeakCells())
- ? Allocate(value->GetIsolate(), 1,
- Handle<FixedArrayOfWeakCells>::null())
+ ? Allocate(isolate, 1, Handle<FixedArrayOfWeakCells>::null())
: Handle<FixedArrayOfWeakCells>::cast(maybe_array);
// Try to store the new entry if there's room. Optimize for consecutive
// accesses.
@@ -10183,7 +10344,7 @@ Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Add(
if (length > 0) {
for (int i = first_index;;) {
if (array->IsEmptySlot((i))) {
- FixedArrayOfWeakCells::Set(array, i, value);
+ FixedArrayOfWeakCells::Set(isolate, array, i, value);
if (assigned_index != nullptr) *assigned_index = i;
return array;
}
@@ -10195,14 +10356,14 @@ Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Add(
// No usable slot found, grow the array.
int new_length = length == 0 ? 1 : length + (length >> 1) + 4;
Handle<FixedArrayOfWeakCells> new_array =
- Allocate(array->GetIsolate(), new_length, array);
- FixedArrayOfWeakCells::Set(new_array, length, value);
+ Allocate(isolate, new_length, array);
+ FixedArrayOfWeakCells::Set(isolate, new_array, length, value);
if (assigned_index != nullptr) *assigned_index = length;
return new_array;
}
template <class CompactionCallback>
-void FixedArrayOfWeakCells::Compact() {
+void FixedArrayOfWeakCells::Compact(Isolate* isolate) {
FixedArray* array = FixedArray::cast(this);
int new_length = kFirstIndex;
for (int i = kFirstIndex; i < array->length(); i++) {
@@ -10214,7 +10375,7 @@ void FixedArrayOfWeakCells::Compact() {
new_length - kFirstIndex);
array->set(new_length++, element);
}
- array->Shrink(new_length);
+ array->Shrink(isolate, new_length);
set_last_used_index(0);
}
@@ -10228,10 +10389,9 @@ void FixedArrayOfWeakCells::Iterator::Reset(Object* maybe_array) {
}
}
-
-void JSObject::PrototypeRegistryCompactionCallback::Callback(Object* value,
- int old_index,
- int new_index) {
+void JSObject::PrototypeRegistryCompactionCallback(HeapObject* value,
+ int old_index,
+ int new_index) {
DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
Map* map = Map::cast(value);
DCHECK(map->prototype_info()->IsPrototypeInfo());
@@ -10240,10 +10400,8 @@ void JSObject::PrototypeRegistryCompactionCallback::Callback(Object* value,
proto_info->set_registry_slot(new_index);
}
-template void
-FixedArrayOfWeakCells::Compact<FixedArrayOfWeakCells::NullCallback>();
-template void
-FixedArrayOfWeakCells::Compact<JSObject::PrototypeRegistryCompactionCallback>();
+template void FixedArrayOfWeakCells::Compact<
+ FixedArrayOfWeakCells::NullCallback>(Isolate* isolate);
bool FixedArrayOfWeakCells::Remove(Handle<HeapObject> value) {
if (Length() == 0) return false;
@@ -10288,9 +10446,10 @@ Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Allocate(
}
// static
-Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj) {
+Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj) {
int length = array->Length();
- array = EnsureSpace(array, length + 1);
+ array = EnsureSpace(isolate, array, length + 1);
// Check that GC didn't remove elements from the array.
DCHECK_EQ(array->Length(), length);
array->Set(length, *obj);
@@ -10299,10 +10458,10 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj) {
}
// static
-Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2) {
+Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj1, Handle<Object> obj2) {
int length = array->Length();
- array = EnsureSpace(array, length + 2);
+ array = EnsureSpace(isolate, array, length + 2);
// Check that GC didn't remove elements from the array.
DCHECK_EQ(array->Length(), length);
array->Set(length, *obj1);
@@ -10315,16 +10474,17 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArray(size + kFirstIndex);
- fixed_array->set_map_no_write_barrier(isolate->heap()->array_list_map());
+ fixed_array->set_map_no_write_barrier(
+ ReadOnlyRoots(isolate).array_list_map());
Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
result->SetLength(0);
return result;
}
-Handle<FixedArray> ArrayList::Elements(Handle<ArrayList> array) {
+Handle<FixedArray> ArrayList::Elements(Isolate* isolate,
+ Handle<ArrayList> array) {
int length = array->Length();
- Handle<FixedArray> result =
- array->GetIsolate()->factory()->NewFixedArray(length);
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
// Do not copy the first entry, i.e., the length.
array->CopyTo(kFirstIndex, *result, 0, length);
return result;
@@ -10337,11 +10497,11 @@ bool ArrayList::IsFull() {
namespace {
-Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
+Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
+ Handle<FixedArray> array,
int length) {
int capacity = array->length();
if (capacity < length) {
- Isolate* isolate = array->GetIsolate();
int new_capacity = length;
new_capacity = new_capacity + Max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
@@ -10353,11 +10513,12 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
} // namespace
// static
-Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+Handle<ArrayList> ArrayList::EnsureSpace(Isolate* isolate,
+ Handle<ArrayList> array, int length) {
const bool empty = (array->length() == 0);
- auto ret = EnsureSpaceInFixedArray(array, kFirstIndex + length);
+ auto ret = EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length);
if (empty) {
- ret->set_map_no_write_barrier(array->GetHeap()->array_list_map());
+ ret->set_map_no_write_barrier(array->GetReadOnlyRoots().array_list_map());
Handle<ArrayList>::cast(ret)->SetLength(0);
}
@@ -10365,26 +10526,26 @@ Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
}
// static
-Handle<WeakArrayList> WeakArrayList::Add(Handle<WeakArrayList> array,
- Handle<HeapObject> obj1, Smi* obj2) {
+Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ MaybeObjectHandle value) {
int length = array->length();
- array = EnsureSpace(array, length + 2);
+ array = EnsureSpace(isolate, array, length + 1);
// Check that GC didn't remove elements from the array.
DCHECK_EQ(array->length(), length);
- array->Set(length, HeapObjectReference::Weak(*obj1));
- array->Set(length + 1, MaybeObject::FromSmi(obj2));
- array->set_length(length + 2);
+ array->Set(length, *value);
+ array->set_length(length + 1);
return array;
}
bool WeakArrayList::IsFull() { return length() == capacity(); }
// static
-Handle<WeakArrayList> WeakArrayList::EnsureSpace(Handle<WeakArrayList> array,
+Handle<WeakArrayList> WeakArrayList::EnsureSpace(Isolate* isolate,
+ Handle<WeakArrayList> array,
int length) {
int capacity = array->capacity();
if (capacity < length) {
- Isolate* isolate = array->GetIsolate();
int new_capacity = length;
new_capacity = new_capacity + Max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
@@ -10393,12 +10554,97 @@ Handle<WeakArrayList> WeakArrayList::EnsureSpace(Handle<WeakArrayList> array,
return array;
}
+// static
+Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ Handle<Map> value,
+ int* assigned_index) {
+ int length = array->length();
+ if (length == 0) {
+ // Uninitialized WeakArrayList; need to initialize empty_slot_index.
+ array = WeakArrayList::EnsureSpace(isolate, array, kFirstIndex + 1);
+ set_empty_slot_index(*array, kNoEmptySlotsMarker);
+ array->Set(kFirstIndex, HeapObjectReference::Weak(*value));
+ array->set_length(kFirstIndex + 1);
+ if (assigned_index != nullptr) *assigned_index = kFirstIndex;
+ return array;
+ }
+
+ // If the array has unfilled space at the end, use it.
+ if (!array->IsFull()) {
+ array->Set(length, HeapObjectReference::Weak(*value));
+ array->set_length(length + 1);
+ if (assigned_index != nullptr) *assigned_index = length;
+ return array;
+ }
+
+ // If there are empty slots, use one of them.
+ int empty_slot = Smi::ToInt(empty_slot_index(*array));
+ if (empty_slot != kNoEmptySlotsMarker) {
+ DCHECK_GE(empty_slot, kFirstIndex);
+ CHECK_LT(empty_slot, array->length());
+ int next_empty_slot = Smi::ToInt(array->Get(empty_slot)->ToSmi());
+
+ array->Set(empty_slot, HeapObjectReference::Weak(*value));
+ if (assigned_index != nullptr) *assigned_index = empty_slot;
+
+ set_empty_slot_index(*array, next_empty_slot);
+ return array;
+ } else {
+ DCHECK_EQ(empty_slot, kNoEmptySlotsMarker);
+ }
+
+ // Array full and no empty slots. Grow the array.
+ array = WeakArrayList::EnsureSpace(isolate, array, length + 1);
+ array->Set(length, HeapObjectReference::Weak(*value));
+ array->set_length(length + 1);
+ if (assigned_index != nullptr) *assigned_index = length;
+ return array;
+}
+
+WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
+ CompactionCallback callback) {
+ if (array->length() == 0) {
+ return *array;
+ }
+ // Count the amount of live references.
+ int new_length = kFirstIndex;
+ for (int i = kFirstIndex; i < array->length(); i++) {
+ MaybeObject* element = array->Get(i);
+ if (element->IsSmi()) continue;
+ if (element->IsClearedWeakHeapObject()) continue;
+ ++new_length;
+ }
+ if (new_length == array->length()) {
+ return *array;
+ }
+
+ Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
+ heap->isolate(),
+ handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
+ new_length);
+ // Allocation might have caused GC and turned some of the elements into
+ // cleared weak heap objects. Count the number of live objects again.
+ int copy_to = kFirstIndex;
+ for (int i = kFirstIndex; i < array->length(); i++) {
+ MaybeObject* element = array->Get(i);
+ if (element->IsSmi()) continue;
+ if (element->IsClearedWeakHeapObject()) continue;
+ HeapObject* value = element->ToWeakHeapObject();
+ callback(value, i, copy_to);
+ new_array->Set(copy_to++, element);
+ }
+ new_array->set_length(copy_to);
+ set_empty_slot_index(*new_array, kNoEmptySlotsMarker);
+ return *new_array;
+}
+
Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
- Handle<RegExpMatchInfo> match_info, int capture_count) {
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count) {
DCHECK_GE(match_info->length(), kLastMatchOverhead);
const int required_length = kFirstCaptureIndex + capture_count;
Handle<FixedArray> result =
- EnsureSpaceInFixedArray(match_info, required_length);
+ EnsureSpaceInFixedArray(isolate, match_info, required_length);
return Handle<RegExpMatchInfo>::cast(result);
}
@@ -10410,7 +10656,8 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
- Handle<FrameArray> array = EnsureSpace(in, new_length);
+ Handle<FrameArray> array =
+ EnsureSpace(function->GetIsolate(), in, new_length);
array->SetReceiver(frame_count, *receiver);
array->SetFunction(frame_count, *function);
array->SetCode(frame_count, *code);
@@ -10426,7 +10673,8 @@ Handle<FrameArray> FrameArray::AppendWasmFrame(
int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
- Handle<FrameArray> array = EnsureSpace(in, new_length);
+ Handle<FrameArray> array =
+ EnsureSpace(wasm_instance->GetIsolate(), in, new_length);
array->SetWasmInstance(frame_count, *wasm_instance);
array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
// The {code} will be {nullptr} for interpreted wasm frames.
@@ -10437,12 +10685,16 @@ Handle<FrameArray> FrameArray::AppendWasmFrame(
return array;
}
-void FrameArray::ShrinkToFit() { Shrink(LengthFor(FrameCount())); }
+void FrameArray::ShrinkToFit(Isolate* isolate) {
+ Shrink(isolate, LengthFor(FrameCount()));
+}
// static
-Handle<FrameArray> FrameArray::EnsureSpace(Handle<FrameArray> array,
+Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
+ Handle<FrameArray> array,
int length) {
- return Handle<FrameArray>::cast(EnsureSpaceInFixedArray(array, length));
+ return Handle<FrameArray>::cast(
+ EnsureSpaceInFixedArray(isolate, array, length));
}
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
@@ -10455,15 +10707,19 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int size = number_of_descriptors + slack;
if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- Handle<FixedArray> result = factory->NewFixedArrayWithMap(
- Heap::kDescriptorArrayMapRootIndex, LengthFor(size), pretenure);
- result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
- result->set(kEnumCacheIndex, isolate->heap()->empty_enum_cache());
+ Handle<WeakFixedArray> result =
+ factory->NewWeakFixedArrayWithMap<DescriptorArray>(
+ Heap::kDescriptorArrayMapRootIndex, LengthFor(size), pretenure);
+ result->Set(kDescriptorLengthIndex,
+ MaybeObject::FromObject(Smi::FromInt(number_of_descriptors)));
+ result->Set(kEnumCacheIndex, MaybeObject::FromObject(
+ ReadOnlyRoots(isolate).empty_enum_cache()));
return Handle<DescriptorArray>::cast(result);
}
void DescriptorArray::ClearEnumCache() {
- set(kEnumCacheIndex, GetHeap()->empty_enum_cache());
+ set(kEnumCacheIndex,
+ MaybeObject::FromObject(GetReadOnlyRoots().empty_enum_cache()));
}
void DescriptorArray::Replace(int index, Descriptor* descriptor) {
@@ -10476,9 +10732,9 @@ void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
Isolate* isolate, Handle<FixedArray> keys,
Handle<FixedArray> indices) {
EnumCache* enum_cache = descriptors->GetEnumCache();
- if (enum_cache == isolate->heap()->empty_enum_cache()) {
+ if (enum_cache == ReadOnlyRoots(isolate).empty_enum_cache()) {
enum_cache = *isolate->factory()->NewEnumCache(keys, indices);
- descriptors->set(kEnumCacheIndex, enum_cache);
+ descriptors->set(kEnumCacheIndex, MaybeObject::FromObject(enum_cache));
} else {
enum_cache->set_keys(*keys);
enum_cache->set_indices(*indices);
@@ -10544,23 +10800,23 @@ void DescriptorArray::Sort() {
DCHECK(IsSortedNoDuplicates());
}
-
-Handle<AccessorPair> AccessorPair::Copy(Handle<AccessorPair> pair) {
- Handle<AccessorPair> copy = pair->GetIsolate()->factory()->NewAccessorPair();
+Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
+ Handle<AccessorPair> pair) {
+ Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair();
copy->set_getter(pair->getter());
copy->set_setter(pair->setter());
return copy;
}
-Handle<Object> AccessorPair::GetComponent(Handle<AccessorPair> accessor_pair,
+Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
+ Handle<AccessorPair> accessor_pair,
AccessorComponent component) {
Object* accessor = accessor_pair->get(component);
if (accessor->IsFunctionTemplateInfo()) {
return ApiNatives::InstantiateFunction(
- handle(FunctionTemplateInfo::cast(accessor)))
+ handle(FunctionTemplateInfo::cast(accessor), isolate))
.ToHandleChecked();
}
- Isolate* isolate = accessor_pair->GetIsolate();
if (accessor->IsNull(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -10598,9 +10854,9 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
// static
-Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
- Isolate* const isolate = string->GetIsolate();
- string = String::Flatten(string);
+Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
+ TrimMode mode) {
+ string = String::Flatten(isolate, string);
int const length = string->length();
// Perform left trimming if requested.
@@ -10626,13 +10882,20 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
return isolate->factory()->NewSubString(string, left, right);
}
-bool String::LooksValid() { return GetIsolate()->heap()->Contains(this); }
+bool String::LooksValid() {
+ // TODO(leszeks): Maybe remove this check entirely, Heap::Contains uses
+ // basically the same logic as the way we access the heap in the first place.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ // RO_SPACE objects should always be valid.
+ if (chunk->owner()->identity() == RO_SPACE) return true;
+ if (chunk->heap() == nullptr) return false;
+ return chunk->heap()->Contains(this);
+}
// static
-MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
+MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name) {
if (name->IsString()) return Handle<String>::cast(name);
// ES6 section 9.2.11 SetFunctionName, step 4.
- Isolate* const isolate = name->GetIsolate();
Handle<Object> description(Handle<Symbol>::cast(name)->name(), isolate);
if (description->IsUndefined(isolate)) {
return isolate->factory()->empty_string();
@@ -10645,12 +10908,11 @@ MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
}
// static
-MaybeHandle<String> Name::ToFunctionName(Handle<Name> name,
+MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name,
Handle<String> prefix) {
Handle<String> name_string;
- Isolate* const isolate = name->GetIsolate();
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name_string, ToFunctionName(name),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name_string,
+ ToFunctionName(isolate, name), String);
IncrementalStringBuilder builder(isolate);
builder.AppendString(prefix);
builder.AppendCharacter(' ');
@@ -10684,11 +10946,9 @@ int ParseDecimalInteger(const uint8_t* s, int from, int to) {
} // namespace
// static
-Handle<Object> String::ToNumber(Handle<String> subject) {
- Isolate* const isolate = subject->GetIsolate();
-
+Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
// Flatten {subject} string first.
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
// Fast array index case.
uint32_t index;
@@ -10742,7 +11002,7 @@ Handle<Object> String::ToNumber(Handle<String> subject) {
// Slower case.
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
return isolate->factory()->NewNumber(
- StringToDouble(isolate->unicode_cache(), subject, flags));
+ StringToDouble(isolate, isolate->unicode_cache(), subject, flags));
}
@@ -11243,16 +11503,15 @@ static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
}
}
-
-Handle<FixedArray> String::CalculateLineEnds(Handle<String> src,
+Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
+ Handle<String> src,
bool include_ending_line) {
- src = Flatten(src);
+ src = Flatten(isolate, src);
// Rough estimate of line count based on a roughly estimated average
// length of (unpacked) code.
int line_count_estimate = src->length() >> 4;
std::vector<int> line_ends;
line_ends.reserve(line_count_estimate);
- Isolate* isolate = src->GetIsolate();
{ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
@@ -11481,8 +11740,8 @@ bool String::SlowEquals(String* other) {
return comparator.Equals(this, other);
}
-
-bool String::SlowEquals(Handle<String> one, Handle<String> two) {
+bool String::SlowEquals(Isolate* isolate, Handle<String> one,
+ Handle<String> two) {
// Fast check: negative check with lengths.
int one_length = one->length();
if (one_length != two->length()) return false;
@@ -11491,9 +11750,11 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
if (one->IsThinString() || two->IsThinString()) {
- if (one->IsThinString()) one = handle(ThinString::cast(*one)->actual());
- if (two->IsThinString()) two = handle(ThinString::cast(*two)->actual());
- return String::Equals(one, two);
+ if (one->IsThinString())
+ one = handle(ThinString::cast(*one)->actual(), isolate);
+ if (two->IsThinString())
+ two = handle(ThinString::cast(*two)->actual(), isolate);
+ return String::Equals(isolate, one, two);
}
// Fast check: if hash code is computed for both strings
@@ -11520,8 +11781,8 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
// before we try to flatten the strings.
if (one->Get(0) != two->Get(0)) return false;
- one = String::Flatten(one);
- two = String::Flatten(two);
+ one = String::Flatten(isolate, one);
+ two = String::Flatten(isolate, two);
DisallowHeapAllocation no_gc;
String::FlatContent flat1 = one->GetFlatContent();
@@ -11541,7 +11802,8 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
// static
-ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
+ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
+ Handle<String> y) {
// A few fast case tests before we flatten.
if (x.is_identical_to(y)) {
return ComparisonResult::kEqual;
@@ -11560,8 +11822,8 @@ ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
}
// Slow case.
- x = String::Flatten(x);
- y = String::Flatten(y);
+ x = String::Flatten(isolate, x);
+ y = String::Flatten(isolate, y);
DisallowHeapAllocation no_gc;
ComparisonResult result = ComparisonResult::kEqual;
@@ -11652,8 +11914,8 @@ int String::IndexOf(Isolate* isolate, Handle<String> receiver,
uint32_t receiver_length = receiver->length();
if (start_index + search_length > receiver_length) return -1;
- receiver = String::Flatten(receiver);
- search = String::Flatten(search);
+ receiver = String::Flatten(isolate, receiver);
+ search = String::Flatten(isolate, search);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before getting encoding.
@@ -11674,7 +11936,6 @@ int String::IndexOf(Isolate* isolate, Handle<String> receiver,
MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
Handle<String> replacement,
int start_index) {
- DCHECK_IMPLIES(match->HasNamedCaptures(), FLAG_harmony_regexp_named_captures);
DCHECK_GE(start_index, 0);
Factory* factory = isolate->factory();
@@ -11682,7 +11943,7 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
const int replacement_length = replacement->length();
const int captures_length = match->CaptureCount();
- replacement = String::Flatten(replacement);
+ replacement = String::Flatten(isolate, replacement);
Handle<String> dollar_string =
factory->LookupSingleCharacterStringFromCode('$');
@@ -11891,7 +12152,7 @@ Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
Object::ToString(isolate, search));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToNumber(position));
+ Object::ToNumber(isolate, position));
uint32_t start_index;
@@ -11914,8 +12175,8 @@ Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
return Smi::FromInt(start_index);
}
- receiver_string = String::Flatten(receiver_string);
- search_string = String::Flatten(search_string);
+ receiver_string = String::Flatten(isolate, receiver_string);
+ search_string = String::Flatten(isolate, search_string);
int last_index = -1;
DisallowHeapAllocation no_gc; // ensure vectors stay valid
@@ -12005,13 +12266,13 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
return true;
}
-
-uint32_t String::ComputeAndSetHash() {
+uint32_t String::ComputeAndSetHash(Isolate* isolate) {
// Should only be called if hash code has not yet been computed.
DCHECK(!HasHashCode());
// Store the hash code in the object.
- uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
+ uint32_t field =
+ IteratingStringHasher::Hash(this, isolate->heap()->HashSeed());
set_hash_field(field);
// Check the hash code is there.
@@ -12045,8 +12306,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
- Heap* heap = string->GetHeap();
- if (new_length == 0) return heap->isolate()->factory()->empty_string();
+ if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
int new_size, old_size;
int old_length = string->length();
@@ -12067,6 +12327,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
DCHECK_OBJECT_ALIGNED(start_of_string);
DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
+ Heap* heap = Heap::FromWritableHeapObject(*string);
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta,
@@ -12125,9 +12386,7 @@ uint32_t StringHasher::GetHashField() {
}
}
-
-uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
- uint32_t seed,
+uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars, uint64_t seed,
int* utf16_length_out) {
int vector_length = chars.length();
// Handle some edge cases
@@ -12360,13 +12619,13 @@ static void StopSlackTracking(Map* map, void* data) {
map->set_construction_counter(Map::kNoSlackTracking);
}
-void Map::CompleteInobjectSlackTracking() {
+void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ DCHECK(GetBackPointer()->IsUndefined(isolate));
int slack = UnusedPropertyFields();
- TransitionsAccessor transitions(this, &no_gc);
+ TransitionsAccessor transitions(isolate, this, &no_gc);
transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
@@ -12402,7 +12661,7 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
// If the map is already marked as should be fast, we're done. Its
// prototypes will have been marked already as well.
if (current_map->should_be_fast_prototype_map()) return;
- Handle<Map> map(current_map);
+ Handle<Map> map(current_map, isolate);
Map::SetShouldBeFastPrototypeMap(map, true, isolate);
JSObject::OptimizeAsPrototype(current_obj);
}
@@ -12424,7 +12683,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
- Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
+ Handle<Map> new_map = Map::Copy(object->GetIsolate(),
+ handle(object->map(), object->GetIsolate()),
+ "CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
object->map()->set_is_prototype_map(true);
@@ -12461,7 +12722,7 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
Handle<Map> current_user = user;
Handle<PrototypeInfo> current_user_info =
Map::GetOrCreatePrototypeInfo(user, isolate);
- for (PrototypeIterator iter(user); !iter.IsAtEnd(); iter.Advance()) {
+ for (PrototypeIterator iter(isolate, user); !iter.IsAtEnd(); iter.Advance()) {
// Walk up the prototype chain as far as links haven't been registered yet.
if (current_user_info->registry_slot() != PrototypeInfo::UNREGISTERED) {
break;
@@ -12474,9 +12735,14 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
Handle<PrototypeInfo> proto_info =
Map::GetOrCreatePrototypeInfo(proto, isolate);
Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
+ Handle<WeakArrayList> registry =
+ maybe_registry->IsSmi()
+ ? handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(),
+ isolate)
+ : Handle<WeakArrayList>::cast(maybe_registry);
int slot = 0;
- Handle<FixedArrayOfWeakCells> new_array =
- FixedArrayOfWeakCells::Add(maybe_registry, current_user, &slot);
+ Handle<WeakArrayList> new_array =
+ PrototypeUsers::Add(isolate, registry, current_user, &slot);
current_user_info->set_registry_slot(slot);
if (!maybe_registry.is_identical_to(new_array)) {
proto_info->set_prototype_users(*new_array);
@@ -12506,7 +12772,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
if (!user->prototype()->IsJSObject()) {
Object* users =
PrototypeInfo::cast(user->prototype_info())->prototype_users();
- return users->IsFixedArrayOfWeakCells();
+ return users->IsWeakArrayList();
}
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
@@ -12519,10 +12785,10 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(maybe_proto_info->IsPrototypeInfo());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
isolate);
- Object* maybe_registry = proto_info->prototype_users();
- DCHECK(maybe_registry->IsFixedArrayOfWeakCells());
- DCHECK(FixedArrayOfWeakCells::cast(maybe_registry)->Get(slot) == *user);
- FixedArrayOfWeakCells::cast(maybe_registry)->Clear(slot);
+ Handle<WeakArrayList> prototype_users(
+ WeakArrayList::cast(proto_info->prototype_users()), isolate);
+ DCHECK_EQ(prototype_users->Get(slot), HeapObjectReference::Weak(*user));
+ PrototypeUsers::MarkSlotEmpty(*prototype_users, slot);
if (FLAG_trace_prototype_users) {
PrintF("Unregistering %p as a user of prototype %p.\n",
reinterpret_cast<void*>(*user), reinterpret_cast<void*>(*prototype));
@@ -12555,12 +12821,18 @@ void InvalidatePrototypeChainsInternal(Map* map) {
Object* maybe_proto_info = map->prototype_info();
if (!maybe_proto_info->IsPrototypeInfo()) return;
PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
- FixedArrayOfWeakCells::Iterator iterator(proto_info->prototype_users());
+ WeakArrayList* prototype_users =
+ WeakArrayList::cast(proto_info->prototype_users());
// For now, only maps register themselves as users.
- Map* user;
- while ((user = iterator.Next<Map>()) != nullptr) {
- // Walk the prototype chain (backwards, towards leaf objects) if necessary.
- InvalidatePrototypeChainsInternal(user);
+ for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
+ ++i) {
+ HeapObject* heap_object;
+ if (prototype_users->Get(i)->ToWeakHeapObject(&heap_object) &&
+ heap_object->IsMap()) {
+ // Walk the prototype chain (backwards, towards leaf objects) if
+ // necessary.
+ InvalidatePrototypeChainsInternal(Map::cast(heap_object));
+ }
}
}
@@ -12694,9 +12966,10 @@ Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
}
// static
-void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
+void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
+ Handle<Object> prototype,
bool enable_prototype_setup_mode) {
- RuntimeCallTimerScope stats_scope(*map,
+ RuntimeCallTimerScope stats_scope(isolate, *map,
RuntimeCallCounterId::kMap_SetPrototype);
bool is_hidden = false;
@@ -12719,15 +12992,13 @@ void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
}
map->set_has_hidden_prototype(is_hidden);
- WriteBarrierMode wb_mode = prototype->IsNull(map->GetIsolate())
- ? SKIP_WRITE_BARRIER
- : UPDATE_WRITE_BARRIER;
+ WriteBarrierMode wb_mode =
+ prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
map->set_prototype(*prototype, wb_mode);
}
-
-Handle<Object> CacheInitialJSArrayMaps(
- Handle<Context> native_context, Handle<Map> initial_map) {
+Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+ Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
Handle<Map> current_map = initial_map;
@@ -12739,10 +13010,11 @@ Handle<Object> CacheInitialJSArrayMaps(
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
if (Map* maybe_elements_transition = current_map->ElementsTransitionMap()) {
- new_map = handle(maybe_elements_transition);
+ new_map = handle(maybe_elements_transition, native_context->GetIsolate());
} else {
- new_map = Map::CopyAsElementsKind(
- current_map, next_kind, INSERT_TRANSITION);
+ new_map =
+ Map::CopyAsElementsKind(native_context->GetIsolate(), current_map,
+ next_kind, INSERT_TRANSITION);
}
DCHECK_EQ(next_kind, new_map->elements_kind());
native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
@@ -12766,14 +13038,15 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
Handle<Map> initial_map(function->initial_map(), isolate);
- if (!initial_map->GetIsolate()->bootstrapper()->IsActive() &&
+ if (!isolate->bootstrapper()->IsActive() &&
initial_map->instance_type() == JS_OBJECT_TYPE) {
// Put the value in the initial map field until an initial map is needed.
// At that point, a new initial map is created and the prototype is put
// into the initial map where it belongs.
function->set_prototype_or_initial_map(*value);
} else {
- Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
+ Handle<Map> new_map =
+ Map::Copy(isolate, initial_map, "SetInstancePrototype");
JSFunction::SetInitialMap(function, new_map, value);
// If the function is used as the global Array function, cache the
@@ -12820,14 +13093,16 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
- Handle<Map> new_map = Map::Copy(handle(function->map()), "SetPrototype");
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
JSObject::MigrateToMap(function, new_map);
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
FunctionKind kind = function->shared()->kind();
- Handle<Context> native_context(function->context()->native_context());
+ Handle<Context> native_context(function->context()->native_context(),
+ isolate);
construct_prototype = Handle<JSReceiver>(
IsGeneratorFunction(kind)
@@ -12844,15 +13119,15 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
SetInstancePrototype(isolate, function, construct_prototype);
}
-
void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<Object> prototype) {
- if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
+ if (map->prototype() != *prototype)
+ Map::SetPrototype(function->GetIsolate(), map, prototype);
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
if (FLAG_trace_maps) {
- LOG(map->GetIsolate(), MapEvent("InitialMap", nullptr, *map, "",
- function->shared()->DebugName()));
+ LOG(function->GetIsolate(), MapEvent("InitialMap", nullptr, *map, "",
+ function->shared()->DebugName()));
}
}
@@ -12893,18 +13168,26 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
return true;
case BIGINT_TYPE:
- case BOILERPLATE_DESCRIPTION_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case BYTECODE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case CELL_TYPE:
case CODE_TYPE:
case FILLER_TYPE:
case FIXED_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
case FEEDBACK_METADATA_TYPE:
case FOREIGN_TYPE:
case FREE_SPACE_TYPE:
case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
case HEAP_NUMBER_TYPE:
case JS_BOUND_FUNCTION_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
@@ -12917,6 +13200,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case SHARED_FUNCTION_INFO_TYPE:
case SYMBOL_TYPE:
case WEAK_CELL_TYPE:
+ case ALLOCATION_SITE_TYPE:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE:
@@ -13022,10 +13306,10 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
constructor_initial_map->UnusedPropertyFields();
CHECK_LE(constructor_initial_map->UsedInstanceSize(), instance_size);
int unused_property_fields = in_object_properties - pre_allocated;
- map = Map::CopyInitialMap(constructor_initial_map, instance_size,
+ map = Map::CopyInitialMap(isolate, constructor_initial_map, instance_size,
in_object_properties, unused_property_fields);
} else {
- map = Map::CopyInitialMap(constructor_initial_map);
+ map = Map::CopyInitialMap(isolate, constructor_initial_map);
}
map->set_new_target_is_base(false);
Handle<Object> prototype(new_target->instance_prototype(), isolate);
@@ -13073,7 +13357,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<String> prototype_string = isolate->factory()->prototype_string();
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
- JSReceiver::GetProperty(new_target, prototype_string), Map);
+ JSReceiver::GetProperty(isolate, new_target, prototype_string), Map);
// The above prototype lookup might change the constructor and its
// prototype, hence we have to reload the initial map.
EnsureHasInitialMap(constructor);
@@ -13093,14 +13377,16 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
constructor, isolate->factory()->native_context_index_symbol());
int index = maybe_index->IsSmi() ? Smi::ToInt(*maybe_index)
: Context::OBJECT_FUNCTION_INDEX;
- Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)));
+ Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)),
+ isolate);
prototype = handle(realm_constructor->prototype(), isolate);
}
- Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
+ Handle<Map> map = Map::CopyInitialMap(isolate, constructor_initial_map);
map->set_new_target_is_base(false);
CHECK(prototype->IsJSReceiver());
- if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
+ if (map->prototype() != *prototype)
+ Map::SetPrototype(isolate, map, prototype);
map->SetConstructor(*constructor);
return map;
}
@@ -13134,7 +13420,7 @@ bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
Isolate* isolate = function->GetIsolate();
Handle<String> function_name;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name,
- Name::ToFunctionName(name), false);
+ Name::ToFunctionName(isolate, name), false);
if (prefix->length() > 0) {
IncrementalStringBuilder builder(isolate);
builder.AppendString(prefix);
@@ -13203,6 +13489,14 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
if (FLAG_harmony_function_tostring) {
+ if (shared_info->function_token_position() == kNoSourcePosition) {
+ // If the function token position isn't valid, return [native code] to
+ // ensure calling eval on the returned source code throws rather than
+ // giving inconsistent call behaviour.
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::
+ kFunctionTokenOffsetTooLongForToString);
+ return NativeCodeFunctionSourceString(shared_info);
+ }
return Handle<String>::cast(
SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
}
@@ -13238,11 +13532,11 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
if (shared_info->is_wrapped()) {
builder.AppendCharacter('(');
Handle<FixedArray> args(
- Script::cast(shared_info->script())->wrapped_arguments());
+ Script::cast(shared_info->script())->wrapped_arguments(), isolate);
int argc = args->length();
for (int i = 0; i < argc; i++) {
if (i > 0) builder.AppendCString(", ");
- builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ builder.AppendString(Handle<String>(String::cast(args->get(i)), isolate));
}
builder.AppendCString(") {\n");
}
@@ -13301,11 +13595,11 @@ void Script::InitLineEnds(Handle<Script> script) {
Object* src_obj = script->source();
if (!src_obj->IsString()) {
DCHECK(src_obj->IsUndefined(isolate));
- script->set_line_ends(isolate->heap()->empty_fixed_array());
+ script->set_line_ends(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
DCHECK(src_obj->IsString());
Handle<String> src(String::cast(src_obj), isolate);
- Handle<FixedArray> array = String::CalculateLineEnds(src, true);
+ Handle<FixedArray> array = String::CalculateLineEnds(isolate, src, true);
script->set_line_ends(*array);
}
@@ -13324,7 +13618,7 @@ bool Script::IsUserJavaScript() { return type() == Script::TYPE_NORMAL; }
bool Script::ContainsAsmModule() {
DisallowHeapAllocation no_gc;
- SharedFunctionInfo::ScriptIterator iter(Handle<Script>(this));
+ SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), this);
while (SharedFunctionInfo* info = iter.Next()) {
if (info->HasAsmWasmData()) return true;
}
@@ -13368,11 +13662,10 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
if (type() == Script::TYPE_WASM) {
DCHECK_LE(0, position);
return WasmModuleObject::cast(wasm_module_object())
- ->shared()
->GetPositionInfo(static_cast<uint32_t>(position), info);
}
- if (line_ends()->IsUndefined(GetIsolate())) {
+ if (line_ends()->IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
if (!GetPositionInfoSlow(this, position, info)) return false;
} else {
@@ -13466,38 +13759,11 @@ int Script::GetLineNumber(int code_pos) const {
}
Object* Script::GetNameOrSourceURL() {
- Isolate* isolate = GetIsolate();
// Keep in sync with ScriptNameOrSourceURL in messages.js.
- if (!source_url()->IsUndefined(isolate)) return source_url();
+ if (!source_url()->IsUndefined()) return source_url();
return name();
}
-
-Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
- if (!script->wrapper()->IsUndefined(isolate)) {
- DCHECK(script->wrapper()->IsWeakCell());
- Handle<WeakCell> cell(WeakCell::cast(script->wrapper()));
- if (!cell->cleared()) {
- // Return a handle for the existing script wrapper from the cache.
- return handle(JSObject::cast(cell->value()));
- }
- // If we found an empty WeakCell, that means the script wrapper was
- // GCed. We are not notified directly of that, so we decrement here
- // so that we at least don't count double for any given script.
- isolate->counters()->script_wrappers()->Decrement();
- }
- // Construct a new script wrapper.
- isolate->counters()->script_wrappers()->Increment();
- Handle<JSFunction> constructor = isolate->script_function();
- Handle<JSValue> result =
- Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
- result->set_value(*script);
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(result);
- script->set_wrapper(*cell);
- return result;
-}
-
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Isolate* isolate, const FunctionLiteral* fun) {
CHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
@@ -13513,7 +13779,7 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
heap_object->IsUndefined(isolate)) {
return MaybeHandle<SharedFunctionInfo>();
}
- return handle(SharedFunctionInfo::cast(heap_object));
+ return handle(SharedFunctionInfo::cast(heap_object), isolate);
}
Script::Iterator::Iterator(Isolate* isolate)
@@ -13522,10 +13788,10 @@ Script::Iterator::Iterator(Isolate* isolate)
Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
-
-SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
- : ScriptIterator(script->GetIsolate(),
- handle(script->shared_function_infos())) {}
+SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
+ Script* script)
+ : ScriptIterator(isolate,
+ handle(script->shared_function_infos(), isolate)) {}
SharedFunctionInfo::ScriptIterator::ScriptIterator(
Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
@@ -13546,15 +13812,15 @@ SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
return nullptr;
}
-void SharedFunctionInfo::ScriptIterator::Reset(Handle<Script> script) {
- shared_function_infos_ = handle(script->shared_function_infos());
+void SharedFunctionInfo::ScriptIterator::Reset(Script* script) {
+ shared_function_infos_ = handle(script->shared_function_infos(), isolate_);
index_ = 0;
}
SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
: script_iterator_(isolate),
noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
- sfi_iterator_(handle(script_iterator_.Next(), isolate)) {}
+ sfi_iterator_(isolate, script_iterator_.Next()) {}
SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
SharedFunctionInfo* next = noscript_sfi_iterator_.Next<SharedFunctionInfo>();
@@ -13564,18 +13830,19 @@ SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
if (next != nullptr) return next;
Script* next_script = script_iterator_.Next();
if (next_script == nullptr) return nullptr;
- sfi_iterator_.Reset(handle(next_script));
+ sfi_iterator_.Reset(next_script);
}
}
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Object> script_object,
+ int function_literal_id,
bool reset_preparsed_scope_data) {
- DCHECK_NE(shared->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
if (shared->script() == *script_object) return;
Isolate* isolate = shared->GetIsolate();
- if (reset_preparsed_scope_data && shared->HasPreParsedScopeData()) {
+ if (reset_preparsed_scope_data &&
+ shared->HasUncompiledDataWithPreParsedScope()) {
shared->ClearPreParsedScopeData();
}
@@ -13588,15 +13855,14 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<WeakFixedArray> list =
handle(script->shared_function_infos(), isolate);
#ifdef DEBUG
- DCHECK_LT(shared->function_literal_id(), list->length());
- MaybeObject* maybe_object = list->Get(shared->function_literal_id());
+ DCHECK_LT(function_literal_id, list->length());
+ MaybeObject* maybe_object = list->Get(function_literal_id);
HeapObject* heap_object;
if (maybe_object->ToWeakHeapObject(&heap_object)) {
DCHECK_EQ(heap_object, *shared);
}
#endif
- list->Set(shared->function_literal_id(),
- HeapObjectReference::Weak(*shared));
+ list->Set(function_literal_id, HeapObjectReference::Weak(*shared));
} else {
Handle<Object> list = isolate->factory()->noscript_shared_function_infos();
@@ -13610,7 +13876,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
}
#endif // DEBUG
- list = FixedArrayOfWeakCells::Add(list, shared);
+ list = FixedArrayOfWeakCells::Add(isolate, list, shared);
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
}
@@ -13622,14 +13888,14 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// Due to liveedit, it might happen that the old_script doesn't know
// about the SharedFunctionInfo, so we have to guard against that.
Handle<WeakFixedArray> infos(old_script->shared_function_infos(), isolate);
- if (shared->function_literal_id() < infos->length()) {
- MaybeObject* raw = old_script->shared_function_infos()->Get(
- shared->function_literal_id());
+ if (function_literal_id < infos->length()) {
+ MaybeObject* raw =
+ old_script->shared_function_infos()->Get(function_literal_id);
HeapObject* heap_object;
if (raw->ToWeakHeapObject(&heap_object) && heap_object == *shared) {
old_script->shared_function_infos()->Set(
- shared->function_literal_id(),
- HeapObjectReference::Strong(isolate->heap()->undefined_value()));
+ function_literal_id, HeapObjectReference::Strong(
+ ReadOnlyRoots(isolate).undefined_value()));
}
}
} else {
@@ -13644,21 +13910,21 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(debug_info());
+ DebugInfo* info = DebugInfo::cast(GetDebugInfo());
bool has_break_info = info->HasBreakInfo();
return has_break_info;
}
bool SharedFunctionInfo::BreakAtEntry() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(debug_info());
+ DebugInfo* info = DebugInfo::cast(GetDebugInfo());
bool break_at_entry = info->BreakAtEntry();
return break_at_entry;
}
bool SharedFunctionInfo::HasCoverageInfo() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(debug_info());
+ DebugInfo* info = DebugInfo::cast(GetDebugInfo());
bool has_coverage_info = info->HasCoverageInfo();
return has_coverage_info;
}
@@ -13668,24 +13934,6 @@ CoverageInfo* SharedFunctionInfo::GetCoverageInfo() const {
return CoverageInfo::cast(GetDebugInfo()->coverage_info());
}
-DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
- DCHECK(HasDebugInfo());
- return DebugInfo::cast(debug_info());
-}
-
-int SharedFunctionInfo::debugger_hints() const {
- if (HasDebugInfo()) return GetDebugInfo()->debugger_hints();
- return Smi::ToInt(debug_info());
-}
-
-void SharedFunctionInfo::set_debugger_hints(int value) {
- if (HasDebugInfo()) {
- GetDebugInfo()->set_debugger_hints(value);
- } else {
- set_debug_info(Smi::FromInt(value));
- }
-}
-
String* SharedFunctionInfo::DebugName() {
DisallowHeapAllocation no_gc;
String* function_name = Name();
@@ -13693,17 +13941,6 @@ String* SharedFunctionInfo::DebugName() {
return inferred_name();
}
-// static
-SharedFunctionInfo::SideEffectState SharedFunctionInfo::GetSideEffectState(
- Handle<SharedFunctionInfo> info) {
- if (info->side_effect_state() == kNotComputed) {
- SharedFunctionInfo::SideEffectState has_no_side_effect =
- DebugEvaluate::FunctionGetSideEffectState(info);
- info->set_side_effect_state(has_no_side_effect);
- }
- return static_cast<SideEffectState>(info->side_effect_state());
-}
-
bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
Vector<const char> filter = CStrVector(raw_filter);
std::unique_ptr<char[]> cstrname(DebugName()->ToCString());
@@ -13721,7 +13958,8 @@ Handle<Object> SharedFunctionInfo::GetSourceCode(
Handle<SharedFunctionInfo> shared) {
Isolate* isolate = shared->GetIsolate();
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
- Handle<String> source(String::cast(Script::cast(shared->script())->source()));
+ Handle<String> source(String::cast(Script::cast(shared->script())->source()),
+ isolate);
return isolate->factory()->NewSubString(source, shared->StartPosition(),
shared->EndPosition());
}
@@ -13732,9 +13970,9 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
Isolate* isolate = shared->GetIsolate();
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
Handle<String> script_source(
- String::cast(Script::cast(shared->script())->source()));
+ String::cast(Script::cast(shared->script())->source()), isolate);
int start_pos = shared->function_token_position();
- if (start_pos == kNoSourcePosition) start_pos = shared->StartPosition();
+ DCHECK_NE(start_pos, kNoSourcePosition);
Handle<String> source = isolate->factory()->NewSubString(
script_source, start_pos, shared->EndPosition());
if (!shared->is_wrapped()) return source;
@@ -13744,11 +13982,12 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
builder.AppendCString("function ");
builder.AppendString(Handle<String>(shared->Name(), isolate));
builder.AppendCString("(");
- Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments());
+ Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments(),
+ isolate);
int argc = args->length();
for (int i = 0; i < argc; i++) {
if (i > 0) builder.AppendCString(", ");
- builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ builder.AppendString(Handle<String>(String::cast(args->get(i)), isolate));
}
builder.AppendCString(") {\n");
builder.AppendString(source);
@@ -13769,6 +14008,27 @@ bool SharedFunctionInfo::IsInlineable() {
int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
+int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
+ DisallowHeapAllocation no_gc;
+
+ Object* script_obj = script();
+ if (!script_obj->IsScript()) return FunctionLiteral::kIdTypeInvalid;
+
+ WeakFixedArray* shared_info_list =
+ Script::cast(script_obj)->shared_function_infos();
+ SharedFunctionInfo::ScriptIterator iterator(
+ isolate, Handle<WeakFixedArray>(&shared_info_list));
+
+ for (SharedFunctionInfo* shared = iterator.Next(); shared != nullptr;
+ shared = iterator.Next()) {
+ if (shared == this) {
+ return iterator.CurrentIndex();
+ }
+ }
+
+ return FunctionLiteral::kIdTypeInvalid;
+}
+
void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
bool has_prototype_slot,
int requested_embedder_fields,
@@ -13810,7 +14070,7 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
Handle<JSFunction> func(Handle<JSFunction>::cast(current));
// The super constructor should be compiled for the number of expected
// properties to be available.
- Handle<SharedFunctionInfo> shared(func->shared());
+ Handle<SharedFunctionInfo> shared(func->shared(), isolate);
if (shared->is_compiled() ||
Compiler::Compile(func, Compiler::CLEAR_EXCEPTION)) {
DCHECK(shared->is_compiled());
@@ -13887,15 +14147,18 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit,
bool is_toplevel) {
+ Isolate* isolate = shared_info->GetIsolate();
+ bool needs_position_info = true;
+
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
- shared_info->set_function_token_position(lit->function_token_position());
- shared_info->set_raw_start_position(lit->start_position());
- shared_info->set_raw_end_position(lit->end_position());
+ shared_info->SetFunctionTokenPosition(lit->function_token_position(),
+ lit->start_position());
if (shared_info->scope_info()->HasPositionInfo()) {
shared_info->scope_info()->SetPositionInfo(lit->start_position(),
lit->end_position());
+ needs_position_info = false;
}
shared_info->set_is_declaration(lit->is_declaration());
shared_info->set_is_named_expression(lit->is_named_expression());
@@ -13908,14 +14171,13 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
- shared_info->set_function_literal_id(lit->function_literal_id());
DCHECK_IMPLIES(lit->requires_instance_fields_initializer(),
IsClassConstructor(lit->kind()));
shared_info->set_requires_instance_fields_initializer(
lit->requires_instance_fields_initializer());
shared_info->set_is_toplevel(is_toplevel);
- DCHECK(shared_info->outer_scope_info()->IsTheHole(shared_info->GetIsolate()));
+ DCHECK(shared_info->outer_scope_info()->IsTheHole());
if (!is_toplevel) {
Scope* outer_scope = lit->scope()->GetOuterScopeWithContext();
if (outer_scope) {
@@ -13932,6 +14194,14 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(lit);
DCHECK_NULL(lit->produced_preparsed_scope_data());
+ if (lit->ShouldEagerCompile()) {
+ // If we're about to eager compile, we'll have the function literal
+ // available, so there's no need to wastefully allocate an uncompiled
+ // data.
+ // TODO(leszeks): This should be explicitly passed as a parameter, rather
+ // than relying on a property of the literal.
+ needs_position_info = false;
+ }
} else {
// Set an invalid length for lazy functions. This way we can set the correct
// value after compiling, but avoid overwriting values set manually by the
@@ -13941,15 +14211,26 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
ProducedPreParsedScopeData* scope_data =
lit->produced_preparsed_scope_data();
if (scope_data != nullptr) {
- MaybeHandle<PreParsedScopeData> maybe_data =
- scope_data->Serialize(shared_info->GetIsolate());
- if (!maybe_data.is_null()) {
- Handle<PreParsedScopeData> data = maybe_data.ToHandleChecked();
- shared_info->set_preparsed_scope_data(*data);
+ Handle<PreParsedScopeData> pre_parsed_scope_data;
+ if (scope_data->Serialize(shared_info->GetIsolate())
+ .ToHandle(&pre_parsed_scope_data)) {
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithPreParsedScope(
+ lit->start_position(), lit->end_position(),
+ lit->function_literal_id(), pre_parsed_scope_data);
+ shared_info->set_uncompiled_data(*data);
+ needs_position_info = false;
}
}
}
}
+ if (needs_position_info) {
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
+ lit->start_position(), lit->end_position(),
+ lit->function_literal_id());
+ shared_info->set_uncompiled_data(*data);
+ }
}
void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
@@ -13964,9 +14245,28 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
// so we can afford to adjust the estimate generously.
estimate += 8;
+ // Limit actual estimate to fit in a 16 bit field, we will never allocate
+ // more than this in any case.
+ estimate = std::min(estimate, kMaxUInt16);
+
set_expected_nof_properties(estimate);
}
+void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
+ int start_position) {
+ int offset;
+ if (function_token_position == kNoSourcePosition) {
+ offset = 0;
+ } else {
+ offset = start_position - function_token_position;
+ }
+
+ if (offset > kMaximumFunctionTokenOffset) {
+ offset = kFunctionTokenOutOfRange;
+ }
+ set_raw_function_token_offset(offset);
+}
+
void Map::StartInobjectSlackTracking() {
DCHECK(!IsInobjectSlackTrackingInProgress());
if (UnusedPropertyFields() == 0) return;
@@ -13974,7 +14274,7 @@ void Map::StartInobjectSlackTracking() {
}
void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Object* old_pointer = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_pointer = old_pointer;
VisitPointer(host, &new_pointer);
@@ -13995,13 +14295,13 @@ void ObjectVisitor::VisitRelocInfo(RelocIterator* it) {
}
}
-void Code::InvalidateEmbeddedObjects() {
- HeapObject* undefined = GetHeap()->undefined_value();
+void Code::InvalidateEmbeddedObjects(Heap* heap) {
+ HeapObject* undefined = ReadOnlyRoots(heap).undefined_value();
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
}
}
}
@@ -14018,12 +14318,12 @@ void Code::FlushICache() const {
Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
}
-void Code::CopyFrom(const CodeDesc& desc) {
- CopyFromNoFlush(desc);
+void Code::CopyFrom(Heap* heap, const CodeDesc& desc) {
+ CopyFromNoFlush(heap, desc);
FlushICache();
}
-void Code::CopyFromNoFlush(const CodeDesc& desc) {
+void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// copy code
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -14043,9 +14343,10 @@ void Code::CopyFromNoFlush(const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
// unbox handles and relocate
- int mode_mask = RelocInfo::kCodeTargetMask |
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
RelocInfo::kApplyMask;
// Needed to find target_object and runtime_entry on X64
Assembler* origin = desc.origin;
@@ -14054,9 +14355,9 @@ void Code::CopyFromNoFlush(const CodeDesc& desc) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p, UPDATE_WRITE_BARRIER,
+ it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
// rewrite code handles to direct pointers to the first instruction in the
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
@@ -14081,35 +14382,33 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
return table.FindEntry(pc);
}
-#ifdef V8_EMBEDDED_BUILTINS
int Code::OffHeapInstructionSize() const {
- DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_size();
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionSizeOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionStart() const {
- DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_start();
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionEnd() const {
- DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_end();
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
-#endif
namespace {
template <typename Code>
-void SetStackFrameCacheCommon(Handle<Code> code,
+void SetStackFrameCacheCommon(Isolate* isolate, Handle<Code> code,
Handle<SimpleNumberDictionary> cache) {
- Handle<Object> maybe_table(code->source_position_table(), code->GetIsolate());
+ Handle<Object> maybe_table(code->source_position_table(), isolate);
if (maybe_table->IsSourcePositionTableWithFrameCache()) {
Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
->set_stack_frame_cache(*cache);
@@ -14118,8 +14417,7 @@ void SetStackFrameCacheCommon(Handle<Code> code,
DCHECK(maybe_table->IsByteArray());
Handle<ByteArray> table(Handle<ByteArray>::cast(maybe_table));
Handle<SourcePositionTableWithFrameCache> table_with_cache =
- code->GetIsolate()->factory()->NewSourcePositionTableWithFrameCache(
- table, cache);
+ isolate->factory()->NewSourcePositionTableWithFrameCache(table, cache);
code->set_source_position_table(*table_with_cache);
}
} // namespace
@@ -14128,9 +14426,14 @@ void SetStackFrameCacheCommon(Handle<Code> code,
void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
Handle<SimpleNumberDictionary> cache) {
if (abstract_code->IsCode()) {
- SetStackFrameCacheCommon(handle(abstract_code->GetCode()), cache);
+ SetStackFrameCacheCommon(
+ abstract_code->GetIsolate(),
+ handle(abstract_code->GetCode(), abstract_code->GetIsolate()), cache);
} else {
- SetStackFrameCacheCommon(handle(abstract_code->GetBytecodeArray()), cache);
+ SetStackFrameCacheCommon(
+ abstract_code->GetIsolate(),
+ handle(abstract_code->GetBytecodeArray(), abstract_code->GetIsolate()),
+ cache);
}
}
@@ -14240,39 +14543,53 @@ const char* AbstractCode::Kind2String(Kind kind) {
UNREACHABLE();
}
-#ifdef V8_EMBEDDED_BUILTINS
-bool Code::IsProcessIndependent() {
+bool Code::IsIsolateIndependent(Isolate* isolate) {
constexpr int all_real_modes_mask =
(1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
- constexpr int mode_mask =
- all_real_modes_mask & ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
- ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
- ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
- ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ constexpr int mode_mask = all_real_modes_mask &
+ ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
+ ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
+ ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
STATIC_ASSERT(RelocInfo::ModeMask(RelocInfo::COMMENT) ==
(1 << RelocInfo::COMMENT));
- STATIC_ASSERT(
- mode_mask ==
- (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE)));
-
- RelocIterator it(this, mode_mask);
- return it.done();
+ STATIC_ASSERT(mode_mask ==
+ (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
+
+ bool is_process_independent = true;
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
+ // Off-heap code targets are later rewritten as pc-relative jumps to the
+ // off-heap instruction stream and are thus process-independent.
+ Address target_address = it.rinfo()->target_address();
+ if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
+
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ CHECK(target->IsCode());
+ if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
+ }
+ is_process_independent = false;
+ }
+
+ return is_process_independent;
}
-#endif
Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
DCHECK(code->kind() == OPTIMIZED_FUNCTION);
WeakCell* raw_cell = code->CachedWeakCell();
- if (raw_cell != nullptr) return Handle<WeakCell>(raw_cell);
+ if (raw_cell != nullptr) {
+ return Handle<WeakCell>(raw_cell, code->GetIsolate());
+ }
Handle<WeakCell> cell = code->GetIsolate()->factory()->NewWeakCell(code);
DeoptimizationData::cast(code->deoptimization_data())
->SetWeakCellCache(*cell);
@@ -14565,27 +14882,46 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
}
+const char* Code::GetName(Isolate* isolate) const {
+ if (is_stub()) {
+ return CodeStub::MajorName(CodeStub::GetMajorKey(this));
+ } else if (kind() == BYTECODE_HANDLER) {
+ return isolate->interpreter()->LookupNameOfBytecodeHandler(this);
+ } else {
+ // There are some handlers and ICs that we can also find names for with
+ // Builtins::Lookup.
+ return isolate->builtins()->Lookup(raw_instruction_start());
+ }
+}
+
+void Code::PrintBuiltinCode(Isolate* isolate, const char* name) {
+ DCHECK(FLAG_print_builtin_code);
+ if (name == nullptr) {
+ name = GetName(isolate);
+ }
+ if (name != nullptr &&
+ PassesFilter(CStrVector(name),
+ CStrVector(FLAG_print_builtin_code_filter))) {
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ OFStream os(trace_scope.file());
+ Disassemble(name, os);
+ os << "\n";
+ }
+}
+
void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
+ Isolate* isolate = GetIsolate();
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
os << "major_key = " << (n == nullptr ? "null" : n) << "\n";
os << "minor_key = " << CodeStub::MinorKeyFromKey(this->stub_key()) << "\n";
}
+ if (name == nullptr) {
+ name = GetName(isolate);
+ }
if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
- } else if (kind() == BYTECODE_HANDLER) {
- name = GetIsolate()->interpreter()->LookupNameOfBytecodeHandler(this);
- if (name != nullptr) {
- os << "name = " << name << "\n";
- }
- } else {
- // There are some handlers and ICs that we can also find names for with
- // Builtins::Lookup.
- name = GetIsolate()->builtins()->Lookup(raw_instruction_start());
- if (name != nullptr) {
- os << "name = " << name << "\n";
- }
}
if (kind() == OPTIMIZED_FUNCTION) {
os << "stack_slots = " << stack_slots() << "\n";
@@ -14595,7 +14931,6 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
os << "Body (size = " << InstructionSize() << ")\n";
{
- Isolate* isolate = GetIsolate();
int size = InstructionSize();
int safepoint_offset =
has_safepoint_info() ? safepoint_table_offset() : size;
@@ -14689,7 +15024,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
os << "RelocInfo (size = " << relocation_size() << ")\n";
for (RelocIterator it(this); !it.done(); it.next()) {
- it.rinfo()->Print(GetIsolate(), os);
+ it.rinfo()->Print(isolate, os);
}
os << "\n";
@@ -14704,15 +15039,20 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
}
#endif // ENABLE_DISASSEMBLER
-
void BytecodeArray::Disassemble(std::ostream& os) {
+ DisallowHeapAllocation no_gc;
+
os << "Parameter count " << parameter_count() << "\n";
os << "Frame size " << frame_size() << "\n";
Address base_address = GetFirstBytecodeAddress();
SourcePositionTableIterator source_positions(SourcePositionTable());
- interpreter::BytecodeArrayIterator iterator(handle(this));
+ // Storage for backing the handle passed to the iterator. This handle won't be
+ // updated by the gc, but that's ok because we've disallowed GCs anyway.
+ BytecodeArray* handle_storage = this;
+ Handle<BytecodeArray> handle(&handle_storage);
+ interpreter::BytecodeArrayIterator iterator(handle);
while (!iterator.done()) {
if (!source_positions.done() &&
iterator.current_offset() == source_positions.code_offset()) {
@@ -14809,43 +15149,51 @@ void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
array->GetElementsAccessor()->SetLength(array, new_length);
}
-
-// static
-void Map::AddDependentCode(Handle<Map> map,
- DependentCode::DependencyGroup group,
- Handle<Code> code) {
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- Handle<DependentCode> codes = DependentCode::InsertWeakCode(
- Handle<DependentCode>(map->dependent_code()), group, cell);
- if (*codes != map->dependent_code()) map->set_dependent_code(*codes);
+DependentCode* DependentCode::Get(Handle<HeapObject> object) {
+ if (object->IsMap()) {
+ return Handle<Map>::cast(object)->dependent_code();
+ } else if (object->IsPropertyCell()) {
+ return Handle<PropertyCell>::cast(object)->dependent_code();
+ } else if (object->IsAllocationSite()) {
+ return Handle<AllocationSite>::cast(object)->dependent_code();
+ }
+ UNREACHABLE();
}
-
-Handle<DependentCode> DependentCode::InsertCompilationDependencies(
- Handle<DependentCode> entries, DependencyGroup group,
- Handle<Foreign> info) {
- return Insert(entries, group, info);
+void DependentCode::Set(Handle<HeapObject> object, Handle<DependentCode> dep) {
+ if (object->IsMap()) {
+ Handle<Map>::cast(object)->set_dependent_code(*dep);
+ } else if (object->IsPropertyCell()) {
+ Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
+ } else if (object->IsAllocationSite()) {
+ Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
+ } else {
+ UNREACHABLE();
+ }
}
+void DependentCode::InstallDependency(Isolate* isolate, Handle<WeakCell> cell,
+ Handle<HeapObject> object,
+ DependencyGroup group) {
+ Handle<DependentCode> old_deps(DependentCode::Get(object), isolate);
+ Handle<DependentCode> new_deps =
+ InsertWeakCode(isolate, old_deps, group, cell);
+ // Update the list head if necessary.
+ if (!new_deps.is_identical_to(old_deps)) DependentCode::Set(object, new_deps);
+}
Handle<DependentCode> DependentCode::InsertWeakCode(
- Handle<DependentCode> entries, DependencyGroup group,
+ Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
Handle<WeakCell> code_cell) {
- return Insert(entries, group, code_cell);
-}
-
-
-Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Object> object) {
if (entries->length() == 0 || entries->group() > group) {
// There is no such group.
- return DependentCode::New(group, object, entries);
+ return DependentCode::New(isolate, group, code_cell, entries);
}
if (entries->group() < group) {
// The group comes later in the list.
- Handle<DependentCode> old_next(entries->next_link());
- Handle<DependentCode> new_next = Insert(old_next, group, object);
+ Handle<DependentCode> old_next(entries->next_link(), isolate);
+ Handle<DependentCode> new_next =
+ InsertWeakCode(isolate, old_next, group, code_cell);
if (!old_next.is_identical_to(new_next)) {
entries->set_next_link(*new_next);
}
@@ -14855,23 +15203,22 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
int count = entries->count();
// Check for existing entry to avoid duplicates.
for (int i = 0; i < count; i++) {
- if (entries->object_at(i) == *object) return entries;
+ if (entries->object_at(i) == *code_cell) return entries;
}
if (entries->length() < kCodesStartIndex + count + 1) {
- entries = EnsureSpace(entries);
+ entries = EnsureSpace(isolate, entries);
// Count could have changed, reload it.
count = entries->count();
}
- entries->set_object_at(count, *object);
+ entries->set_object_at(count, *code_cell);
entries->set_count(count + 1);
return entries;
}
-
-Handle<DependentCode> DependentCode::New(DependencyGroup group,
+Handle<DependentCode> DependentCode::New(Isolate* isolate,
+ DependencyGroup group,
Handle<Object> object,
Handle<DependentCode> next) {
- Isolate* isolate = next->GetIsolate();
Handle<DependentCode> result = Handle<DependentCode>::cast(
isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
result->set_next_link(*next);
@@ -14880,11 +15227,9 @@ Handle<DependentCode> DependentCode::New(DependencyGroup group,
return result;
}
-
Handle<DependentCode> DependentCode::EnsureSpace(
- Handle<DependentCode> entries) {
+ Isolate* isolate, Handle<DependentCode> entries) {
if (entries->Compact()) return entries;
- Isolate* isolate = entries->GetIsolate();
int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
@@ -14912,34 +15257,6 @@ bool DependentCode::Compact() {
}
-void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
- WeakCell* code_cell) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- next_link()->UpdateToFinishedCode(group, info, code_cell);
- return;
- }
- DCHECK_EQ(group, this->group());
- DisallowHeapAllocation no_gc;
- int count = this->count();
- for (int i = 0; i < count; i++) {
- if (object_at(i) == info) {
- set_object_at(i, code_cell);
- break;
- }
- }
-#ifdef DEBUG
- for (int i = 0; i < count; i++) {
- DCHECK(object_at(i) != info);
- }
-#endif
-}
-
-
void DependentCode::RemoveCompilationDependencies(
DependentCode::DependencyGroup group, Foreign* info) {
if (this->length() == 0 || this->group() > group) {
@@ -15028,20 +15345,12 @@ bool DependentCode::MarkCodeForDeoptimization(
int count = this->count();
for (int i = 0; i < count; i++) {
Object* obj = object_at(i);
- if (obj->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(obj);
- if (cell->cleared()) continue;
- Code* code = Code::cast(cell->value());
- if (!code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization(DependencyGroupName(group));
- marked = true;
- }
- } else {
- DCHECK(obj->IsForeign());
- CompilationDependencies* info =
- reinterpret_cast<CompilationDependencies*>(
- Foreign::cast(obj)->foreign_address());
- info->Abort();
+ WeakCell* cell = WeakCell::cast(obj);
+ if (cell->cleared()) continue;
+ Code* code = Code::cast(cell->value());
+ if (!code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization(DependencyGroupName(group));
+ marked = true;
}
}
for (int i = 0; i < count; i++) {
@@ -15066,7 +15375,7 @@ void DependentCode::DeoptimizeDependentCodeGroup(
void Code::SetMarkedForDeoptimization(const char* reason) {
set_marked_for_deoptimization(true);
if (FLAG_trace_deopt &&
- (deoptimization_data() != GetHeap()->empty_fixed_array())) {
+ (deoptimization_data() != GetReadOnlyRoots().empty_fixed_array())) {
DeoptimizationData* deopt_data =
DeoptimizationData::cast(deoptimization_data());
CodeTracer::Scope scope(GetHeap()->isolate()->GetCodeTracer());
@@ -15099,14 +15408,15 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
UNREACHABLE();
}
-Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
+Handle<Map> Map::TransitionToPrototype(Isolate* isolate, Handle<Map> map,
Handle<Object> prototype) {
Handle<Map> new_map =
- TransitionsAccessor(map).GetPrototypeTransition(prototype);
+ TransitionsAccessor(isolate, map).GetPrototypeTransition(prototype);
if (new_map.is_null()) {
- new_map = Copy(map, "TransitionToPrototype");
- TransitionsAccessor(map).PutPrototypeTransition(prototype, new_map);
- Map::SetPrototype(new_map, prototype);
+ new_map = Copy(isolate, map, "TransitionToPrototype");
+ TransitionsAccessor(isolate, map)
+ .PutPrototypeTransition(prototype, new_map);
+ Map::SetPrototype(isolate, new_map, prototype);
}
return new_map;
}
@@ -15163,7 +15473,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
isolate, trap_result,
Execution::Call(isolate, trap, handler, arraysize(argv), argv),
Nothing<bool>());
- bool bool_trap_result = trap_result->BooleanValue();
+ bool bool_trap_result = trap_result->BooleanValue(isolate);
// 9. If booleanTrapResult is false, return false.
if (!bool_trap_result) {
RETURN_FAILURE(
@@ -15207,7 +15517,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
if (from_javascript) {
if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
RETURN_FAILURE(isolate, should_throw,
@@ -15236,7 +15546,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
all_extensible = all_extensible && real_receiver->map()->is_extensible();
}
}
- Handle<Map> map(real_receiver->map());
+ Handle<Map> map(real_receiver->map(), isolate);
// Nothing to do if prototype is already set.
if (map->prototype() == *value) return Just(true);
@@ -15280,7 +15590,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
isolate->UpdateNoElementsProtectorOnSetPrototype(real_receiver);
- Handle<Map> new_map = Map::TransitionToPrototype(map, value);
+ Handle<Map> new_map = Map::TransitionToPrototype(isolate, map, value);
DCHECK(new_map->prototype() == *value);
JSObject::MigrateToMap(real_receiver, new_map);
@@ -15291,12 +15601,13 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// static
void JSObject::SetImmutableProto(Handle<JSObject> object) {
DCHECK(!object->IsAccessCheckNeeded()); // Never called from JS
- Handle<Map> map(object->map());
+ Handle<Map> map(object->map(), object->GetIsolate());
// Nothing to do if prototype is already set.
if (map->is_immutable_proto()) return;
- Handle<Map> new_map = Map::TransitionToImmutableProto(map);
+ Handle<Map> new_map =
+ Map::TransitionToImmutableProto(object->GetIsolate(), map);
object->synchronized_set_map(*new_map);
}
@@ -15340,7 +15651,7 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
DCHECK_LT(index, *new_capacity);
if (*new_capacity <= JSObject::kMaxUncheckedOldFastElementsLength ||
(*new_capacity <= JSObject::kMaxUncheckedFastElementsLength &&
- object->GetHeap()->InNewSpace(object))) {
+ Heap::InNewSpace(object))) {
return false;
}
// If the fast-case backing storage takes up much more memory than a
@@ -15417,23 +15728,10 @@ static bool ShouldConvertToFastElements(JSObject* object,
return 2 * dictionary_size >= *new_capacity;
}
-
-// static
-MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes) {
- MAYBE_RETURN_NULL(
- AddDataElement(object, index, value, attributes, kThrowOnError));
- return value;
-}
-
-
// static
-Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- ShouldThrow should_throw) {
+void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
DCHECK(object->map()->is_extensible());
Isolate* isolate = object->GetIsolate();
@@ -15483,8 +15781,6 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
isolate->factory()->NewNumberFromUint(index + 1);
JSArray::cast(*object)->set_length(*new_length);
}
-
- return Just(true);
}
@@ -15515,7 +15811,7 @@ PretenureFlag AllocationSite::GetPretenureMode() const {
bool AllocationSite::IsNested() {
DCHECK(FLAG_trace_track_allocation_sites);
- Object* current = GetHeap()->allocation_sites_list();
+ Object* current = boilerplate()->GetHeap()->allocation_sites_list();
while (current->IsAllocationSite()) {
AllocationSite* current_site = AllocationSite::cast(current);
if (current_site->nested_site() == this) {
@@ -15606,19 +15902,19 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind) {
if (!object->IsJSArray()) return false;
- Heap* heap = object->GetHeap();
- if (!heap->InNewSpace(*object)) return false;
+ if (!Heap::InNewSpace(*object)) return false;
Handle<AllocationSite> site;
{
DisallowHeapAllocation no_allocation;
+ Heap* heap = object->GetHeap();
AllocationMemento* memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
if (memento == nullptr) return false;
// Walk through to the Allocation Site
- site = handle(memento->GetAllocationSite());
+ site = handle(memento->GetAllocationSite(), heap->isolate());
}
return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
to_kind);
@@ -15647,14 +15943,14 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
UpdateAllocationSite(object, to_kind);
- if (object->elements() == object->GetHeap()->empty_fixed_array() ||
+ if (object->elements() == object->GetReadOnlyRoots().empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
MigrateToMap(object, new_map);
if (FLAG_trace_elements_transitions) {
- Handle<FixedArrayBase> elms(object->elements());
+ Handle<FixedArrayBase> elms(object->elements(), object->GetIsolate());
PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
} else {
@@ -15685,7 +15981,7 @@ bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
// configurable, it's guaranteed to be the first in the descriptor array.
if (!map->is_dictionary_map()) {
DCHECK(map->instance_descriptors()->GetKey(0) ==
- array->GetHeap()->length_string());
+ array->GetReadOnlyRoots().length_string());
return map->instance_descriptors()->GetDetails(0).IsReadOnly();
}
@@ -15707,7 +16003,7 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
template <typename BackingStore>
static int HoleyElementsUsage(JSObject* object, BackingStore* store) {
- Isolate* isolate = store->GetIsolate();
+ Isolate* isolate = object->GetIsolate();
int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
: store->length();
int used = 0;
@@ -15760,12 +16056,12 @@ int JSObject::GetFastElementsUsage() {
template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print(std::ostream& os) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = this->GetIsolate();
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
Derived* dictionary = Derived::cast(this);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (!dictionary->ToKey(isolate, i, &k)) continue;
+ if (!dictionary->ToKey(roots, i, &k)) continue;
os << "\n ";
if (k->IsString()) {
String::cast(k)->StringPrint(os);
@@ -15778,7 +16074,7 @@ void Dictionary<Derived, Shape>::Print(std::ostream& os) {
}
template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print() {
- OFStream os(stdout);
+ StdoutStream os;
Print(os);
os << std::endl;
}
@@ -15794,7 +16090,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
return HasProperty(&it);
}
@@ -15811,7 +16107,7 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
return maybe_result.IsJust() ? Just(it.state() == LookupIterator::ACCESSOR)
: Nothing<bool>();
@@ -15823,7 +16119,7 @@ int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
}
bool FixedArrayBase::IsCowArray() const {
- return map() == GetHeap()->fixed_cow_array_map();
+ return map() == GetReadOnlyRoots().fixed_cow_array_map();
}
bool JSObject::WasConstructedFromApiFunction() {
@@ -15857,9 +16153,9 @@ bool JSObject::WasConstructedFromApiFunction() {
}
const char* Symbol::PrivateSymbolToName() const {
- Heap* heap = GetIsolate()->heap();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
#define SYMBOL_CHECK_AND_PRINT(name) \
- if (this == heap->name()) return #name;
+ if (this == roots.name()) return #name;
PRIVATE_SYMBOL_LIST(SYMBOL_CHECK_AND_PRINT)
#undef SYMBOL_CHECK_AND_PRINT
return "UNKNOWN";
@@ -15868,7 +16164,7 @@ const char* Symbol::PrivateSymbolToName() const {
void Symbol::SymbolShortPrint(std::ostream& os) {
os << "<Symbol:";
- if (!name()->IsUndefined(GetIsolate())) {
+ if (!name()->IsUndefined()) {
os << " ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
@@ -15929,7 +16225,7 @@ class StringSharedKey : public HashTableKey {
array->set(1, *source_);
array->set(2, Smi::FromEnum(language_mode_));
array->set(3, Smi::FromInt(position_));
- array->set_map(isolate->heap()->fixed_cow_array_map());
+ array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
return array;
}
@@ -15964,6 +16260,14 @@ const char* JSPromise::Status(v8::Promise::PromiseState status) {
UNREACHABLE();
}
+int JSPromise::async_task_id() const {
+ return AsyncTaskIdField::decode(flags());
+}
+
+void JSPromise::set_async_task_id(int id) {
+ set_flags(AsyncTaskIdField::update(flags(), id));
+}
+
// static
Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
Handle<Object> value) {
@@ -16055,8 +16359,9 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// on JSPromise instances which have the (initial) %PromisePrototype%.
then = isolate->promise_then();
} else {
- then = JSReceiver::GetProperty(Handle<JSReceiver>::cast(resolution),
- isolate->factory()->then_string());
+ then =
+ JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(resolution),
+ isolate->factory()->then_string());
}
// 9. If then is an abrupt completion, then
@@ -16083,7 +16388,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
Handle<JSReceiver>::cast(resolution), isolate->native_context());
if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
// Mark the dependency of the new {promise} on the {resolution}.
- Object::SetProperty(resolution,
+ Object::SetProperty(isolate, resolution,
isolate->factory()->promise_handled_by_symbol(),
promise, LanguageMode::kStrict)
.Check();
@@ -16126,7 +16431,7 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
if (type == PromiseReaction::kFulfill) {
task->synchronized_set_map(
- isolate->heap()->promise_fulfill_reaction_job_task_map());
+ ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
*argument);
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
@@ -16139,7 +16444,7 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
DisallowHeapAllocation no_gc;
HeapObject* handler = reaction->reject_handler();
task->synchronized_set_map(
- isolate->heap()->promise_reject_reaction_job_task_map());
+ ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map());
Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
*isolate->native_context());
@@ -16197,8 +16502,8 @@ JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
// static
-MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern, Flags flags) {
- Isolate* isolate = pattern->GetIsolate();
+MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
+ Flags flags) {
Handle<JSFunction> constructor = isolate->regexp_function();
Handle<JSRegExp> regexp =
Handle<JSRegExp>::cast(isolate->factory()->NewJSObject(constructor));
@@ -16285,7 +16590,7 @@ MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> source,
Handle<String> flags_string) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = regexp->GetIsolate();
bool success = false;
Flags flags = RegExpFlagsFromString(flags_string, &success);
if (!success) {
@@ -16307,14 +16612,14 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
// suggested by ECMA-262, 5th, section 15.10.4.1.
if (source->length() == 0) source = factory->query_colon_string();
- source = String::Flatten(source);
+ source = String::Flatten(isolate, source);
Handle<String> escaped_source;
ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
EscapeRegExpSource(isolate, source), JSRegExp);
- RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
- JSRegExp);
+ RETURN_ON_EXCEPTION(
+ isolate, RegExpImpl::Compile(isolate, regexp, source, flags), JSRegExp);
regexp->set_source(*escaped_source);
regexp->set_flags(Smi::FromInt(flags));
@@ -16330,7 +16635,7 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
// Map has changed, so use generic, but slower, method.
RETURN_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(regexp, factory->lastIndex_string(),
+ JSReceiver::SetProperty(isolate, regexp, factory->lastIndex_string(),
Handle<Smi>(Smi::kZero, isolate),
LanguageMode::kStrict),
JSRegExp);
@@ -16475,7 +16780,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(Derived* new_table) {
+void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived* new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
@@ -16488,11 +16793,11 @@ void HashTable<Derived, Shape>::Rehash(Derived* new_table) {
// Rehash the elements.
int capacity = this->Capacity();
- Isolate* isolate = new_table->GetIsolate();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
Object* k = this->get(from_index);
- if (!Shape::IsLive(isolate, k)) continue;
+ if (!Shape::IsLive(roots, k)) continue;
uint32_t hash = Shape::HashForObject(isolate, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
@@ -16505,9 +16810,10 @@ void HashTable<Derived, Shape>::Rehash(Derived* new_table) {
}
template <typename Derived, typename Shape>
-uint32_t HashTable<Derived, Shape>::EntryForProbe(Object* k, int probe,
+uint32_t HashTable<Derived, Shape>::EntryForProbe(Isolate* isolate, Object* k,
+ int probe,
uint32_t expected) {
- uint32_t hash = Shape::HashForObject(GetIsolate(), k);
+ uint32_t hash = Shape::HashForObject(isolate, k);
uint32_t capacity = this->Capacity();
uint32_t entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
@@ -16535,10 +16841,10 @@ void HashTable<Derived, Shape>::Swap(uint32_t entry1, uint32_t entry2,
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash() {
+void HashTable<Derived, Shape>::Rehash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
- Isolate* isolate = GetIsolate();
+ ReadOnlyRoots roots(isolate);
uint32_t capacity = Capacity();
bool done = false;
for (int probe = 1; !done; probe++) {
@@ -16547,12 +16853,12 @@ void HashTable<Derived, Shape>::Rehash() {
done = true;
for (uint32_t current = 0; current < capacity; current++) {
Object* current_key = KeyAt(current);
- if (!Shape::IsLive(isolate, current_key)) continue;
- uint32_t target = EntryForProbe(current_key, probe, current);
+ if (!Shape::IsLive(roots, current_key)) continue;
+ uint32_t target = EntryForProbe(isolate, current_key, probe, current);
if (current == target) continue;
Object* target_key = KeyAt(target);
- if (!Shape::IsLive(isolate, target_key) ||
- EntryForProbe(target_key, probe, target) != target) {
+ if (!Shape::IsLive(roots, target_key) ||
+ EntryForProbe(isolate, target_key, probe, target) != target) {
// Put the current element into the correct position.
Swap(current, target, mode);
// The other element will be processed on the next iteration.
@@ -16565,8 +16871,8 @@ void HashTable<Derived, Shape>::Rehash() {
}
}
// Wipe deleted entries.
- Object* the_hole = isolate->heap()->the_hole_value();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = roots.the_hole_value();
+ Object* undefined = roots.undefined_value();
for (uint32_t current = 0; current < capacity; current++) {
if (KeyAt(current) == the_hole) {
set(EntryToIndex(current) + kEntryKeyIndex, undefined);
@@ -16577,21 +16883,20 @@ void HashTable<Derived, Shape>::Rehash() {
template <typename Derived, typename Shape>
Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
- Handle<Derived> table, int n, PretenureFlag pretenure) {
+ Isolate* isolate, Handle<Derived> table, int n, PretenureFlag pretenure) {
if (table->HasSufficientCapacityToAdd(n)) return table;
- Isolate* isolate = table->GetIsolate();
int capacity = table->Capacity();
int new_nof = table->NumberOfElements() + n;
const int kMinCapacityForPretenure = 256;
- bool should_pretenure = pretenure == TENURED ||
- ((capacity > kMinCapacityForPretenure) &&
- !isolate->heap()->InNewSpace(*table));
+ bool should_pretenure =
+ pretenure == TENURED ||
+ ((capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(*table));
Handle<Derived> new_table = HashTable::New(
isolate, new_nof, should_pretenure ? TENURED : NOT_TENURED);
- table->Rehash(*new_table);
+ table->Rehash(isolate, *new_table);
return new_table;
}
@@ -16615,7 +16920,8 @@ bool HashTable<Derived, Shape>::HasSufficientCapacityToAdd(
}
template <typename Derived, typename Shape>
-Handle<Derived> HashTable<Derived, Shape>::Shrink(Handle<Derived> table,
+Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
+ Handle<Derived> table,
int additionalCapacity) {
int capacity = table->Capacity();
int nof = table->NumberOfElements();
@@ -16632,15 +16938,14 @@ Handle<Derived> HashTable<Derived, Shape>::Shrink(Handle<Derived> table,
if (new_capacity < Derived::kMinShrinkCapacity) return table;
if (new_capacity == capacity) return table;
- Isolate* isolate = table->GetIsolate();
const int kMinCapacityForPretenure = 256;
bool pretenure = (at_least_room_for > kMinCapacityForPretenure) &&
- !isolate->heap()->InNewSpace(*table);
+ !Heap::InNewSpace(*table);
Handle<Derived> new_table =
HashTable::New(isolate, new_capacity, pretenure ? TENURED : NOT_TENURED,
USE_CUSTOM_MINIMUM_CAPACITY);
- table->Rehash(*new_table);
+ table->Rehash(isolate, *new_table);
return new_table;
}
@@ -16650,147 +16955,14 @@ uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
- Isolate* isolate = GetIsolate();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
while (true) {
- if (!Shape::IsLive(isolate, KeyAt(entry))) break;
+ if (!Shape::IsLive(roots, KeyAt(entry))) break;
entry = NextProbe(entry, count++, capacity);
}
return entry;
}
-
-// Force instantiation of template instances class.
-// Please note this list is compiler dependent.
-
-template class HashTable<StringTable, StringTableShape>;
-
-template class HashTable<CompilationCacheTable, CompilationCacheShape>;
-
-template class HashTable<ObjectHashTable, ObjectHashTableShape>;
-
-template class Dictionary<NameDictionary, NameDictionaryShape>;
-
-template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<NumberDictionary, NumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
-
-template Handle<NameDictionary>
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
- Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
-
-template Handle<GlobalDictionary>
-BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::New(
- Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
-
-template Handle<NumberDictionary>
- Dictionary<NumberDictionary, NumberDictionaryShape>::AtPut(
- Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails);
-
-template Handle<SimpleNumberDictionary>
- Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::AtPut(
- Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>,
- PropertyDetails);
-
-template Object* Dictionary<
- NumberDictionary, NumberDictionaryShape>::SlowReverseLookup(Object* value);
-
-template Object* Dictionary<
- NameDictionary, NameDictionaryShape>::SlowReverseLookup(Object* value);
-
-template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape>::DeleteEntry(
- Handle<NameDictionary>, int);
-
-template Handle<NumberDictionary>
-Dictionary<NumberDictionary, NumberDictionaryShape>::DeleteEntry(
- Handle<NumberDictionary>, int);
-
-template Handle<SimpleNumberDictionary>
-Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::DeleteEntry(
- Handle<SimpleNumberDictionary>, int);
-
-template Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
- PretenureFlag,
- MinimumCapacity);
-
-template Handle<ObjectHashSet>
-HashTable<ObjectHashSet, ObjectHashSetShape>::New(Isolate*, int n,
- PretenureFlag,
- MinimumCapacity);
-
-template Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape>::Shrink(Handle<NameDictionary>,
- int additionalCapacity);
-
-template Handle<NameDictionary>
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::Add(
- Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
- int*);
-
-template Handle<GlobalDictionary>
-BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::Add(
- Handle<GlobalDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
- int*);
-
-template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash();
-
-template Handle<NumberDictionary>
-Dictionary<NumberDictionary, NumberDictionaryShape>::Add(
- Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails, int*);
-
-template Handle<NameDictionary>
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
- Handle<NameDictionary>, int);
-
-template Handle<SimpleNumberDictionary>
-Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::Add(
- Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
- int*);
-
-template int Dictionary<GlobalDictionary,
- GlobalDictionaryShape>::NumberOfEnumerableProperties();
-
-template int
-Dictionary<NameDictionary, NameDictionaryShape>::NumberOfEnumerableProperties();
-
-template void
-BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CopyEnumKeysTo(
- Handle<GlobalDictionary> dictionary, Handle<FixedArray> storage,
- KeyCollectionMode mode, KeyAccumulator* accumulator);
-
-template void
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::CopyEnumKeysTo(
- Handle<NameDictionary> dictionary, Handle<FixedArray> storage,
- KeyCollectionMode mode, KeyAccumulator* accumulator);
-
-template Handle<FixedArray>
-BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::IterationIndices(
- Handle<GlobalDictionary> dictionary);
-template void
-BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CollectKeysTo(
- Handle<GlobalDictionary> dictionary, KeyAccumulator* keys);
-
-template Handle<FixedArray>
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::IterationIndices(
- Handle<NameDictionary> dictionary);
-template void
-BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
- Handle<NameDictionary> dictionary, KeyAccumulator* keys);
-
-template int Dictionary<NumberDictionary,
- NumberDictionaryShape>::NumberOfEnumerableProperties();
-
namespace {
bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
@@ -16801,7 +16973,7 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
if (s->IsSmi()) {
result = s;
} else {
- result = String::ToNumber(Handle<String>::cast(s));
+ result = String::ToNumber(isolate, Handle<String>::cast(s));
if (!result->IsMinusZero()) {
Handle<String> str = Object::ToString(isolate, result).ToHandleChecked();
// Avoid treating strings like "2E1" and "20" as the same key.
@@ -16920,10 +17092,10 @@ void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
JSObject::InvalidatePrototypeValidityCell(*global);
DCHECK(!global->HasFastProperties());
- auto dictionary = handle(global->global_dictionary());
- int entry = dictionary->FindEntry(name);
+ auto dictionary = handle(global->global_dictionary(), global->GetIsolate());
+ int entry = dictionary->FindEntry(global->GetIsolate(), name);
if (entry == GlobalDictionary::kNotFound) return;
- PropertyCell::InvalidateEntry(dictionary, entry);
+ PropertyCell::InvalidateEntry(global->GetIsolate(), dictionary, entry);
}
Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
@@ -16932,17 +17104,17 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
Isolate* isolate = global->GetIsolate();
DCHECK(!global->HasFastProperties());
Handle<GlobalDictionary> dictionary(global->global_dictionary(), isolate);
- int entry = dictionary->FindEntry(name);
+ int entry = dictionary->FindEntry(isolate, name);
Handle<PropertyCell> cell;
if (entry != GlobalDictionary::kNotFound) {
if (entry_out) *entry_out = entry;
- cell = handle(dictionary->CellAt(entry));
+ cell = handle(dictionary->CellAt(entry), isolate);
PropertyCellType original_cell_type = cell->property_details().cell_type();
DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
original_cell_type == PropertyCellType::kUninitialized);
DCHECK(cell->value()->IsTheHole(isolate));
if (original_cell_type == PropertyCellType::kInvalidated) {
- cell = PropertyCell::InvalidateEntry(dictionary, entry);
+ cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
}
PropertyDetails details(kData, NONE, cell_type);
cell->set_property_details(details);
@@ -16950,8 +17122,8 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
}
cell = isolate->factory()->NewPropertyCell(name);
PropertyDetails details(kData, NONE, cell_type);
- dictionary =
- GlobalDictionary::Add(dictionary, name, cell, details, entry_out);
+ dictionary = GlobalDictionary::Add(isolate, dictionary, name, cell, details,
+ entry_out);
// {*entry_out} is initialized inside GlobalDictionary::Add().
global->SetProperties(*dictionary);
return cell;
@@ -16965,7 +17137,7 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
// algorithm.
class TwoCharHashTableKey : public StringTableKey {
public:
- TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
+ TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint64_t seed)
: StringTableKey(ComputeHashField(c1, c2, seed)), c1_(c1), c2_(c2) {}
bool IsMatch(Object* o) override {
@@ -16982,9 +17154,9 @@ class TwoCharHashTableKey : public StringTableKey {
}
private:
- uint32_t ComputeHashField(uint16_t c1, uint16_t c2, uint32_t seed) {
+ uint32_t ComputeHashField(uint16_t c1, uint16_t c2, uint64_t seed) {
// Char 1.
- uint32_t hash = seed;
+ uint32_t hash = static_cast<uint32_t>(seed);
hash += c1;
hash += hash << 10;
hash ^= hash >> 6;
@@ -17020,7 +17192,7 @@ MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists(
uint16_t c2) {
TwoCharHashTableKey key(c1, c2, isolate->heap()->HashSeed());
Handle<StringTable> string_table = isolate->factory()->string_table();
- int entry = string_table->FindEntry(&key);
+ int entry = string_table->FindEntry(isolate, &key);
if (entry == kNotFound) return MaybeHandle<String>();
Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
@@ -17033,7 +17205,7 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
int expected) {
Handle<StringTable> table = isolate->factory()->string_table();
// We need a key instance for the virtual hash function.
- table = StringTable::EnsureCapacity(table, expected);
+ table = StringTable::EnsureCapacity(isolate, table, expected);
isolate->heap()->SetRootStringTable(*table);
}
@@ -17098,7 +17270,7 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
// static
Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> string) {
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
if (string->IsInternalizedString()) return string;
InternalizedStringKey key(string);
@@ -17111,8 +17283,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(*result);
- cons->set_second(isolate->heap()->empty_string());
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
} else if (string->IsSlicedString()) {
STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
DisallowHeapAllocation no_gc;
@@ -17122,8 +17294,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
: isolate->factory()->cons_string_map();
string->set_map(*map);
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(*result);
- cons->set_second(isolate->heap()->empty_string());
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
}
}
return result;
@@ -17132,16 +17304,16 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
// static
Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
- int entry = table->FindEntry(key);
+ int entry = table->FindEntry(isolate, key);
// String already in table.
if (entry != kNotFound) {
return handle(String::cast(table->KeyAt(entry)), isolate);
}
- table = StringTable::CautiousShrink(table);
+ table = StringTable::CautiousShrink(isolate, table);
// Adding new string. Grow table if needed.
- table = StringTable::EnsureCapacity(table, 1);
+ table = StringTable::EnsureCapacity(isolate, table, 1);
isolate->heap()->SetRootStringTable(*table);
return AddKeyNoResize(isolate, key);
@@ -17157,7 +17329,7 @@ Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
// InvalidStringLength error.
CHECK(!string.is_null());
DCHECK(string->HasHashCode());
- DCHECK_EQ(table->FindEntry(key), kNotFound);
+ DCHECK_EQ(table->FindEntry(isolate, key), kNotFound);
// Add the new string and return it along with the string table.
int entry = table->FindInsertionEntry(key->Hash());
@@ -17167,7 +17339,8 @@ Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
return Handle<String>::cast(string);
}
-Handle<StringTable> StringTable::CautiousShrink(Handle<StringTable> table) {
+Handle<StringTable> StringTable::CautiousShrink(Isolate* isolate,
+ Handle<StringTable> table) {
// Only shrink if the table is very empty to avoid performance penalty.
int capacity = table->Capacity();
int nof = table->NumberOfElements();
@@ -17175,14 +17348,14 @@ Handle<StringTable> StringTable::CautiousShrink(Handle<StringTable> table) {
if (nof > (capacity / kMaxEmptyFactor)) return table;
// Keep capacity for at least half of the current nof elements.
int slack_capacity = nof >> 2;
- return Shrink(table, slack_capacity);
+ return Shrink(isolate, table, slack_capacity);
}
namespace {
class StringTableNoAllocateKey : public StringTableKey {
public:
- StringTableNoAllocateKey(String* string, uint32_t seed)
+ StringTableNoAllocateKey(String* string, uint64_t seed)
: StringTableKey(0), string_(string) {
StringShape shape(string);
one_byte_ = shape.HasOnlyOneByteChars();
@@ -17311,10 +17484,10 @@ class StringTableNoAllocateKey : public StringTableKey {
} // namespace
// static
-Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
+Object* StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
+ String* string) {
DisallowHeapAllocation no_gc;
- Heap* heap = string->GetHeap();
- Isolate* isolate = heap->isolate();
+ Heap* heap = isolate->heap();
StringTable* table = heap->string_table();
StringTableNoAllocateKey key(string, heap->HashSeed());
@@ -17338,7 +17511,7 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
}
DCHECK(!string->IsInternalizedString());
- int entry = table->FindEntry(isolate, &key, key.Hash());
+ int entry = table->FindEntry(ReadOnlyRoots(isolate), &key, key.Hash());
if (entry != kNotFound) {
String* internalized = String::cast(table->KeyAt(entry));
if (FLAG_thin_strings) {
@@ -17367,11 +17540,11 @@ Handle<StringSet> StringSet::New(Isolate* isolate) {
return HashTable::New(isolate, 0);
}
-Handle<StringSet> StringSet::Add(Handle<StringSet> stringset,
+Handle<StringSet> StringSet::Add(Isolate* isolate, Handle<StringSet> stringset,
Handle<String> name) {
- if (!stringset->Has(name)) {
- stringset = EnsureCapacity(stringset, 1);
- uint32_t hash = ShapeT::Hash(name->GetIsolate(), *name);
+ if (!stringset->Has(isolate, name)) {
+ stringset = EnsureCapacity(isolate, stringset, 1);
+ uint32_t hash = ShapeT::Hash(isolate, *name);
int entry = stringset->FindInsertionEntry(hash);
stringset->set(EntryToIndex(entry), *name);
stringset->ElementAdded();
@@ -17379,16 +17552,16 @@ Handle<StringSet> StringSet::Add(Handle<StringSet> stringset,
return stringset;
}
-bool StringSet::Has(Handle<String> name) {
- return FindEntry(*name) != kNotFound;
+bool StringSet::Has(Isolate* isolate, Handle<String> name) {
+ return FindEntry(isolate, *name) != kNotFound;
}
-Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> set,
+Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
+ Handle<ObjectHashSet> set,
Handle<Object> key) {
- Isolate* isolate = set->GetIsolate();
int32_t hash = key->GetOrCreateHash(isolate)->value();
if (!set->Has(isolate, key, hash)) {
- set = EnsureCapacity(set, 1);
+ set = EnsureCapacity(isolate, set, 1);
int entry = set->FindInsertionEntry(hash);
set->set(EntryToIndex(entry), *key);
set->ElementAdded();
@@ -17401,7 +17574,7 @@ Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
LanguageMode language_mode) {
Isolate* isolate = GetIsolate();
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- int entry = FindEntry(&key);
+ int entry = FindEntry(isolate, &key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
int index = EntryToIndex(entry);
if (!get(index)->IsFixedArray()) return isolate->factory()->undefined_value();
@@ -17524,15 +17697,16 @@ FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
Handle<String> src, Handle<Context> native_context,
LanguageMode language_mode) {
- Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared());
+ Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
+ native_context->GetIsolate());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- int entry = FindEntry(&key);
+ int entry = FindEntry(GetIsolate(), &key);
if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
if (!get(index)->IsFixedArray()) return MaybeHandle<SharedFunctionInfo>();
Object* obj = get(index + 1);
if (obj->IsSharedFunctionInfo()) {
- return handle(SharedFunctionInfo::cast(obj));
+ return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
}
return MaybeHandle<SharedFunctionInfo>();
}
@@ -17542,7 +17716,7 @@ InfoCellPair CompilationCacheTable::LookupEval(
Handle<Context> native_context, LanguageMode language_mode, int position) {
InfoCellPair empty_result;
StringSharedKey key(src, outer_info, language_mode, position);
- int entry = FindEntry(&key);
+ int entry = FindEntry(GetIsolate(), &key);
if (entry == kNotFound) return empty_result;
int index = EntryToIndex(entry);
if (!get(index)->IsFixedArray()) return empty_result;
@@ -17560,7 +17734,7 @@ Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
Isolate* isolate = GetIsolate();
DisallowHeapAllocation no_allocation;
RegExpKey key(src, flags);
- int entry = FindEntry(&key);
+ int entry = FindEntry(isolate, &key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
@@ -17572,7 +17746,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
Isolate* isolate = cache->GetIsolate();
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(cache, 1);
+ cache = EnsureCapacity(isolate, cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
@@ -17584,11 +17758,12 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode,
Handle<SharedFunctionInfo> value) {
- Isolate* isolate = cache->GetIsolate();
- Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared());
+ Isolate* isolate = native_context->GetIsolate();
+ Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
+ isolate);
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(cache, 1);
+ cache = EnsureCapacity(isolate, cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
@@ -17601,11 +17776,11 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position) {
- Isolate* isolate = cache->GetIsolate();
+ Isolate* isolate = native_context->GetIsolate();
StringSharedKey key(src, outer_info, value->language_mode(), position);
{
Handle<Object> k = key.AsHandle(isolate);
- int entry = cache->FindEntry(&key);
+ int entry = cache->FindEntry(isolate, &key);
if (entry != kNotFound) {
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
@@ -17618,7 +17793,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
}
}
- cache = EnsureCapacity(cache, 1);
+ cache = EnsureCapacity(isolate, cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
Handle<Object> k =
isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
@@ -17628,12 +17803,11 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
return cache;
}
-
Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- JSRegExp::Flags flags, Handle<FixedArray> value) {
+ Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value) {
RegExpKey key(src, flags);
- cache = EnsureCapacity(cache, 1);
+ cache = EnsureCapacity(isolate, cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
// We store the value in the key slot, and compare the search key
// to the stored value with a custon IsMatch function during lookups.
@@ -17646,7 +17820,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
void CompilationCacheTable::Age() {
DisallowHeapAllocation no_allocation;
- Object* the_hole_value = GetHeap()->the_hole_value();
+ Object* the_hole_value = GetReadOnlyRoots().the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
@@ -17676,7 +17850,7 @@ void CompilationCacheTable::Age() {
void CompilationCacheTable::Remove(Object* value) {
DisallowHeapAllocation no_allocation;
- Object* the_hole_value = GetHeap()->the_hole_value();
+ Object* the_hole_value = GetReadOnlyRoots().the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
@@ -17704,83 +17878,85 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
template <typename Derived, typename Shape>
Handle<Derived> BaseNameDictionary<Derived, Shape>::EnsureCapacity(
- Handle<Derived> dictionary, int n) {
+ Isolate* isolate, Handle<Derived> dictionary, int n) {
// Check whether there are enough enumeration indices to add n elements.
if (!PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) {
// If not, we generate new indices for the properties.
int length = dictionary->NumberOfElements();
- Handle<FixedArray> iteration_order = IterationIndices(dictionary);
+ Handle<FixedArray> iteration_order = IterationIndices(isolate, dictionary);
DCHECK_EQ(length, iteration_order->length());
// Iterate over the dictionary using the enumeration order and update
// the dictionary with new enumeration indices.
for (int i = 0; i < length; i++) {
int index = Smi::ToInt(iteration_order->get(i));
- DCHECK(dictionary->IsKey(dictionary->GetIsolate(),
+ DCHECK(dictionary->IsKey(dictionary->GetReadOnlyRoots(),
dictionary->KeyAt(index)));
int enum_index = PropertyDetails::kInitialIndex + i;
PropertyDetails details = dictionary->DetailsAt(index);
PropertyDetails new_details = details.set_index(enum_index);
- dictionary->DetailsAtPut(index, new_details);
+ dictionary->DetailsAtPut(isolate, index, new_details);
}
// Set the next enumeration index.
dictionary->SetNextEnumerationIndex(PropertyDetails::kInitialIndex +
length);
}
- return HashTable<Derived, Shape>::EnsureCapacity(dictionary, n);
+ return HashTable<Derived, Shape>::EnsureCapacity(isolate, dictionary, n);
}
template <typename Derived, typename Shape>
Handle<Derived> Dictionary<Derived, Shape>::DeleteEntry(
- Handle<Derived> dictionary, int entry) {
+ Isolate* isolate, Handle<Derived> dictionary, int entry) {
DCHECK(Shape::kEntrySize != 3 ||
dictionary->DetailsAt(entry).IsConfigurable());
- dictionary->ClearEntry(entry);
+ dictionary->ClearEntry(isolate, entry);
dictionary->ElementRemoved();
- return Shrink(dictionary);
+ return Shrink(isolate, dictionary);
}
template <typename Derived, typename Shape>
-Handle<Derived> Dictionary<Derived, Shape>::AtPut(Handle<Derived> dictionary,
+Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
+ Handle<Derived> dictionary,
Key key, Handle<Object> value,
PropertyDetails details) {
- int entry = dictionary->FindEntry(key);
+ int entry = dictionary->FindEntry(isolate, key);
// If the entry is present set the value;
if (entry == Dictionary::kNotFound) {
- return Derived::Add(dictionary, key, value, details);
+ return Derived::Add(isolate, dictionary, key, value, details);
}
// We don't need to copy over the enumeration index.
dictionary->ValueAtPut(entry, *value);
- if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
+ if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(isolate, entry, details);
return dictionary;
}
template <typename Derived, typename Shape>
Handle<Derived>
BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
- Handle<Derived> dictionary, Key key, Handle<Object> value,
+ Isolate* isolate, Handle<Derived> dictionary, Key key, Handle<Object> value,
PropertyDetails details, int* entry_out) {
// Insert element at empty or deleted entry
- return Dictionary<Derived, Shape>::Add(dictionary, key, value, details,
- entry_out);
+ return Dictionary<Derived, Shape>::Add(isolate, dictionary, key, value,
+ details, entry_out);
}
// GCC workaround: Explicitly instantiate template method for NameDictionary
// to avoid "undefined reference" issues during linking.
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::
- AddNoUpdateNextEnumerationIndex(Handle<NameDictionary>, Handle<Name>,
- Handle<Object>, PropertyDetails, int*);
+ AddNoUpdateNextEnumerationIndex(Isolate* isolate, Handle<NameDictionary>,
+ Handle<Name>, Handle<Object>,
+ PropertyDetails, int*);
template <typename Derived, typename Shape>
Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
- Handle<Derived> dictionary, Key key, Handle<Object> value,
+ Isolate* isolate, Handle<Derived> dictionary, Key key, Handle<Object> value,
PropertyDetails details, int* entry_out) {
// Insert element at empty or deleted entry
DCHECK_EQ(0, details.dictionary_index());
@@ -17789,27 +17965,27 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
int index = dictionary->NextEnumerationIndex();
details = details.set_index(index);
dictionary->SetNextEnumerationIndex(index + 1);
- return AddNoUpdateNextEnumerationIndex(dictionary, key, value, details,
- entry_out);
+ return AddNoUpdateNextEnumerationIndex(isolate, dictionary, key, value,
+ details, entry_out);
}
template <typename Derived, typename Shape>
-Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
+Handle<Derived> Dictionary<Derived, Shape>::Add(Isolate* isolate,
+ Handle<Derived> dictionary,
Key key, Handle<Object> value,
PropertyDetails details,
int* entry_out) {
- Isolate* isolate = dictionary->GetIsolate();
uint32_t hash = Shape::Hash(isolate, key);
// Valdate key is absent.
- SLOW_DCHECK((dictionary->FindEntry(key) == Dictionary::kNotFound));
+ SLOW_DCHECK((dictionary->FindEntry(isolate, key) == Dictionary::kNotFound));
// Check whether the dictionary should be extended.
- dictionary = Derived::EnsureCapacity(dictionary, 1);
+ dictionary = Derived::EnsureCapacity(isolate, dictionary, 1);
// Compute the key object.
Handle<Object> k = Shape::AsHandle(isolate, key);
uint32_t entry = dictionary->FindInsertionEntry(hash);
- dictionary->SetEntry(entry, *k, *value, details);
+ dictionary->SetEntry(isolate, entry, *k, *value, details);
DCHECK(dictionary->KeyAt(entry)->IsNumber() ||
Shape::Unwrap(dictionary->KeyAt(entry))->IsUniqueName());
dictionary->ElementAdded();
@@ -17819,18 +17995,18 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
// static
Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
- Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Isolate* isolate, Handle<SimpleNumberDictionary> dictionary, uint32_t key,
Handle<Object> value) {
- return AtPut(dictionary, key, value, PropertyDetails::Empty());
+ return AtPut(isolate, dictionary, key, value, PropertyDetails::Empty());
}
bool NumberDictionary::HasComplexElements() {
if (!requires_slow_elements()) return false;
- Isolate* isolate = this->GetIsolate();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!this->ToKey(isolate, i, &k)) continue;
+ if (!this->ToKey(roots, i, &k)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.kind() == kAccessor) return true;
PropertyAttributes attr = details.attributes();
@@ -17863,21 +18039,22 @@ void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
}
Handle<NumberDictionary> NumberDictionary::Set(
- Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value,
- Handle<JSObject> dictionary_holder, PropertyDetails details) {
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, Handle<JSObject> dictionary_holder,
+ PropertyDetails details) {
dictionary->UpdateMaxNumberKey(key, dictionary_holder);
- return AtPut(dictionary, key, value, details);
+ return AtPut(isolate, dictionary, key, value, details);
}
void NumberDictionary::CopyValuesTo(FixedArray* elements) {
- Isolate* isolate = this->GetIsolate();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
int pos = 0;
int capacity = this->Capacity();
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k;
- if (this->ToKey(isolate, i, &k)) {
+ if (this->ToKey(roots, i, &k)) {
elements->set(pos++, this->ValueAt(i), mode);
}
}
@@ -17886,12 +18063,12 @@ void NumberDictionary::CopyValuesTo(FixedArray* elements) {
template <typename Derived, typename Shape>
int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
- Isolate* isolate = this->GetIsolate();
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!this->ToKey(isolate, i, &k)) continue;
+ if (!this->ToKey(roots, i, &k)) continue;
if (k->FilterKey(ENUMERABLE_STRINGS)) continue;
PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
@@ -17915,16 +18092,16 @@ struct EnumIndexComparator {
template <typename Derived, typename Shape>
void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
- Handle<Derived> dictionary, Handle<FixedArray> storage,
+ Isolate* isolate, Handle<Derived> dictionary, Handle<FixedArray> storage,
KeyCollectionMode mode, KeyAccumulator* accumulator) {
DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
- Isolate* isolate = dictionary->GetIsolate();
int length = storage->length();
int capacity = dictionary->Capacity();
int properties = 0;
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
Object* key;
- if (!dictionary->ToKey(isolate, i, &key)) continue;
+ if (!dictionary->ToKey(roots, i, &key)) continue;
bool is_shadowing_key = false;
if (key->IsSymbol()) continue;
PropertyDetails details = dictionary->DetailsAt(i);
@@ -17964,18 +18141,18 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
template <typename Derived, typename Shape>
Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
- Handle<Derived> dictionary) {
- Isolate* isolate = dictionary->GetIsolate();
+ Isolate* isolate, Handle<Derived> dictionary) {
int capacity = dictionary->Capacity();
int length = dictionary->NumberOfElements();
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ ReadOnlyRoots roots(isolate);
int array_size = 0;
{
DisallowHeapAllocation no_gc;
Derived* raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!raw_dictionary->ToKey(isolate, i, &k)) continue;
+ if (!raw_dictionary->ToKey(roots, i, &k)) continue;
array->set(array_size++, Smi::FromInt(i));
}
@@ -17989,14 +18166,14 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
- array->Shrink(array_size);
- return array;
+ return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
}
template <typename Derived, typename Shape>
void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
Handle<Derived> dictionary, KeyAccumulator* keys) {
Isolate* isolate = keys->isolate();
+ ReadOnlyRoots roots(isolate);
int capacity = dictionary->Capacity();
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
@@ -18007,7 +18184,7 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
Derived* raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!raw_dictionary->ToKey(isolate, i, &k)) continue;
+ if (!raw_dictionary->ToKey(roots, i, &k)) continue;
if (k->FilterKey(filter)) continue;
PropertyDetails details = raw_dictionary->DetailsAt(i);
if ((details.attributes() & filter) != 0) {
@@ -18056,86 +18233,101 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
template <typename Derived, typename Shape>
Object* Dictionary<Derived, Shape>::SlowReverseLookup(Object* value) {
Derived* dictionary = Derived::cast(this);
- Isolate* isolate = dictionary->GetIsolate();
+ ReadOnlyRoots roots = dictionary->GetReadOnlyRoots();
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k;
- if (!dictionary->ToKey(isolate, i, &k)) continue;
+ if (!dictionary->ToKey(roots, i, &k)) continue;
Object* e = dictionary->ValueAt(i);
if (e == value) return k;
}
- return isolate->heap()->undefined_value();
+ return roots.undefined_value();
}
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
+ Handle<Derived> table) {
+ int length = table->length();
+ for (int i = Derived::EntryToIndex(0); i < length; i++) {
+ table->set_the_hole(i);
+ }
+}
-Object* ObjectHashTable::Lookup(Isolate* isolate, Handle<Object> key,
- int32_t hash) {
+template <typename Derived, typename Shape>
+Object* ObjectHashTableBase<Derived, Shape>::Lookup(ReadOnlyRoots roots,
+ Handle<Object> key,
+ int32_t hash) {
DisallowHeapAllocation no_gc;
- DCHECK(IsKey(isolate, *key));
+ DCHECK(this->IsKey(roots, *key));
- int entry = FindEntry(isolate, key, hash);
- if (entry == kNotFound) return isolate->heap()->the_hole_value();
- return get(EntryToIndex(entry) + 1);
+ int entry = this->FindEntry(roots, key, hash);
+ if (entry == kNotFound) return roots.the_hole_value();
+ return this->get(Derived::EntryToIndex(entry) + 1);
}
-
-Object* ObjectHashTable::Lookup(Handle<Object> key) {
+template <typename Derived, typename Shape>
+Object* ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- DCHECK(IsKey(isolate, *key));
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ DCHECK(this->IsKey(roots, *key));
// If the object does not have an identity hash, it was never used as a key.
Object* hash = key->GetHash();
- if (hash->IsUndefined(isolate)) {
- return isolate->heap()->the_hole_value();
+ if (hash->IsUndefined(roots)) {
+ return roots.the_hole_value();
}
- return Lookup(isolate, key, Smi::ToInt(hash));
+ return Lookup(roots, key, Smi::ToInt(hash));
}
-Object* ObjectHashTable::ValueAt(int entry) {
- return get(EntryToValueIndex(entry));
+template <typename Derived, typename Shape>
+Object* ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key,
+ int32_t hash) {
+ return Lookup(this->GetReadOnlyRoots(), key, hash);
}
-Object* ObjectHashTable::Lookup(Handle<Object> key, int32_t hash) {
- return Lookup(GetIsolate(), key, hash);
+template <typename Derived, typename Shape>
+Object* ObjectHashTableBase<Derived, Shape>::ValueAt(int entry) {
+ return this->get(EntryToValueIndex(entry));
}
-
-Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
- Isolate* isolate = table->GetIsolate();
- DCHECK(table->IsKey(isolate, *key));
- DCHECK(!value->IsTheHole(isolate));
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Handle<Derived> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ Isolate* isolate = Heap::FromWritableHeapObject(*table)->isolate();
+ DCHECK(table->IsKey(ReadOnlyRoots(isolate), *key));
+ DCHECK(!value->IsTheHole(ReadOnlyRoots(isolate)));
// Make sure the key object has an identity hash code.
int32_t hash = key->GetOrCreateHash(isolate)->value();
- return Put(table, key, value, hash);
+ return ObjectHashTableBase<Derived, Shape>::Put(isolate, table, key, value,
+ hash);
}
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
+ Handle<Derived> table,
+ Handle<Object> key,
+ Handle<Object> value,
+ int32_t hash) {
+ ReadOnlyRoots roots(isolate);
+ DCHECK(table->IsKey(roots, *key));
+ DCHECK(!value->IsTheHole(roots));
-Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value,
- int32_t hash) {
- Isolate* isolate = table->GetIsolate();
- DCHECK(table->IsKey(isolate, *key));
- DCHECK(!value->IsTheHole(isolate));
-
- int entry = table->FindEntry(isolate, key, hash);
+ int entry = table->FindEntry(roots, key, hash);
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
- table->set(EntryToIndex(entry) + 1, *value);
+ table->set(Derived::EntryToIndex(entry) + 1, *value);
return table;
}
// Rehash if more than 33% of the entries are deleted entries.
// TODO(jochen): Consider to shrink the fixed array in place.
if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
- table->Rehash();
+ table->Rehash(isolate);
}
// If we're out of luck, we didn't get a GC recently, and so rehashing
// isn't enough to avoid a crash.
@@ -18148,40 +18340,39 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kFullHashtable);
}
- table->Rehash();
+ table->Rehash(isolate);
}
}
// Check whether the hash table should be extended.
- table = EnsureCapacity(table, 1);
+ table = Derived::EnsureCapacity(isolate, table, 1);
table->AddEntry(table->FindInsertionEntry(hash), *key, *value);
return table;
}
-
-Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
- Handle<Object> key,
- bool* was_present) {
- DCHECK(table->IsKey(table->GetIsolate(), *key));
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
+ Isolate* isolate, Handle<Derived> table, Handle<Object> key,
+ bool* was_present) {
+ DCHECK(table->IsKey(table->GetReadOnlyRoots(), *key));
Object* hash = key->GetHash();
- if (hash->IsUndefined(table->GetIsolate())) {
+ if (hash->IsUndefined()) {
*was_present = false;
return table;
}
- return Remove(table, key, was_present, Smi::ToInt(hash));
+ return Remove(isolate, table, key, was_present, Smi::ToInt(hash));
}
+template <typename Derived, typename Shape>
+Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
+ Isolate* isolate, Handle<Derived> table, Handle<Object> key,
+ bool* was_present, int32_t hash) {
+ ReadOnlyRoots roots = table->GetReadOnlyRoots();
+ DCHECK(table->IsKey(roots, *key));
-Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
- Handle<Object> key,
- bool* was_present,
- int32_t hash) {
- Isolate* isolate = table->GetIsolate();
- DCHECK(table->IsKey(isolate, *key));
-
- int entry = table->FindEntry(isolate, key, hash);
+ int entry = table->FindEntry(roots, key, hash);
if (entry == kNotFound) {
*was_present = false;
return table;
@@ -18189,21 +18380,22 @@ Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
*was_present = true;
table->RemoveEntry(entry);
- return Shrink(table);
+ return Derived::Shrink(isolate, table);
}
-
-void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
- set(EntryToIndex(entry), key);
- set(EntryToIndex(entry) + 1, value);
- ElementAdded();
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::AddEntry(int entry, Object* key,
+ Object* value) {
+ this->set(Derived::EntryToIndex(entry), key);
+ this->set(Derived::EntryToIndex(entry) + 1, value);
+ this->ElementAdded();
}
-
-void ObjectHashTable::RemoveEntry(int entry) {
- set_the_hole(EntryToIndex(entry));
- set_the_hole(EntryToIndex(entry) + 1);
- ElementRemoved();
+template <typename Derived, typename Shape>
+void ObjectHashTableBase<Derived, Shape>::RemoveEntry(int entry) {
+ this->set_the_hole(Derived::EntryToIndex(entry));
+ this->set_the_hole(Derived::EntryToIndex(entry) + 1);
+ this->ElementRemoved();
}
@@ -18212,10 +18404,9 @@ void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
set->set_table(*table);
}
-
-void JSSet::Clear(Handle<JSSet> set) {
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
- table = OrderedHashSet::Clear(table);
+void JSSet::Clear(Isolate* isolate, Handle<JSSet> set) {
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()), isolate);
+ table = OrderedHashSet::Clear(isolate, table);
set->set_table(*table);
}
@@ -18225,17 +18416,16 @@ void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
map->set_table(*table);
}
-
-void JSMap::Clear(Handle<JSMap> map) {
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
- table = OrderedHashMap::Clear(table);
+void JSMap::Clear(Isolate* isolate, Handle<JSMap> map) {
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()), isolate);
+ table = OrderedHashMap::Clear(isolate, table);
map->set_table(*table);
}
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
- Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
+ Handle<EphemeronHashTable> table = EphemeronHashTable::New(isolate, 0);
weak_collection->set_table(*table);
}
@@ -18244,15 +18434,16 @@ void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
Handle<Object> key, Handle<Object> value,
int32_t hash) {
DCHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- DCHECK(table->IsKey(table->GetIsolate(), *key));
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Put(table, key, value, hash);
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()),
+ weak_collection->GetIsolate());
+ DCHECK(table->IsKey(weak_collection->GetReadOnlyRoots(), *key));
+ Handle<EphemeronHashTable> new_table = EphemeronHashTable::Put(
+ weak_collection->GetIsolate(), table, key, value, hash);
weak_collection->set_table(*new_table);
if (*table != *new_table) {
// Zap the old table since we didn't record slots for its elements.
- table->FillWithHoles(0, table->length());
+ EphemeronHashTable::FillEntriesWithHoles(table);
}
}
@@ -18260,16 +18451,17 @@ void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
Handle<Object> key, int32_t hash) {
DCHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- DCHECK(table->IsKey(table->GetIsolate(), *key));
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()),
+ weak_collection->GetIsolate());
+ DCHECK(table->IsKey(weak_collection->GetReadOnlyRoots(), *key));
bool was_present = false;
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Remove(table, key, &was_present, hash);
+ Handle<EphemeronHashTable> new_table = EphemeronHashTable::Remove(
+ weak_collection->GetIsolate(), table, key, &was_present, hash);
weak_collection->set_table(*new_table);
if (*table != *new_table) {
// Zap the old table since we didn't record slots for its elements.
- table->FillWithHoles(0, table->length());
+ EphemeronHashTable::FillEntriesWithHoles(table);
}
return was_present;
}
@@ -18277,7 +18469,8 @@ bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
int max_entries) {
Isolate* isolate = holder->GetIsolate();
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<EphemeronHashTable> table(EphemeronHashTable::cast(holder->table()),
+ isolate);
if (max_entries == 0 || max_entries > table->NumberOfElements()) {
max_entries = table->NumberOfElements();
}
@@ -18291,11 +18484,12 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
{
DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots = ReadOnlyRoots(isolate);
int count = 0;
for (int i = 0;
count / values_per_entry < max_entries && i < table->Capacity(); i++) {
Object* key;
- if (table->ToKey(isolate, i, &key)) {
+ if (table->ToKey(roots, i, &key)) {
entries->set(count++, key);
if (values_per_entry > 1) {
Object* value = table->Lookup(handle(key, isolate));
@@ -18375,7 +18569,7 @@ Object* JSDate::DoGetField(FieldIndex index) {
}
double time = value()->Number();
- if (std::isnan(time)) return GetIsolate()->heap()->nan_value();
+ if (std::isnan(time)) return GetReadOnlyRoots().nan_value();
int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
int days = DateCache::DaysFromTime(local_time_ms);
@@ -18394,7 +18588,7 @@ Object* JSDate::GetUTCField(FieldIndex index,
DateCache* date_cache) {
DCHECK_GE(index, kFirstUTCField);
- if (std::isnan(value)) return GetIsolate()->heap()->nan_value();
+ if (std::isnan(value)) return GetReadOnlyRoots().nan_value();
int64_t time_ms = static_cast<int64_t>(value);
@@ -18443,7 +18637,7 @@ Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
void JSDate::SetValue(Object* value, bool is_value_nan) {
set_value(value);
if (is_value_nan) {
- HeapNumber* nan = GetIsolate()->heap()->nan_value();
+ HeapNumber* nan = GetReadOnlyRoots().nan_value();
set_cache_stamp(nan, SKIP_WRITE_BARRIER);
set_year(nan, SKIP_WRITE_BARRIER);
set_month(nan, SKIP_WRITE_BARRIER);
@@ -18477,21 +18671,10 @@ void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
-namespace {
-
-Script* ScriptFromJSValue(Object* in) {
- DCHECK(in->IsJSValue());
- JSValue* jsvalue = JSValue::cast(in);
- DCHECK(jsvalue->value()->IsScript());
- return Script::cast(jsvalue->value());
-}
-
-} // namespace
-
int JSMessageObject::GetLineNumber() const {
if (start_position() == -1) return Message::kNoLineNumberInfo;
- Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+ Handle<Script> the_script(script(), GetIsolate());
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
@@ -18506,7 +18689,7 @@ int JSMessageObject::GetLineNumber() const {
int JSMessageObject::GetColumnNumber() const {
if (start_position() == -1) return -1;
- Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+ Handle<Script> the_script(script(), GetIsolate());
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
@@ -18519,9 +18702,9 @@ int JSMessageObject::GetColumnNumber() const {
}
Handle<String> JSMessageObject::GetSourceLine() const {
- Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+ Isolate* isolate = GetIsolate();
+ Handle<Script> the_script(script(), isolate);
- Isolate* isolate = the_script->GetIsolate();
if (the_script->type() == Script::TYPE_WASM) {
return isolate->factory()->empty_string();
}
@@ -18552,13 +18735,18 @@ void JSArrayBuffer::Neuter() {
}
}
+void JSArrayBuffer::StopTrackingWasmMemory(Isolate* isolate) {
+ DCHECK(is_wasm_memory());
+ isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(backing_store());
+ set_is_wasm_memory(false);
+}
+
void JSArrayBuffer::FreeBackingStoreFromMainThread() {
if (allocation_base() == nullptr) {
return;
}
- FreeBackingStore(GetIsolate(),
- {allocation_base(), allocation_length(), backing_store(),
- allocation_mode(), is_wasm_memory()});
+ FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
+ backing_store(), is_wasm_memory()});
// Zero out the backing store and allocation base to avoid dangling
// pointers.
set_backing_store(nullptr);
@@ -18566,16 +18754,10 @@ void JSArrayBuffer::FreeBackingStoreFromMainThread() {
// static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
- if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
- bool needs_free = true;
- if (allocation.is_wasm_memory) {
- wasm::WasmMemoryTracker* memory_tracker =
- isolate->wasm_engine()->memory_tracker();
- if (memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
- needs_free = false;
- }
- }
- if (needs_free) {
+ if (allocation.is_wasm_memory) {
+ wasm::WasmMemoryTracker* memory_tracker =
+ isolate->wasm_engine()->memory_tracker();
+ if (!memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
CHECK(FreePages(allocation.allocation_base, allocation.length));
}
} else {
@@ -18668,7 +18850,7 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
Handle<FixedTypedArrayBase> fixed_typed_array(
- FixedTypedArrayBase::cast(typed_array->elements()));
+ FixedTypedArrayBase::cast(typed_array->elements()), isolate);
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
@@ -18678,6 +18860,10 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
fixed_typed_array->DataSize());
+ if (backing_store == nullptr) {
+ isolate->heap()->FatalProcessOutOfMemory(
+ "JSTypedArray::MaterializeArrayBuffer");
+ }
buffer->set_is_external(false);
DCHECK(buffer->byte_length()->IsSmi() ||
buffer->byte_length()->IsHeapNumber());
@@ -18705,18 +18891,18 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
if (!is_on_heap()) {
- Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()));
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
+ GetIsolate());
return array_buffer;
}
- Handle<JSTypedArray> self(this);
+ Handle<JSTypedArray> self(this, GetIsolate());
return MaterializeArrayBuffer(self);
}
Handle<PropertyCell> PropertyCell::InvalidateEntry(
- Handle<GlobalDictionary> dictionary, int entry) {
- Isolate* isolate = dictionary->GetIsolate();
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry) {
// Swap with a copy.
- Handle<PropertyCell> cell(dictionary->CellAt(entry));
+ Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
Handle<Name> name(cell->name(), isolate);
Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(name);
new_cell->set_value(cell->value());
@@ -18729,9 +18915,9 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
new_cell->set_property_details(details);
// Old cell is ready for invalidation.
if (is_the_hole) {
- cell->set_value(isolate->heap()->undefined_value());
+ cell->set_value(ReadOnlyRoots(isolate).undefined_value());
} else {
- cell->set_value(isolate->heap()->the_hole_value());
+ cell->set_value(ReadOnlyRoots(isolate).the_hole_value());
}
details = details.set_cell_type(PropertyCellType::kInvalidated);
cell->set_property_details(details);
@@ -18760,12 +18946,11 @@ static bool RemainsConstantType(Handle<PropertyCell> cell,
return false;
}
-
-PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
+ Handle<PropertyCell> cell,
Handle<Object> value,
PropertyDetails details) {
PropertyCellType type = details.cell_type();
- Isolate* isolate = cell->GetIsolate();
DCHECK(!value->IsTheHole(isolate));
if (cell->value()->IsTheHole(isolate)) {
switch (type) {
@@ -18797,11 +18982,10 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
}
Handle<PropertyCell> PropertyCell::PrepareForValue(
- Handle<GlobalDictionary> dictionary, int entry, Handle<Object> value,
- PropertyDetails details) {
- Isolate* isolate = dictionary->GetIsolate();
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+ Handle<Object> value, PropertyDetails details) {
DCHECK(!value->IsTheHole(isolate));
- Handle<PropertyCell> cell(dictionary->CellAt(entry));
+ Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
const PropertyDetails original_details = cell->property_details();
// Data accesses could be cached in ics or optimized code.
bool invalidate =
@@ -18820,8 +19004,11 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
DCHECK_LT(0, index);
details = details.set_index(index);
- PropertyCellType new_type = UpdatedType(cell, value, original_details);
- if (invalidate) cell = PropertyCell::InvalidateEntry(dictionary, entry);
+ PropertyCellType new_type =
+ UpdatedType(isolate, cell, value, original_details);
+ if (invalidate) {
+ cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
+ }
// Install new property details.
details = details.set_cell_type(new_type);
@@ -18846,11 +19033,11 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
// static
-void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
+void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
+ Handle<PropertyCell> cell,
Handle<Object> new_value) {
if (cell->value() != *new_value) {
cell->set_value(*new_value);
- Isolate* isolate = cell->GetIsolate();
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
@@ -18921,11 +19108,109 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
Handle<FunctionTemplateInfo>::cast(getter);
// Check if the accessor uses a cached property.
if (!fti->cached_property_name()->IsTheHole(isolate)) {
- return handle(Name::cast(fti->cached_property_name()));
+ return handle(Name::cast(fti->cached_property_name()), isolate);
}
}
return MaybeHandle<Name>();
}
+// Force instantiation of template instances class.
+// Please note this list is compiler dependent.
+// Keep this at the end of this file
+
+template class HashTable<StringTable, StringTableShape>;
+
+template class HashTable<CompilationCacheTable, CompilationCacheShape>;
+
+template class HashTable<ObjectHashTable, ObjectHashTableShape>;
+
+template class HashTable<EphemeronHashTable, EphemeronHashTableShape>;
+
+template class ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>;
+
+template class ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>;
+
+template class Dictionary<NameDictionary, NameDictionaryShape>;
+
+template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<NumberDictionary, NumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template Handle<NameDictionary>
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
+ Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
+
+template Handle<GlobalDictionary>
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::New(
+ Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
+
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
+ PretenureFlag,
+ MinimumCapacity);
+
+template Handle<ObjectHashSet>
+HashTable<ObjectHashSet, ObjectHashSetShape>::New(Isolate*, int n,
+ PretenureFlag,
+ MinimumCapacity);
+
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
+ Handle<NameDictionary>,
+ int additionalCapacity);
+
+template Handle<NameDictionary>
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::Add(
+ Isolate* isolate, Handle<NameDictionary>, Handle<Name>, Handle<Object>,
+ PropertyDetails, int*);
+
+template Handle<GlobalDictionary>
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::Add(
+ Isolate* isolate, Handle<GlobalDictionary>, Handle<Name>, Handle<Object>,
+ PropertyDetails, int*);
+
+template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash(
+ Isolate* isolate);
+
+template Handle<NameDictionary>
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
+ Isolate* isolate, Handle<NameDictionary>, int);
+
+template void
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CopyEnumKeysTo(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
+
+template void
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::CopyEnumKeysTo(
+ Isolate* isolate, Handle<NameDictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
+
+template Handle<FixedArray>
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::IterationIndices(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary);
+template void
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CollectKeysTo(
+ Handle<GlobalDictionary> dictionary, KeyAccumulator* keys);
+
+template Handle<FixedArray>
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::IterationIndices(
+ Isolate* isolate, Handle<NameDictionary> dictionary);
+template void
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
+ Handle<NameDictionary> dictionary, KeyAccumulator* keys);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index ed88ab6f34..be3e2b8dac 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -20,7 +20,9 @@
#include "src/field-index.h"
#include "src/flags.h"
#include "src/messages.h"
+#include "src/objects-definitions.h"
#include "src/property-details.h"
+#include "src/roots.h"
#include "src/utils.h"
#if V8_TARGET_ARCH_ARM
@@ -74,6 +76,7 @@
// - JSMessageObject
// - JSModuleNamespace
// - JSLocale // If V8_INTL_SUPPORT enabled.
+// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
// - WasmGlobalObject
// - WasmInstanceObject
// - WasmMemoryObject
@@ -103,8 +106,6 @@
// - ModuleInfo
// - ScriptContextTable
// - FixedArrayOfWeakCells
-// - WasmSharedModuleData
-// - WasmCompiledModule
// - FixedDoubleArray
// - Name
// - String
@@ -169,10 +170,13 @@
// - PromiseResolveThenableJobTask
// - Module
// - ModuleInfoEntry
-// - PreParsedScopeData
// - WeakCell
// - FeedbackCell
// - FeedbackVector
+// - PreParsedScopeData
+// - UncompiledData
+// - UncompiledDataWithoutPreParsedScope
+// - UncompiledDataWithPreParsedScope
//
// Formats of Object*:
// Smi: [31 bit signed int] 0
@@ -301,316 +305,6 @@ const int kVariableSizeSentinel = 0;
const int kStubMajorKeyBits = 8;
const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
-// All Maps have a field instance_type containing a InstanceType.
-// It describes the type of the instances.
-//
-// As an example, a JavaScript object is a heap object and its map
-// instance_type is JS_OBJECT_TYPE.
-//
-// The names of the string instance types are intended to systematically
-// mirror their encoding in the instance_type field of the map. The default
-// encoding is considered TWO_BYTE. It is not mentioned in the name. ONE_BYTE
-// encoding is mentioned explicitly in the name. Likewise, the default
-// representation is considered sequential. It is not mentioned in the
-// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
-// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
-// string) or a INTERNALIZED_STRING_TYPE (if it is a internalized string).
-//
-// NOTE: The following things are some that depend on the string types having
-// instance_types that are less than those of all other types:
-// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
-// Object::IsString.
-//
-// NOTE: Everything following JS_VALUE_TYPE is considered a
-// JSObject for GC purposes. The first four entries here have typeof
-// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-//
-// NOTE: List had to be split into two, because of conditional item(s) from
-// INTL namespace. They can't just be appended to the end, because of the
-// checks we do in tests (expecting JS_FUNCTION_TYPE to be last).
-#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- V(INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(THIN_STRING_TYPE) \
- V(ONE_BYTE_STRING_TYPE) \
- V(CONS_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SLICED_ONE_BYTE_STRING_TYPE) \
- V(THIN_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
- V(SYMBOL_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(BIGINT_TYPE) \
- V(ODDBALL_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(MUTABLE_HEAP_NUMBER_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(BYTECODE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- \
- V(FIXED_INT8_ARRAY_TYPE) \
- V(FIXED_UINT8_ARRAY_TYPE) \
- V(FIXED_INT16_ARRAY_TYPE) \
- V(FIXED_UINT16_ARRAY_TYPE) \
- V(FIXED_INT32_ARRAY_TYPE) \
- V(FIXED_UINT32_ARRAY_TYPE) \
- V(FIXED_FLOAT32_ARRAY_TYPE) \
- V(FIXED_FLOAT64_ARRAY_TYPE) \
- V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
- V(FIXED_BIGINT64_ARRAY_TYPE) \
- V(FIXED_BIGUINT64_ARRAY_TYPE) \
- \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(FEEDBACK_METADATA_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(ALLOCATION_SITE_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(INTERPRETER_DATA_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(MODULE_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(PROMISE_CAPABILITY_TYPE) \
- V(PROMISE_REACTION_TYPE) \
- V(PROTOTYPE_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
- V(TUPLE2_TYPE) \
- V(TUPLE3_TYPE) \
- V(WASM_COMPILED_MODULE_TYPE) \
- V(WASM_DEBUG_INFO_TYPE) \
- V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
- V(WASM_SHARED_MODULE_DATA_TYPE) \
- \
- V(CALLABLE_TASK_TYPE) \
- V(CALLBACK_TASK_TYPE) \
- V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(BOILERPLATE_DESCRIPTION_TYPE) \
- V(DESCRIPTOR_ARRAY_TYPE) \
- V(HASH_TABLE_TYPE) \
- V(SCOPE_INFO_TYPE) \
- \
- V(BLOCK_CONTEXT_TYPE) \
- V(CATCH_CONTEXT_TYPE) \
- V(DEBUG_EVALUATE_CONTEXT_TYPE) \
- V(EVAL_CONTEXT_TYPE) \
- V(FUNCTION_CONTEXT_TYPE) \
- V(MODULE_CONTEXT_TYPE) \
- V(NATIVE_CONTEXT_TYPE) \
- V(SCRIPT_CONTEXT_TYPE) \
- V(WITH_CONTEXT_TYPE) \
- \
- V(WEAK_FIXED_ARRAY_TYPE) \
- V(TRANSITION_ARRAY_TYPE) \
- \
- V(CALL_HANDLER_INFO_TYPE) \
- V(CELL_TYPE) \
- V(CODE_DATA_CONTAINER_TYPE) \
- V(FEEDBACK_CELL_TYPE) \
- V(FEEDBACK_VECTOR_TYPE) \
- V(LOAD_HANDLER_TYPE) \
- V(PROPERTY_ARRAY_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(SMALL_ORDERED_HASH_MAP_TYPE) \
- V(SMALL_ORDERED_HASH_SET_TYPE) \
- V(STORE_HANDLER_TYPE) \
- V(WEAK_CELL_TYPE) \
- V(WEAK_ARRAY_LIST_TYPE) \
- \
- V(JS_PROXY_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_MODULE_NAMESPACE_TYPE) \
- V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_VALUE_TYPE) \
- V(JS_API_OBJECT_TYPE) \
- V(JS_OBJECT_TYPE) \
- \
- V(JS_ARGUMENTS_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_ARRAY_ITERATOR_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_MAP_TYPE) \
- V(JS_MAP_KEY_ITERATOR_TYPE) \
- V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_MAP_VALUE_ITERATOR_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_PROMISE_TYPE) \
- V(JS_REGEXP_TYPE) \
- V(JS_REGEXP_STRING_ITERATOR_TYPE) \
- V(JS_SET_TYPE) \
- V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_SET_VALUE_ITERATOR_TYPE) \
- V(JS_STRING_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE)
-
-#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \
- V(WASM_GLOBAL_TYPE) \
- V(WASM_INSTANCE_TYPE) \
- V(WASM_MEMORY_TYPE) \
- V(WASM_MODULE_TYPE) \
- V(WASM_TABLE_TYPE) \
- V(JS_BOUND_FUNCTION_TYPE) \
- V(JS_FUNCTION_TYPE)
-
-#ifdef V8_INTL_SUPPORT
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- V(JS_INTL_LOCALE_TYPE) \
- INSTANCE_TYPE_LIST_AFTER_INTL(V)
-#else
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- INSTANCE_TYPE_LIST_AFTER_INTL(V)
-#endif // V8_INTL_SUPPORT
-
-// Since string types are not consecutive, this macro is used to
-// iterate over them.
-#define STRING_TYPE_LIST(V) \
- V(STRING_TYPE, kVariableSizeSentinel, string, String) \
- V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
- OneByteString) \
- V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
- V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
- ConsOneByteString) \
- V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
- V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
- SlicedOneByteString) \
- V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
- ExternalString) \
- V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
- external_one_byte_string, ExternalOneByteString) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
- external_string_with_one_byte_data, ExternalStringWithOneByteData) \
- V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize, \
- short_external_string, ShortExternalString) \
- V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize, \
- short_external_one_byte_string, ShortExternalOneByteString) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string_with_one_byte_data, \
- ShortExternalStringWithOneByteData) \
- \
- V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
- InternalizedString) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
- one_byte_internalized_string, OneByteInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
- external_internalized_string, ExternalInternalizedString) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
- external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string_with_one_byte_data, \
- ExternalInternalizedStringWithOneByteData) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, short_external_internalized_string, \
- ShortExternalInternalizedString) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
- ExternalOneByteString::kShortSize, \
- short_external_one_byte_internalized_string, \
- ShortExternalOneByteInternalizedString) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string_with_one_byte_data, \
- ShortExternalInternalizedStringWithOneByteData) \
- V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
- V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
- ThinOneByteString)
-
-// A struct is a simple object a set of object-valued fields. Including an
-// object type in this causes the compiler to generate most of the boilerplate
-// code for the class including allocation and garbage collection routines,
-// casts and predicates. All you need to define is the class, methods and
-// object verification routines. Easy, no?
-//
-// Note that for subtle reasons related to the ordering or numerical values of
-// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
-// manually.
-#define STRUCT_LIST(V) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
- V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
- V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
- V(ALLOCATION_SITE, AllocationSite, allocation_site) \
- V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(INTERPRETER_DATA, InterpreterData, interpreter_data) \
- V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(MODULE, Module, module) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
- V(PROMISE_REACTION, PromiseReaction, promise_reaction) \
- V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
- V(SCRIPT, Script, script) \
- V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
- V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3) \
- V(WASM_COMPILED_MODULE, WasmCompiledModule, wasm_compiled_module) \
- V(WASM_DEBUG_INFO, WasmDebugInfo, wasm_debug_info) \
- V(WASM_EXPORTED_FUNCTION_DATA, WasmExportedFunctionData, \
- wasm_exported_function_data) \
- V(WASM_SHARED_MODULE_DATA, WasmSharedModuleData, wasm_shared_module_data) \
- V(CALLABLE_TASK, CallableTask, callable_task) \
- V(CALLBACK_TASK, CallbackTask, callback_task) \
- V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
- promise_fulfill_reaction_job_task) \
- V(PROMISE_REJECT_REACTION_JOB_TASK, PromiseRejectReactionJobTask, \
- promise_reject_reaction_job_task) \
- V(PROMISE_RESOLVE_THENABLE_JOB_TASK, PromiseResolveThenableJobTask, \
- promise_resolve_thenable_job_task)
-
-#define DATA_HANDLER_LIST(V) \
- V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
- V(LOAD_HANDLER, LoadHandler, 2, load_handler2) \
- V(LOAD_HANDLER, LoadHandler, 3, load_handler3) \
- V(STORE_HANDLER, StoreHandler, 0, store_handler0) \
- V(STORE_HANDLER, StoreHandler, 1, store_handler1) \
- V(STORE_HANDLER, StoreHandler, 2, store_handler2) \
- V(STORE_HANDLER, StoreHandler, 3, store_handler3)
-
// We use the full 16 bits of the instance_type field to encode heap object
// instance types. All the high-order bits (bit 7-15) are cleared if the object
// is a string, and contain set bits if it is not a string.
@@ -768,7 +462,6 @@ enum InstanceType : uint16_t {
ACCESSOR_PAIR_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
ALLOCATION_MEMENTO_TYPE,
- ALLOCATION_SITE_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
DEBUG_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
@@ -784,10 +477,9 @@ enum InstanceType : uint16_t {
STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
- WASM_COMPILED_MODULE_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
WASM_DEBUG_INFO_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
- WASM_SHARED_MODULE_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
CALLBACK_TASK_TYPE,
@@ -795,12 +487,21 @@ enum InstanceType : uint16_t {
PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+ ALLOCATION_SITE_TYPE,
// FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
- BOILERPLATE_DESCRIPTION_TYPE,
- DESCRIPTOR_ARRAY_TYPE,
- HASH_TABLE_TYPE,
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
+ ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE
+ ORDERED_HASH_SET_TYPE,
+ NAME_DICTIONARY_TYPE,
+ GLOBAL_DICTIONARY_TYPE,
+ NUMBER_DICTIONARY_TYPE,
+ SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE
+ STRING_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
+ EPHEMERON_HASH_TABLE_TYPE,
SCOPE_INFO_TYPE,
+ SCRIPT_CONTEXT_TABLE_TYPE,
BLOCK_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
CATCH_CONTEXT_TYPE,
DEBUG_EVALUATE_CONTEXT_TYPE,
@@ -812,6 +513,7 @@ enum InstanceType : uint16_t {
WITH_CONTEXT_TYPE, // LAST_FIXED_ARRAY_TYPE, LAST_CONTEXT_TYPE
WEAK_FIXED_ARRAY_TYPE, // FIRST_WEAK_FIXED_ARRAY_TYPE
+ DESCRIPTOR_ARRAY_TYPE,
TRANSITION_ARRAY_TYPE, // LAST_WEAK_FIXED_ARRAY_TYPE
// Misc.
@@ -821,12 +523,15 @@ enum InstanceType : uint16_t {
FEEDBACK_CELL_TYPE,
FEEDBACK_VECTOR_TYPE,
LOAD_HANDLER_TYPE,
+ PRE_PARSED_SCOPE_DATA_TYPE,
PROPERTY_ARRAY_TYPE,
PROPERTY_CELL_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SMALL_ORDERED_HASH_MAP_TYPE,
SMALL_ORDERED_HASH_SET_TYPE,
STORE_HANDLER_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
+ UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
WEAK_CELL_TYPE,
WEAK_ARRAY_LIST_TYPE,
@@ -878,6 +583,7 @@ enum InstanceType : uint16_t {
#ifdef V8_INTL_SUPPORT
JS_INTL_LOCALE_TYPE,
+ JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
#endif // V8_INTL_SUPPORT
WASM_GLOBAL_TYPE,
@@ -903,6 +609,12 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of FixedArray.
FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
LAST_FIXED_ARRAY_TYPE = WITH_CONTEXT_TYPE,
+ // Boundaries for testing if given HeapObject is a subclass of HashTable
+ FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
+ LAST_HASH_TABLE_TYPE = STRING_TABLE_TYPE,
+ // Boundaries for testing if given HeapObject is a subclass of Dictionary
+ FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE,
+ LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE,
// Boundaries for testing if given HeapObject is a subclass of WeakFixedArray.
FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
@@ -964,6 +676,8 @@ enum class ComparisonResult {
// (Returns false whenever {result} is kUndefined.)
bool ComparisonResultToBool(Operation op, ComparisonResult result);
+enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
+
class AbstractCode;
class AccessorPair;
class AccessCheckInfo;
@@ -981,6 +695,7 @@ class FunctionTemplateInfo;
class JSGlobalObject;
#ifdef V8_INTL_SUPPORT
class JSLocale;
+class JSRelativeTimeFormat;
#endif // V8_INTL_SUPPORT
class JSPromise;
class KeyAccumulator;
@@ -992,6 +707,7 @@ class ModuleInfoEntry;
class ObjectHashTable;
class ObjectTemplateInfo;
class ObjectVisitor;
+class PreParsedScopeData;
class PropertyCell;
class PropertyDescriptor;
class RootVisitor;
@@ -1001,6 +717,7 @@ class StringStream;
class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
+class UncompiledData;
class WeakCell;
class TemplateInfo;
class TransitionArray;
@@ -1028,10 +745,11 @@ template <class C> inline bool Is(Object* obj);
#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
V(AbstractCode) \
V(AccessCheckNeeded) \
+ V(AllocationSite) \
V(ArrayList) \
V(BigInt) \
V(BigIntWrapper) \
- V(BoilerplateDescription) \
+ V(ObjectBoilerplateDescription) \
V(Boolean) \
V(BooleanWrapper) \
V(BreakPoint) \
@@ -1046,7 +764,6 @@ template <class C> inline bool Is(Object* obj);
V(CodeDataContainer) \
V(CompilationCacheTable) \
V(ConsString) \
- V(ConstantElementsPair) \
V(Constructor) \
V(Context) \
V(CoverageInfo) \
@@ -1054,8 +771,8 @@ template <class C> inline bool Is(Object* obj);
V(DeoptimizationData) \
V(DependentCode) \
V(DescriptorArray) \
+ V(EphemeronHashTable) \
V(EnumCache) \
- V(External) \
V(ExternalOneByteString) \
V(ExternalString) \
V(ExternalTwoByteString) \
@@ -1173,6 +890,9 @@ template <class C> inline bool Is(Object* obj);
V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
+ V(UncompiledData) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(UncompiledDataWithoutPreParsedScope) \
V(Undetectable) \
V(UniqueName) \
V(WasmGlobalObject) \
@@ -1187,7 +907,8 @@ template <class C> inline bool Is(Object* obj);
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
- V(JSLocale)
+ V(JSLocale) \
+ V(JSRelativeTimeFormat)
#else
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
#endif // V8_INTL_SUPPORT
@@ -1226,17 +947,25 @@ class Object {
// Type testing.
bool IsObject() const { return true; }
-#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
+#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
- INLINE(bool Is##Type(Isolate* isolate) const);
+ V8_INLINE bool IsExternal(Isolate* isolate) const;
+
+// Oddball checks are faster when they are raw pointer comparisons, so the
+// isolate/read-only roots overloads should be preferred where possible.
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+ V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
- INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+ V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
+ V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
+ V8_INLINE bool IsNullOrUndefined() const;
// A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
// a keyed store is of the form a[expression] = foo.
@@ -1264,20 +993,20 @@ class Object {
#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) INLINE(bool Is##Name() const);
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
// ES6, #sec-isarray. NOT to be confused with %_IsArray.
- INLINE(
- V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object));
+ V8_INLINE
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object);
- INLINE(bool IsSmallOrderedHashTable() const);
+ V8_INLINE bool IsSmallOrderedHashTable() const;
// Extract the number.
inline double Number() const;
- INLINE(bool IsNaN() const);
- INLINE(bool IsMinusZero() const);
+ V8_INLINE bool IsNaN() const;
+ V8_INLINE bool IsMinusZero() const;
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
@@ -1309,14 +1038,16 @@ class Object {
// implementation of a JSObject's elements.
inline bool HasValidElements();
- bool BooleanValue(); // ECMA-262 9.2.
+ // ECMA-262 9.2.
+ bool BooleanValue(Isolate* isolate);
// ES6 section 7.2.11 Abstract Relational Comparison
V8_WARN_UNUSED_RESULT static Maybe<ComparisonResult> Compare(
- Handle<Object> x, Handle<Object> y);
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
// ES6 section 7.2.12 Abstract Equality Comparison
- V8_WARN_UNUSED_RESULT static Maybe<bool> Equals(Handle<Object> x,
+ V8_WARN_UNUSED_RESULT static Maybe<bool> Equals(Isolate* isolate,
+ Handle<Object> x,
Handle<Object> y);
// ES6 section 7.2.13 Strict Equality Comparison
@@ -1350,10 +1081,10 @@ class Object {
// ES6 section 7.1.3 ToNumber
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumber(
- Handle<Object> input);
+ Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumeric(
- Handle<Object> input);
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.4 ToInteger
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToInteger(
@@ -1408,14 +1139,16 @@ class Object {
Handle<Object> rhs);
// ES6 section 12.9 Relational Operators
- V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThan(Isolate* isolate,
+ Handle<Object> x,
Handle<Object> y);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThanOrEqual(
- Handle<Object> x, Handle<Object> y);
- V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThan(Handle<Object> x,
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThan(Isolate* isolate,
+ Handle<Object> x,
Handle<Object> y);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThanOrEqual(
- Handle<Object> x, Handle<Object> y);
+ Isolate* isolate, Handle<Object> x, Handle<Object> y);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O).
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
@@ -1426,7 +1159,8 @@ class Object {
Isolate* isolate, Handle<Object> object, Handle<Object> callable);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- GetProperty(LookupIterator* it);
+ GetProperty(LookupIterator* it,
+ OnNonExistent on_non_existent = OnNonExistent::kReturnUndefined);
// ES6 [[Set]] (when passed kDontThrow)
// Invariants for this and related functions (unless stated otherwise):
@@ -1439,12 +1173,12 @@ class Object {
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetProperty(
- Handle<Object> object, Handle<Name> name, Handle<Object> value,
- LanguageMode language_mode,
+ Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
- Handle<Object> object, Handle<Name> name, Handle<Object> value,
- LanguageMode language_mode,
+ Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetSuperProperty(
@@ -1468,11 +1202,11 @@ class Object {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ShouldThrow should_throw, StoreFromKeyed store_mode);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
- Handle<Object> object, Handle<Name> name);
+ Isolate* isolate, Handle<Object> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
- Handle<Object> object, Handle<Name> name);
+ Isolate* isolate, Handle<Object> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
LookupIterator* it);
@@ -1539,7 +1273,7 @@ class Object {
DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
- static void VerifyPointer(Object* p);
+ static void VerifyPointer(Isolate* isolate, Object* p);
#endif
inline void VerifyApiCallResultType();
@@ -1774,24 +1508,47 @@ class HeapObject: public Object {
inline MapWord map_word() const;
inline void set_map_word(MapWord map_word);
+ // TODO(v8:7464): Once RO_SPACE is shared between isolates, this method can be
+ // removed as ReadOnlyRoots will be accessible from a global variable. For now
+ // this method exists to help remove GetIsolate/GetHeap from HeapObject, in a
+ // way that doesn't require passing Isolate/Heap down huge call chains or to
+ // places where it might not be safe to access it.
+ inline ReadOnlyRoots GetReadOnlyRoots() const;
+
// The Heap the object was allocated in. Used also to access Isolate.
- inline Heap* GetHeap() const;
+#ifdef DEPRECATE_GET_ISOLATE
+ [[deprecated("Pass Heap explicitly or use a NeverReadOnlySpaceObject")]]
+#endif
+ inline Heap*
+ GetHeap() const;
- // Convenience method to get current isolate.
- inline Isolate* GetIsolate() const;
+// Convenience method to get current isolate.
+#ifdef DEPRECATE_GET_ISOLATE
+ [[deprecated("Pass Isolate explicitly or use a NeverReadOnlySpaceObject")]]
+#endif
+ inline Isolate*
+ GetIsolate() const;
-#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
+#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
- INLINE(bool Is##Type(Isolate* isolate) const);
+ V8_INLINE bool IsExternal(Isolate* isolate) const;
+
+// Oddball checks are faster when they are raw pointer comparisons, so the
+// isolate/read-only roots overloads should be preferred where possible.
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+ V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
- INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+ V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
+ V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
+ V8_INLINE bool IsNullOrUndefined() const;
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) INLINE(bool Is##Name() const);
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -1867,12 +1624,13 @@ class HeapObject: public Object {
DECL_PRINTER(HeapObject)
DECL_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
- inline void VerifyObjectField(int offset);
+ inline void VerifyObjectField(Isolate* isolate, int offset);
inline void VerifySmiField(int offset);
+ inline void VerifyMaybeObjectField(Isolate* isolate, int offset);
// Verify a pointer is a valid HeapObject pointer that points to object
// areas in the heap.
- static void VerifyHeapPointer(Object* p);
+ static void VerifyHeapPointer(Isolate* isolate, Object* p);
#endif
static inline AllocationAlignment RequiredAlignment(Map* map);
@@ -1888,7 +1646,7 @@ class HeapObject: public Object {
bool CanBeRehashed() const;
// Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap();
+ void RehashBasedOnMap(Isolate* isolate);
// Layout description.
// First field in a heap object is map.
@@ -1897,10 +1655,23 @@ class HeapObject: public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
+ inline Address GetFieldAddress(int field_offset) const;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};
+// Mixin class for objects that can never be in RO space.
+// TODO(leszeks): Add checks in the factory that we never allocate these objects
+// in RO space.
+class NeverReadOnlySpaceObject {
+ public:
+ // The Heap the object was allocated in. Used also to access Isolate.
+ inline Heap* GetHeap() const;
+
+ // Convenience method to get current isolate.
+ inline Isolate* GetIsolate() const;
+};
template <int start_offset, int end_offset, int size>
class FixedBodyDescriptor;
@@ -1909,10 +1680,11 @@ class FixedBodyDescriptor;
template <int start_offset>
class FlexibleBodyDescriptor;
-
// The HeapNumber class describes heap allocated numbers that cannot be
-// represented in a Smi (small integer)
-class HeapNumber: public HeapObject {
+// represented in a Smi (small integer). MutableHeapNumber is the same, but its
+// number value can change over time (it is used only as property storage).
+// HeapNumberBase merely exists to avoid code duplication.
+class HeapNumberBase : public HeapObject {
public:
// [value]: number value.
inline double value() const;
@@ -1921,11 +1693,6 @@ class HeapNumber: public HeapObject {
inline uint64_t value_as_bits() const;
inline void set_value_as_bits(uint64_t bits);
- DECL_CAST(HeapNumber)
-
- V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os); // NOLINT
- DECL_VERIFIER(HeapNumber)
-
inline int get_exponent();
inline int get_sign();
@@ -1959,7 +1726,25 @@ class HeapNumber: public HeapObject {
static const int kNonMantissaBitsInTopWord = 12;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumberBase)
+};
+
+class HeapNumber : public HeapNumberBase {
+ public:
+ DECL_CAST(HeapNumber)
+ V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber)
+};
+
+class MutableHeapNumber : public HeapNumberBase {
+ public:
+ DECL_CAST(MutableHeapNumber)
+ V8_EXPORT_PRIVATE void MutableHeapNumberPrint(std::ostream& os);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MutableHeapNumber)
};
enum EnsureElementsMode {
@@ -2044,8 +1829,13 @@ class PropertyArray : public HeapObject {
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
-class JSReceiver: public HeapObject {
+class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ // Use the mixin methods over the HeapObject methods.
+ // TODO(v8:7786) Remove once the HeapObject methods are gone.
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Returns true if there is no slow (ie, dictionary) backing store.
inline bool HasFastProperties() const;
@@ -2131,7 +1921,7 @@ class JSReceiver: public HeapObject {
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
- Handle<JSReceiver> receiver, Handle<Name> name);
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
@@ -2413,15 +2203,13 @@ class JSObject: public JSReceiver {
LookupIterator* it, Handle<Object> value,
ShouldThrow should_throw = kDontThrow);
- static void AddProperty(Handle<JSObject> object, Handle<Name> name,
- Handle<Object> value, PropertyAttributes attributes);
+ static void AddProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes);
- V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataElement(
- Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, ShouldThrow should_throw);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> AddDataElement(
- Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes);
+ static void AddDataElement(Handle<JSObject> receiver, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
@@ -2469,11 +2257,9 @@ class JSObject: public JSReceiver {
// Utility used by many Array builtins and runtime functions
static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
- // Alternative implementation of FixedArrayOfWeakCells::NullCallback.
- class PrototypeRegistryCompactionCallback {
- public:
- static void Callback(Object* value, int old_index, int new_index);
- };
+ // To be passed to PrototypeUsers::Compact.
+ static void PrototypeRegistryCompactionCallback(HeapObject* value,
+ int old_index, int new_index);
// Retrieve interceptors.
inline InterceptorInfo* GetNamedInterceptor();
@@ -2707,7 +2493,7 @@ class JSObject: public JSReceiver {
int number_of_slow_unused_elements_;
};
- void IncrementSpillStatistics(SpillInformation* info);
+ void IncrementSpillStatistics(Isolate* isolate, SpillInformation* info);
#endif
#ifdef VERIFY_HEAP
@@ -2982,58 +2768,6 @@ class AsyncGeneratorRequest : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(AsyncGeneratorRequest);
};
-// Container for metadata stored on each prototype map.
-class PrototypeInfo : public Struct {
- public:
- static const int UNREGISTERED = -1;
-
- // [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
- DECL_ACCESSORS(weak_cell, Object)
-
- // [prototype_users]: FixedArrayOfWeakCells containing maps using this
- // prototype, or Smi(0) if uninitialized.
- DECL_ACCESSORS(prototype_users, Object)
-
- // [object_create_map]: A field caching the map for Object.create(prototype).
- static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
- Handle<Map> map);
- inline Map* ObjectCreateMap();
- inline bool HasObjectCreateMap();
-
- // [registry_slot]: Slot in prototype's user registry where this user
- // is stored. Returns UNREGISTERED if this prototype has not been registered.
- inline int registry_slot() const;
- inline void set_registry_slot(int slot);
-
- // [bit_field]
- inline int bit_field() const;
- inline void set_bit_field(int bit_field);
-
- DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
-
- DECL_CAST(PrototypeInfo)
-
- // Dispatched behavior.
- DECL_PRINTER(PrototypeInfo)
- DECL_VERIFIER(PrototypeInfo)
-
- static const int kWeakCellOffset = HeapObject::kHeaderSize;
- static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
- static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
- static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
- static const int kObjectCreateMap = kValidityCellOffset + kPointerSize;
- static const int kBitFieldOffset = kObjectCreateMap + kPointerSize;
- static const int kSize = kBitFieldOffset + kPointerSize;
-
- // Bit field usage.
- static const int kShouldBeFastBit = 0;
-
- private:
- DECL_ACCESSORS(object_create_map, Object)
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
-};
-
// List of builtin functions we want to identify to improve code
// generation.
//
@@ -3192,9 +2926,6 @@ enum BuiltinFunctionId {
FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
ATOMIC_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
#undef DECL_FUNCTION_ID
- // Fake id for a special case of Math.pow. Note, it continues the
- // list of math functions.
- kMathPowHalf,
// These are manually assigned to special getters during bootstrapping.
kArrayBufferByteLength,
kArrayBufferIsView,
@@ -3273,8 +3004,8 @@ class JSGeneratorObject: public JSObject {
// is suspended.
int source_position() const;
- // [register_file]: Saved interpreter register file.
- DECL_ACCESSORS(register_file, FixedArray)
+ // [parameters_and_registers]: Saved interpreter register file.
+ DECL_ACCESSORS(parameters_and_registers, FixedArray)
DECL_CAST(JSGeneratorObject)
@@ -3293,8 +3024,9 @@ class JSGeneratorObject: public JSObject {
static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
- static const int kRegisterFileOffset = kContinuationOffset + kPointerSize;
- static const int kSize = kRegisterFileOffset + kPointerSize;
+ static const int kParametersAndRegistersOffset =
+ kContinuationOffset + kPointerSize;
+ static const int kSize = kParametersAndRegistersOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
@@ -3560,6 +3292,7 @@ class JSFunction: public JSObject {
V(kSizeWithPrototype, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_FUNCTION_FIELDS)
+#undef JS_FUNCTION_FIELDS
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
@@ -3775,7 +3508,7 @@ class JSMessageObject: public JSObject {
DECL_ACCESSORS(argument, Object)
// [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Object)
+ DECL_ACCESSORS(script, Script)
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -3825,7 +3558,7 @@ class JSMessageObject: public JSObject {
typedef BodyDescriptor BodyDescriptorWeak;
};
-class AllocationSite: public Struct {
+class AllocationSite : public Struct, public NeverReadOnlySpaceObject {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
static const double kPretenureRatio;
@@ -3841,6 +3574,11 @@ class AllocationSite: public Struct {
kLastPretenureDecisionValue = kZombie
};
+ // Use the mixin methods over the HeapObject methods.
+ // TODO(v8:7786) Remove once the HeapObject methods are gone.
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
const char* PretenureDecisionName(PretenureDecision decision);
// Contains either a Smi-encoded bitfield or a boilerplate. If it's a Smi the
@@ -3855,9 +3593,9 @@ class AllocationSite: public Struct {
DECL_ACCESSORS(nested_site, Object)
// Bitfield containing pretenuring information.
- DECL_INT_ACCESSORS(pretenure_data)
+ DECL_INT32_ACCESSORS(pretenure_data)
- DECL_INT_ACCESSORS(pretenure_create_count)
+ DECL_INT32_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
// heap->allocation_site_list() points to the last AllocationSite which form
@@ -3867,6 +3605,9 @@ class AllocationSite: public Struct {
inline void Initialize();
+ // Checks if the allocation site contain weak_next field;
+ inline bool HasWeakNext() const;
+
// This method is expensive, it should only be called for reporting.
bool IsNested();
@@ -3940,31 +3681,36 @@ class AllocationSite: public Struct {
static bool ShouldTrack(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
- static const int kTransitionInfoOrBoilerplateOffset = HeapObject::kHeaderSize;
- static const int kNestedSiteOffset =
- kTransitionInfoOrBoilerplateOffset + kPointerSize;
- static const int kPretenureDataOffset = kNestedSiteOffset + kPointerSize;
- static const int kPretenureCreateCountOffset =
- kPretenureDataOffset + kPointerSize;
- static const int kDependentCodeOffset =
- kPretenureCreateCountOffset + kPointerSize;
- static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
- static const int kSize = kWeakNextOffset + kPointerSize;
-
- // During mark compact we need to take special care for the dependent code
- // field.
- static const int kPointerFieldsBeginOffset =
- kTransitionInfoOrBoilerplateOffset;
- static const int kPointerFieldsEndOffset = kWeakNextOffset;
-
- // Ignores weakness.
- typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
- BodyDescriptor;
-
- // Respects weakness.
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset, kSize>
- BodyDescriptorWeak;
+// Layout description.
+// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
+// and end with WeakNext field.
+#define ALLOCATION_SITE_FIELDS(V) \
+ V(kTransitionInfoOrBoilerplateOffset, kPointerSize) \
+ V(kNestedSiteOffset, kPointerSize) \
+ V(kDependentCodeOffset, kPointerSize) \
+ V(kCommonPointerFieldEndOffset, 0) \
+ V(kPretenureDataOffset, kInt32Size) \
+ V(kPretenureCreateCountOffset, kInt32Size) \
+ /* Size of AllocationSite without WeakNext field */ \
+ V(kSizeWithoutWeakNext, 0) \
+ V(kWeakNextOffset, kPointerSize) \
+ /* Size of AllocationSite with WeakNext field */ \
+ V(kSizeWithWeakNext, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
+
+ static const int kStartOffset = HeapObject::kHeaderSize;
+
+ template <bool includeWeakNext>
+ class BodyDescriptorImpl;
+
+ // BodyDescriptor is used to traverse all the pointer fields including
+ // weak_next
+ typedef BodyDescriptorImpl<true> BodyDescriptor;
+
+ // BodyDescriptorWeak is used to traverse all the pointer fields
+ // except for weak_next
+ typedef BodyDescriptorImpl<false> BodyDescriptorWeak;
private:
inline bool PretenuringDecisionMade() const;
@@ -4040,7 +3786,7 @@ class Oddball: public HeapObject {
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
- Handle<Oddball> input);
+ Isolate* isolate, Handle<Oddball> input);
DECL_CAST(Oddball)
@@ -4163,27 +3909,29 @@ class PropertyCell : public HeapObject {
// property.
DECL_ACCESSORS(dependent_code, DependentCode)
- inline PropertyDetails property_details();
+ inline PropertyDetails property_details() const;
inline void set_property_details(PropertyDetails details);
PropertyCellConstantType GetConstantType();
// Computes the new type of the cell's contents for the given value, but
// without actually modifying the details.
- static PropertyCellType UpdatedType(Handle<PropertyCell> cell,
+ static PropertyCellType UpdatedType(Isolate* isolate,
+ Handle<PropertyCell> cell,
Handle<Object> value,
PropertyDetails details);
// Prepares property cell at given entry for receiving given value.
// As a result the old cell could be invalidated and/or dependent code could
// be deoptimized. Returns the prepared property cell.
static Handle<PropertyCell> PrepareForValue(
- Handle<GlobalDictionary> dictionary, int entry, Handle<Object> value,
- PropertyDetails details);
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+ Handle<Object> value, PropertyDetails details);
static Handle<PropertyCell> InvalidateEntry(
- Handle<GlobalDictionary> dictionary, int entry);
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry);
- static void SetValueWithInvalidation(Handle<PropertyCell> cell,
+ static void SetValueWithInvalidation(Isolate* isolate,
+ Handle<PropertyCell> cell,
Handle<Object> new_value);
DECL_CAST(PropertyCell)
@@ -4252,7 +4000,7 @@ class JSProxy: public JSReceiver {
DECL_CAST(JSProxy)
- INLINE(bool IsRevoked() const);
+ V8_INLINE bool IsRevoked() const;
static void Revoke(Handle<JSProxy> proxy);
// ES6 9.5.1
@@ -4472,13 +4220,14 @@ class AccessorPair: public Struct {
DECL_CAST(AccessorPair)
- static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
+ static Handle<AccessorPair> Copy(Isolate* isolate, Handle<AccessorPair> pair);
inline Object* get(AccessorComponent component);
inline void set(AccessorComponent component, Object* value);
// Note: Returns undefined if the component is not set.
- static Handle<Object> GetComponent(Handle<AccessorPair> accessor_pair,
+ static Handle<Object> GetComponent(Isolate* isolate,
+ Handle<AccessorPair> accessor_pair,
AccessorComponent component);
// Set both components, skipping arguments which are a JavaScript null.
@@ -4508,8 +4257,11 @@ class AccessorPair: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
};
-class StackFrameInfo : public Struct {
+class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
DECL_INT_ACCESSORS(line_number)
DECL_INT_ACCESSORS(column_number)
DECL_INT_ACCESSORS(script_id)
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 77fb8bb63a..4f7680d8ed 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -22,8 +22,6 @@ CAST_ACCESSOR(AccessCheckInfo)
CAST_ACCESSOR(InterceptorInfo)
CAST_ACCESSOR(CallHandlerInfo)
-TYPE_CHECKER(CallHandlerInfo, CALL_HANDLER_INFO_TYPE)
-
ACCESSORS(AccessorInfo, name, Name, kNameOffset)
SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
@@ -104,28 +102,30 @@ ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
bool CallHandlerInfo::IsSideEffectFreeCallHandlerInfo() const {
- DCHECK(map() == GetHeap()->side_effect_call_handler_info_map() ||
- map() == GetHeap()->side_effect_free_call_handler_info_map() ||
- map() ==
- GetHeap()->next_call_side_effect_free_call_handler_info_map());
- return map() == GetHeap()->side_effect_free_call_handler_info_map();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ DCHECK(map() == roots.side_effect_call_handler_info_map() ||
+ map() == roots.side_effect_free_call_handler_info_map() ||
+ map() == roots.next_call_side_effect_free_call_handler_info_map());
+ return map() == roots.side_effect_free_call_handler_info_map();
}
bool CallHandlerInfo::IsSideEffectCallHandlerInfo() const {
- DCHECK(map() == GetHeap()->side_effect_call_handler_info_map() ||
- map() == GetHeap()->side_effect_free_call_handler_info_map() ||
- map() ==
- GetHeap()->next_call_side_effect_free_call_handler_info_map());
- return map() == GetHeap()->side_effect_call_handler_info_map();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ DCHECK(map() == roots.side_effect_call_handler_info_map() ||
+ map() == roots.side_effect_free_call_handler_info_map() ||
+ map() == roots.next_call_side_effect_free_call_handler_info_map());
+ return map() == roots.side_effect_call_handler_info_map();
}
void CallHandlerInfo::SetNextCallHasNoSideEffect() {
- set_map(GetHeap()->next_call_side_effect_free_call_handler_info_map());
+ set_map(
+ GetReadOnlyRoots().next_call_side_effect_free_call_handler_info_map());
}
bool CallHandlerInfo::NextCallHasNoSideEffect() {
- if (map() == GetHeap()->next_call_side_effect_free_call_handler_info_map()) {
- set_map(GetHeap()->side_effect_call_handler_info_map());
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ if (map() == roots.next_call_side_effect_free_call_handler_info_map()) {
+ set_map(roots.side_effect_call_handler_info_map());
return true;
}
return false;
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 46247e9b98..d4f42bee11 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -37,8 +37,7 @@ class AccessorInfo : public Struct {
DECL_ACCESSORS(js_getter, Object)
DECL_ACCESSORS(data, Object)
- static Address redirect(Isolate* isolate, Address address,
- AccessorComponent component);
+ static Address redirect(Address address, AccessorComponent component);
Address redirected_getter() const;
// Dispatched behavior.
@@ -70,8 +69,8 @@ class AccessorInfo : public Struct {
// Append all descriptors to the array that are not already there.
// Return number added.
- static int AppendUnique(Handle<Object> descriptors, Handle<FixedArray> array,
- int valid_descriptors);
+ static int AppendUnique(Isolate* isolate, Handle<Object> descriptors,
+ Handle<FixedArray> array, int valid_descriptors);
// Layout description.
#define ACCESSOR_INFO_FIELDS(V) \
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index d759c7dab2..22f9837478 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -19,8 +19,6 @@ CAST_ACCESSOR(SloppyArgumentsElements)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
-TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
-
Context* SloppyArgumentsElements::context() {
return Context::cast(get(kContextIndex));
}
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 83476d78be..9e23fa8b61 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -98,7 +98,7 @@ class SloppyArgumentsElements : public FixedArray {
DECL_CAST(SloppyArgumentsElements)
#ifdef VERIFY_HEAP
- void SloppyArgumentsElementsVerify(JSObject* holder);
+ void SloppyArgumentsElementsVerify(Isolate* isolate, JSObject* holder);
#endif
private:
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 61de3a066a..4bb83a93b6 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -45,7 +45,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
static Handle<BigInt> NewFromDouble(Isolate* isolate, double value);
void InitializeDigits(int length, byte value = 0);
- static Handle<MutableBigInt> Copy(Handle<BigIntBase> source);
+ static Handle<MutableBigInt> Copy(Isolate* isolate,
+ Handle<BigIntBase> source);
static Handle<BigInt> Zero(Isolate* isolate) {
// TODO(jkummerow): Consider caching a canonical zero-BigInt.
return MakeImmutable(New(isolate, 0)).ToHandleChecked();
@@ -57,44 +58,52 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
}
// Internal helpers.
- static MaybeHandle<MutableBigInt> BitwiseAnd(Handle<BigInt> x,
+ static MaybeHandle<MutableBigInt> BitwiseAnd(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseXor(Handle<BigInt> x,
+ static MaybeHandle<MutableBigInt> BitwiseXor(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseOr(Handle<BigInt> x,
+ static MaybeHandle<MutableBigInt> BitwiseOr(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y);
- static Handle<BigInt> TruncateToNBits(int n, Handle<BigInt> x);
- static Handle<BigInt> TruncateAndSubFromPowerOfTwo(int n, Handle<BigInt> x,
+ static Handle<BigInt> TruncateToNBits(Isolate* isolate, int n,
+ Handle<BigInt> x);
+ static Handle<BigInt> TruncateAndSubFromPowerOfTwo(Isolate* isolate, int n,
+ Handle<BigInt> x,
bool result_sign);
- static MaybeHandle<BigInt> AbsoluteAdd(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign);
- static Handle<BigInt> AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign);
+ static MaybeHandle<BigInt> AbsoluteAdd(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y, bool result_sign);
+ static Handle<BigInt> AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y, bool result_sign);
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
- Handle<BigIntBase> x, bool sign, MutableBigInt* result_storage = nullptr);
- static Handle<MutableBigInt> AbsoluteSubOne(Handle<BigIntBase> x);
- static MaybeHandle<MutableBigInt> AbsoluteSubOne(Handle<BigIntBase> x,
+ Isolate* isolate, Handle<BigIntBase> x, bool sign,
+ MutableBigInt* result_storage = nullptr);
+ static Handle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
+ Handle<BigIntBase> x);
+ static MaybeHandle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
+ Handle<BigIntBase> x,
int result_length);
enum ExtraDigitsHandling { kCopy, kSkip };
enum SymmetricOp { kSymmetric, kNotSymmetric };
static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
- Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage,
- ExtraDigitsHandling extra_digits, SymmetricOp symmetric,
- std::function<digit_t(digit_t, digit_t)> op);
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
+ SymmetricOp symmetric, std::function<digit_t(digit_t, digit_t)> op);
static Handle<MutableBigInt> AbsoluteAnd(
- Handle<BigIntBase> x, Handle<BigIntBase> y,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage = nullptr);
static Handle<MutableBigInt> AbsoluteAndNot(
- Handle<BigIntBase> x, Handle<BigIntBase> y,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage = nullptr);
static Handle<MutableBigInt> AbsoluteOr(
- Handle<BigIntBase> x, Handle<BigIntBase> y,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage = nullptr);
static Handle<MutableBigInt> AbsoluteXor(
- Handle<BigIntBase> x, Handle<BigIntBase> y,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage = nullptr);
static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
@@ -109,10 +118,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
// Specialized helpers for Divide/Remainder.
- static void AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
- Handle<MutableBigInt>* quotient,
+ static void AbsoluteDivSmall(Isolate* isolate, Handle<BigIntBase> x,
+ digit_t divisor, Handle<MutableBigInt>* quotient,
digit_t* remainder);
- static bool AbsoluteDivLarge(Handle<BigIntBase> dividend,
+ static bool AbsoluteDivLarge(Isolate* isolate, Handle<BigIntBase> dividend,
Handle<BigIntBase> divisor,
Handle<MutableBigInt>* quotient,
Handle<MutableBigInt>* remainder);
@@ -125,19 +134,23 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
kSameSizeResult,
kAlwaysAddOneDigit,
};
- static MaybeHandle<MutableBigInt> SpecialLeftShift(Handle<BigIntBase> x,
+ static MaybeHandle<MutableBigInt> SpecialLeftShift(Isolate* isolate,
+ Handle<BigIntBase> x,
int shift,
SpecialLeftShiftMode mode);
// Specialized helpers for shift operations.
- static MaybeHandle<BigInt> LeftShiftByAbsolute(Handle<BigIntBase> x,
+ static MaybeHandle<BigInt> LeftShiftByAbsolute(Isolate* isolate,
+ Handle<BigIntBase> x,
Handle<BigIntBase> y);
- static Handle<BigInt> RightShiftByAbsolute(Handle<BigIntBase> x,
+ static Handle<BigInt> RightShiftByAbsolute(Isolate* isolate,
+ Handle<BigIntBase> x,
Handle<BigIntBase> y);
static Handle<BigInt> RightShiftByMaximum(Isolate* isolate, bool sign);
static Maybe<digit_t> ToShiftAmount(Handle<BigIntBase> x);
- static MaybeHandle<String> ToStringBasePowerOfTwo(Handle<BigIntBase> x,
+ static MaybeHandle<String> ToStringBasePowerOfTwo(Isolate* isolate,
+ Handle<BigIntBase> x,
int radix);
static MaybeHandle<String> ToStringGeneric(Handle<BigIntBase> x, int radix);
@@ -287,11 +300,11 @@ Handle<BigInt> MutableBigInt::NewFromDouble(Isolate* isolate, double value) {
return MakeImmutable(result);
}
-Handle<MutableBigInt> MutableBigInt::Copy(Handle<BigIntBase> source) {
+Handle<MutableBigInt> MutableBigInt::Copy(Isolate* isolate,
+ Handle<BigIntBase> source) {
int length = source->length();
// Allocating a BigInt of the same length as an existing BigInt cannot throw.
- Handle<MutableBigInt> result =
- New(source->GetIsolate(), length).ToHandleChecked();
+ Handle<MutableBigInt> result = New(isolate, length).ToHandleChecked();
memcpy(reinterpret_cast<void*>(result->address() + BigIntBase::kHeaderSize),
reinterpret_cast<void*>(source->address() + BigIntBase::kHeaderSize),
BigInt::SizeFor(length) - BigIntBase::kHeaderSize);
@@ -339,31 +352,30 @@ Handle<BigInt> BigInt::Zero(Isolate* isolate) {
return MutableBigInt::Zero(isolate);
}
-Handle<BigInt> BigInt::UnaryMinus(Handle<BigInt> x) {
+Handle<BigInt> BigInt::UnaryMinus(Isolate* isolate, Handle<BigInt> x) {
// Special case: There is no -0n.
if (x->is_zero()) {
return x;
}
- Handle<MutableBigInt> result = MutableBigInt::Copy(x);
+ Handle<MutableBigInt> result = MutableBigInt::Copy(isolate, x);
result->set_sign(!x->sign());
return MutableBigInt::MakeImmutable(result);
}
-MaybeHandle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
+MaybeHandle<BigInt> BigInt::BitwiseNot(Isolate* isolate, Handle<BigInt> x) {
MaybeHandle<MutableBigInt> result;
if (x->sign()) {
// ~(-x) == ~(~(x-1)) == x-1
- result = MutableBigInt::AbsoluteSubOne(x, x->length());
+ result = MutableBigInt::AbsoluteSubOne(isolate, x, x->length());
} else {
// ~x == -x-1 == -(x+1)
- result = MutableBigInt::AbsoluteAddOne(x, true);
+ result = MutableBigInt::AbsoluteAddOne(isolate, x, true);
}
return MutableBigInt::MakeImmutable(result);
}
-MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
+MaybeHandle<BigInt> BigInt::Exponentiate(Isolate* isolate, Handle<BigInt> base,
Handle<BigInt> exponent) {
- Isolate* isolate = base->GetIsolate();
// 1. If exponent is < 0, throw a RangeError exception.
if (exponent->sign()) {
THROW_NEW_ERROR(isolate,
@@ -380,7 +392,7 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
if (base->length() == 1 && base->digit(0) == 1) {
// (-1) ** even_number == 1.
if (base->sign() && (exponent->digit(0) & 1) == 0) {
- return UnaryMinus(base);
+ return UnaryMinus(isolate, base);
}
// (-1) ** odd_number == -1; 1 ** anything == 1.
return base;
@@ -421,13 +433,14 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
if (n & 1) result = base;
n >>= 1;
for (; n != 0; n >>= 1) {
- MaybeHandle<BigInt> maybe_result = Multiply(running_square, running_square);
+ MaybeHandle<BigInt> maybe_result =
+ Multiply(isolate, running_square, running_square);
if (!maybe_result.ToHandle(&running_square)) return maybe_result;
if (n & 1) {
if (result.is_null()) {
result = running_square;
} else {
- maybe_result = Multiply(result, running_square);
+ maybe_result = Multiply(isolate, result, running_square);
if (!maybe_result.ToHandle(&result)) return maybe_result;
}
}
@@ -435,12 +448,13 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
return result;
}
-MaybeHandle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Multiply(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
if (x->is_zero()) return x;
if (y->is_zero()) return y;
int result_length = x->length() + y->length();
Handle<MutableBigInt> result;
- if (!MutableBigInt::New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
result->InitializeDigits(result_length);
@@ -451,29 +465,30 @@ MaybeHandle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
return MutableBigInt::MakeImmutable(result);
}
-MaybeHandle<BigInt> BigInt::Divide(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Divide(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
// 1. If y is 0n, throw a RangeError exception.
if (y->is_zero()) {
- THROW_NEW_ERROR(y->GetIsolate(),
- NewRangeError(MessageTemplate::kBigIntDivZero), BigInt);
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntDivZero),
+ BigInt);
}
// 2. Let quotient be the mathematical value of x divided by y.
// 3. Return a BigInt representing quotient rounded towards 0 to the next
// integral value.
if (MutableBigInt::AbsoluteCompare(x, y) < 0) {
- return Zero(x->GetIsolate());
+ return Zero(isolate);
}
Handle<MutableBigInt> quotient;
bool result_sign = x->sign() != y->sign();
if (y->length() == 1) {
digit_t divisor = y->digit(0);
if (divisor == 1) {
- return result_sign == x->sign() ? x : UnaryMinus(x);
+ return result_sign == x->sign() ? x : UnaryMinus(isolate, x);
}
digit_t remainder;
- MutableBigInt::AbsoluteDivSmall(x, divisor, &quotient, &remainder);
+ MutableBigInt::AbsoluteDivSmall(isolate, x, divisor, &quotient, &remainder);
} else {
- if (!MutableBigInt::AbsoluteDivLarge(x, y, &quotient, nullptr)) {
+ if (!MutableBigInt::AbsoluteDivLarge(isolate, x, y, &quotient, nullptr)) {
return MaybeHandle<BigInt>();
}
}
@@ -481,8 +496,8 @@ MaybeHandle<BigInt> BigInt::Divide(Handle<BigInt> x, Handle<BigInt> y) {
return MutableBigInt::MakeImmutable(quotient);
}
-MaybeHandle<BigInt> BigInt::Remainder(Handle<BigInt> x, Handle<BigInt> y) {
- Isolate* isolate = x->GetIsolate();
+MaybeHandle<BigInt> BigInt::Remainder(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
// 1. If y is 0n, throw a RangeError exception.
if (y->is_zero()) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntDivZero),
@@ -496,14 +511,15 @@ MaybeHandle<BigInt> BigInt::Remainder(Handle<BigInt> x, Handle<BigInt> y) {
digit_t divisor = y->digit(0);
if (divisor == 1) return Zero(isolate);
digit_t remainder_digit;
- MutableBigInt::AbsoluteDivSmall(x, divisor, nullptr, &remainder_digit);
+ MutableBigInt::AbsoluteDivSmall(isolate, x, divisor, nullptr,
+ &remainder_digit);
if (remainder_digit == 0) {
return Zero(isolate);
}
remainder = MutableBigInt::New(isolate, 1).ToHandleChecked();
remainder->set_digit(0, remainder_digit);
} else {
- if (!MutableBigInt::AbsoluteDivLarge(x, y, nullptr, &remainder)) {
+ if (!MutableBigInt::AbsoluteDivLarge(isolate, x, y, nullptr, &remainder)) {
return MaybeHandle<BigInt>();
}
}
@@ -511,53 +527,56 @@ MaybeHandle<BigInt> BigInt::Remainder(Handle<BigInt> x, Handle<BigInt> y) {
return MutableBigInt::MakeImmutable(remainder);
}
-MaybeHandle<BigInt> BigInt::Add(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Add(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
bool xsign = x->sign();
if (xsign == y->sign()) {
// x + y == x + y
// -x + -y == -(x + y)
- return MutableBigInt::AbsoluteAdd(x, y, xsign);
+ return MutableBigInt::AbsoluteAdd(isolate, x, y, xsign);
}
// x + -y == x - y == -(y - x)
// -x + y == y - x == -(x - y)
if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
- return MutableBigInt::AbsoluteSub(x, y, xsign);
+ return MutableBigInt::AbsoluteSub(isolate, x, y, xsign);
}
- return MutableBigInt::AbsoluteSub(y, x, !xsign);
+ return MutableBigInt::AbsoluteSub(isolate, y, x, !xsign);
}
-MaybeHandle<BigInt> BigInt::Subtract(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::Subtract(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
bool xsign = x->sign();
if (xsign != y->sign()) {
// x - (-y) == x + y
// (-x) - y == -(x + y)
- return MutableBigInt::AbsoluteAdd(x, y, xsign);
+ return MutableBigInt::AbsoluteAdd(isolate, x, y, xsign);
}
// x - y == -(y - x)
// (-x) - (-y) == y - x == -(x - y)
if (MutableBigInt::AbsoluteCompare(x, y) >= 0) {
- return MutableBigInt::AbsoluteSub(x, y, xsign);
+ return MutableBigInt::AbsoluteSub(isolate, x, y, xsign);
}
- return MutableBigInt::AbsoluteSub(y, x, !xsign);
+ return MutableBigInt::AbsoluteSub(isolate, y, x, !xsign);
}
-MaybeHandle<BigInt> BigInt::LeftShift(Handle<BigInt> x, Handle<BigInt> y) {
+MaybeHandle<BigInt> BigInt::LeftShift(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
if (y->is_zero() || x->is_zero()) return x;
- if (y->sign()) return MutableBigInt::RightShiftByAbsolute(x, y);
- return MutableBigInt::LeftShiftByAbsolute(x, y);
+ if (y->sign()) return MutableBigInt::RightShiftByAbsolute(isolate, x, y);
+ return MutableBigInt::LeftShiftByAbsolute(isolate, x, y);
}
-MaybeHandle<BigInt> BigInt::SignedRightShift(Handle<BigInt> x,
+MaybeHandle<BigInt> BigInt::SignedRightShift(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
if (y->is_zero() || x->is_zero()) return x;
- if (y->sign()) return MutableBigInt::LeftShiftByAbsolute(x, y);
- return MutableBigInt::RightShiftByAbsolute(x, y);
+ if (y->sign()) return MutableBigInt::LeftShiftByAbsolute(isolate, x, y);
+ return MutableBigInt::RightShiftByAbsolute(isolate, x, y);
}
-MaybeHandle<BigInt> BigInt::UnsignedRightShift(Handle<BigInt> x,
+MaybeHandle<BigInt> BigInt::UnsignedRightShift(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y) {
- THROW_NEW_ERROR(x->GetIsolate(), NewTypeError(MessageTemplate::kBigIntShr),
- BigInt);
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kBigIntShr), BigInt);
}
namespace {
@@ -602,49 +621,53 @@ bool BigInt::EqualToBigInt(BigInt* x, BigInt* y) {
return true;
}
-MaybeHandle<BigInt> BigInt::BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseAnd(x, y));
+MaybeHandle<BigInt> BigInt::BitwiseAnd(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseAnd(isolate, x, y));
}
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Handle<BigInt> x,
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y) {
if (!x->sign() && !y->sign()) {
- return AbsoluteAnd(x, y);
+ return AbsoluteAnd(isolate, x, y);
} else if (x->sign() && y->sign()) {
int result_length = Max(x->length(), y->length()) + 1;
// (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
// == -(((x-1) | (y-1)) + 1)
Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(x, result_length).ToHandle(&result)) {
+ if (!AbsoluteSubOne(isolate, x, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
- result = AbsoluteOr(result, y_1, *result);
- return AbsoluteAddOne(result, true, *result);
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
+ result = AbsoluteOr(isolate, result, y_1, *result);
+ return AbsoluteAddOne(isolate, result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x & (-y) == x & ~(y-1) == x &~ (y-1)
- return AbsoluteAndNot(x, AbsoluteSubOne(y));
+ return AbsoluteAndNot(isolate, x, AbsoluteSubOne(isolate, y));
}
}
-MaybeHandle<BigInt> BigInt::BitwiseXor(Handle<BigInt> x, Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseXor(x, y));
+MaybeHandle<BigInt> BigInt::BitwiseXor(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseXor(isolate, x, y));
}
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Handle<BigInt> x,
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y) {
if (!x->sign() && !y->sign()) {
- return AbsoluteXor(x, y);
+ return AbsoluteXor(isolate, x, y);
} else if (x->sign() && y->sign()) {
int result_length = Max(x->length(), y->length());
// (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
Handle<MutableBigInt> result =
- AbsoluteSubOne(x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
- return AbsoluteXor(result, y_1, *result);
+ AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
+ return AbsoluteXor(isolate, result, y_1, *result);
} else {
DCHECK(x->sign() != y->sign());
int result_length = Max(x->length(), y->length()) + 1;
@@ -652,69 +675,71 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Handle<BigInt> x,
if (x->sign()) std::swap(x, y);
// x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(y, result_length).ToHandle(&result)) {
+ if (!AbsoluteSubOne(isolate, y, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
- result = AbsoluteXor(result, x, *result);
- return AbsoluteAddOne(result, true, *result);
+ result = AbsoluteXor(isolate, result, x, *result);
+ return AbsoluteAddOne(isolate, result, true, *result);
}
}
-MaybeHandle<BigInt> BigInt::BitwiseOr(Handle<BigInt> x, Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseOr(x, y));
+MaybeHandle<BigInt> BigInt::BitwiseOr(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y) {
+ return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseOr(isolate, x, y));
}
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Handle<BigInt> x,
+MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y) {
int result_length = Max(x->length(), y->length());
if (!x->sign() && !y->sign()) {
- return AbsoluteOr(x, y);
+ return AbsoluteOr(isolate, x, y);
} else if (x->sign() && y->sign()) {
// (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
// == -(((x-1) & (y-1)) + 1)
Handle<MutableBigInt> result =
- AbsoluteSubOne(x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
- result = AbsoluteAnd(result, y_1, *result);
- return AbsoluteAddOne(result, true, *result);
+ AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
+ result = AbsoluteAnd(isolate, result, y_1, *result);
+ return AbsoluteAddOne(isolate, result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
Handle<MutableBigInt> result =
- AbsoluteSubOne(y, result_length).ToHandleChecked();
- result = AbsoluteAndNot(result, x, *result);
- return AbsoluteAddOne(result, true, *result);
+ AbsoluteSubOne(isolate, y, result_length).ToHandleChecked();
+ result = AbsoluteAndNot(isolate, result, x, *result);
+ return AbsoluteAddOne(isolate, result, true, *result);
}
}
-MaybeHandle<BigInt> BigInt::Increment(Handle<BigInt> x) {
+MaybeHandle<BigInt> BigInt::Increment(Isolate* isolate, Handle<BigInt> x) {
if (x->sign()) {
- Handle<MutableBigInt> result = MutableBigInt::AbsoluteSubOne(x);
+ Handle<MutableBigInt> result = MutableBigInt::AbsoluteSubOne(isolate, x);
result->set_sign(true);
return MutableBigInt::MakeImmutable(result);
} else {
return MutableBigInt::MakeImmutable(
- MutableBigInt::AbsoluteAddOne(x, false));
+ MutableBigInt::AbsoluteAddOne(isolate, x, false));
}
}
-MaybeHandle<BigInt> BigInt::Decrement(Handle<BigInt> x) {
+MaybeHandle<BigInt> BigInt::Decrement(Isolate* isolate, Handle<BigInt> x) {
MaybeHandle<MutableBigInt> result;
if (x->sign()) {
- result = MutableBigInt::AbsoluteAddOne(x, true);
+ result = MutableBigInt::AbsoluteAddOne(isolate, x, true);
} else if (x->is_zero()) {
// TODO(jkummerow): Consider caching a canonical -1n BigInt.
- return MutableBigInt::NewFromInt(x->GetIsolate(), -1);
+ return MutableBigInt::NewFromInt(isolate, -1);
} else {
- result = MutableBigInt::AbsoluteSubOne(x);
+ result = MutableBigInt::AbsoluteSubOne(isolate, x);
}
return MutableBigInt::MakeImmutable(result);
}
-ComparisonResult BigInt::CompareToString(Handle<BigInt> x, Handle<String> y) {
- Isolate* isolate = x->GetIsolate();
+ComparisonResult BigInt::CompareToString(Isolate* isolate, Handle<BigInt> x,
+ Handle<String> y) {
// a. Let ny be StringToBigInt(y);
MaybeHandle<BigInt> maybe_ny = StringToBigInt(isolate, y);
// b. If ny is NaN, return undefined.
@@ -727,8 +752,8 @@ ComparisonResult BigInt::CompareToString(Handle<BigInt> x, Handle<String> y) {
return CompareToBigInt(x, ny);
}
-bool BigInt::EqualToString(Handle<BigInt> x, Handle<String> y) {
- Isolate* isolate = x->GetIsolate();
+bool BigInt::EqualToString(Isolate* isolate, Handle<BigInt> x,
+ Handle<String> y) {
// a. Let n be StringToBigInt(y).
MaybeHandle<BigInt> maybe_n = StringToBigInt(isolate, y);
// b. If n is NaN, return false.
@@ -893,13 +918,13 @@ ComparisonResult BigInt::CompareToDouble(Handle<BigInt> x, double y) {
return ComparisonResult::kEqual;
}
-MaybeHandle<String> BigInt::ToString(Handle<BigInt> bigint, int radix) {
- Isolate* isolate = bigint->GetIsolate();
+MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
+ int radix) {
if (bigint->is_zero()) {
return isolate->factory()->NewStringFromStaticChars("0");
}
if (base::bits::IsPowerOfTwo(radix)) {
- return MutableBigInt::ToStringBasePowerOfTwo(bigint, radix);
+ return MutableBigInt::ToStringBasePowerOfTwo(isolate, bigint, radix);
}
return MutableBigInt::ToStringGeneric(bigint, radix);
}
@@ -929,7 +954,7 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
}
if (obj->IsBoolean()) {
- return MutableBigInt::NewFromInt(isolate, obj->BooleanValue());
+ return MutableBigInt::NewFromInt(isolate, obj->BooleanValue(isolate));
}
if (obj->IsBigInt()) {
return Handle<BigInt>::cast(obj);
@@ -948,8 +973,7 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
isolate, NewTypeError(MessageTemplate::kBigIntFromObject, obj), BigInt);
}
-Handle<Object> BigInt::ToNumber(Handle<BigInt> x) {
- Isolate* isolate = x->GetIsolate();
+Handle<Object> BigInt::ToNumber(Isolate* isolate, Handle<BigInt> x) {
if (x->is_zero()) return Handle<Smi>(Smi::kZero, isolate);
if (x->length() == 1 && x->digit(0) < Smi::kMaxValue) {
int value = static_cast<int>(x->digit(0));
@@ -1062,19 +1086,20 @@ void BigInt::BigIntShortPrint(std::ostream& os) {
// Internal helpers.
-MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Handle<BigInt> x,
+MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y,
bool result_sign) {
- if (x->length() < y->length()) return AbsoluteAdd(y, x, result_sign);
+ if (x->length() < y->length()) return AbsoluteAdd(isolate, y, x, result_sign);
if (x->is_zero()) {
DCHECK(y->is_zero());
return x;
}
if (y->is_zero()) {
- return result_sign == x->sign() ? x : BigInt::UnaryMinus(x);
+ return result_sign == x->sign() ? x : BigInt::UnaryMinus(isolate, x);
}
Handle<MutableBigInt> result;
- if (!New(x->GetIsolate(), x->length() + 1).ToHandle(&result)) {
+ if (!New(isolate, x->length() + 1).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
digit_t carry = 0;
@@ -1097,8 +1122,8 @@ MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Handle<BigInt> x,
return MakeImmutable(result);
}
-Handle<BigInt> MutableBigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
- bool result_sign) {
+Handle<BigInt> MutableBigInt::AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y, bool result_sign) {
DCHECK(x->length() >= y->length());
SLOW_DCHECK(AbsoluteCompare(x, y) >= 0);
if (x->is_zero()) {
@@ -1106,10 +1131,9 @@ Handle<BigInt> MutableBigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
return x;
}
if (y->is_zero()) {
- return result_sign == x->sign() ? x : BigInt::UnaryMinus(x);
+ return result_sign == x->sign() ? x : BigInt::UnaryMinus(isolate, x);
}
- Handle<MutableBigInt> result =
- New(x->GetIsolate(), x->length()).ToHandleChecked();
+ Handle<MutableBigInt> result = New(isolate, x->length()).ToHandleChecked();
digit_t borrow = 0;
int i = 0;
for (; i < y->length(); i++) {
@@ -1136,7 +1160,8 @@ Handle<BigInt> MutableBigInt::AbsoluteSub(Handle<BigInt> x, Handle<BigInt> y,
// {result_storage} and {x} may refer to the same BigInt for in-place
// modification.
MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
- Handle<BigIntBase> x, bool sign, MutableBigInt* result_storage) {
+ Isolate* isolate, Handle<BigIntBase> x, bool sign,
+ MutableBigInt* result_storage) {
int input_length = x->length();
// The addition will overflow into a new digit if all existing digits are
// at maximum.
@@ -1148,7 +1173,6 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
}
}
int result_length = input_length + will_overflow;
- Isolate* isolate = x->GetIsolate();
Handle<MutableBigInt> result(result_storage, isolate);
if (result_storage == nullptr) {
if (!New(isolate, result_length).ToHandle(&result)) {
@@ -1173,21 +1197,23 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
}
// Subtracts 1 from the absolute value of {x}. {x} must not be zero.
-Handle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Handle<BigIntBase> x) {
+Handle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
+ Handle<BigIntBase> x) {
DCHECK(!x->is_zero());
// Requesting a result length identical to an existing BigInt's length
// cannot overflow the limit.
- return AbsoluteSubOne(x, x->length()).ToHandleChecked();
+ return AbsoluteSubOne(isolate, x, x->length()).ToHandleChecked();
}
// Like the above, but you can specify that the allocated result should have
// length {result_length}, which must be at least as large as {x->length()}.
-MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Handle<BigIntBase> x,
+MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
+ Handle<BigIntBase> x,
int result_length) {
DCHECK(!x->is_zero());
DCHECK(result_length >= x->length());
Handle<MutableBigInt> result;
- if (!New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ if (!New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
int length = x->length();
@@ -1222,9 +1248,9 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Handle<BigIntBase> x,
// v v v v
// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
- Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage,
- ExtraDigitsHandling extra_digits, SymmetricOp symmetric,
- std::function<digit_t(digit_t, digit_t)> op) {
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
+ SymmetricOp symmetric, std::function<digit_t(digit_t, digit_t)> op) {
int x_length = x->length();
int y_length = y->length();
int num_pairs = y_length;
@@ -1236,7 +1262,6 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
}
}
DCHECK(num_pairs == Min(x_length, y_length));
- Isolate* isolate = x->GetIsolate();
Handle<MutableBigInt> result(result_storage, isolate);
int result_length = extra_digits == kCopy ? x_length : num_pairs;
if (result_storage == nullptr) {
@@ -1264,8 +1289,9 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(
- Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kSkip, kSymmetric,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(isolate, x, y, result_storage, kSkip, kSymmetric,
[](digit_t a, digit_t b) { return a & b; });
}
@@ -1273,18 +1299,20 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
- Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kNotSymmetric,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kNotSymmetric,
[](digit_t a, digit_t b) { return a & ~b; });
}
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Handle<BigIntBase> x,
+Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Isolate* isolate,
+ Handle<BigIntBase> x,
Handle<BigIntBase> y,
MutableBigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kSymmetric,
+ return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a | b; });
}
@@ -1292,8 +1320,9 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Handle<BigIntBase> x,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
Handle<MutableBigInt> MutableBigInt::AbsoluteXor(
- Handle<BigIntBase> x, Handle<BigIntBase> y, MutableBigInt* result_storage) {
- return AbsoluteBitwiseOp(x, y, result_storage, kCopy, kSymmetric,
+ Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
+ MutableBigInt* result_storage) {
+ return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a ^ b; });
}
@@ -1399,7 +1428,8 @@ void BigInt::InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
// allocated for it; otherwise the caller must ensure that it is big enough.
// {quotient} can be the same as {x} for an in-place division. {quotient} can
// also be nullptr if the caller is only interested in the remainder.
-void MutableBigInt::AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
+void MutableBigInt::AbsoluteDivSmall(Isolate* isolate, Handle<BigIntBase> x,
+ digit_t divisor,
Handle<MutableBigInt>* quotient,
digit_t* remainder) {
DCHECK_NE(divisor, 0);
@@ -1408,7 +1438,7 @@ void MutableBigInt::AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
int length = x->length();
if (quotient != nullptr) {
if ((*quotient).is_null()) {
- *quotient = New(x->GetIsolate(), length).ToHandleChecked();
+ *quotient = New(isolate, length).ToHandleChecked();
}
for (int i = length - 1; i >= 0; i--) {
digit_t q = digit_div(*remainder, x->digit(i), divisor, remainder);
@@ -1427,13 +1457,13 @@ void MutableBigInt::AbsoluteDivSmall(Handle<BigIntBase> x, digit_t divisor,
// Both {quotient} and {remainder} are optional, for callers that are only
// interested in one of them.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
-bool MutableBigInt::AbsoluteDivLarge(Handle<BigIntBase> dividend,
+bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate,
+ Handle<BigIntBase> dividend,
Handle<BigIntBase> divisor,
Handle<MutableBigInt>* quotient,
Handle<MutableBigInt>* remainder) {
DCHECK_GE(divisor->length(), 2);
DCHECK(dividend->length() >= divisor->length());
- Isolate* isolate = dividend->GetIsolate();
// The unusual variable names inside this function are consistent with
// Knuth's book, as well as with Go's implementation of this algorithm.
// Maintaining this consistency is probably more useful than trying to
@@ -1456,13 +1486,14 @@ bool MutableBigInt::AbsoluteDivLarge(Handle<BigIntBase> dividend,
// result).
int shift = base::bits::CountLeadingZeros(divisor->digit(n - 1));
if (shift > 0) {
- divisor =
- SpecialLeftShift(divisor, shift, kSameSizeResult).ToHandleChecked();
+ divisor = SpecialLeftShift(isolate, divisor, shift, kSameSizeResult)
+ .ToHandleChecked();
}
// Holds the (continuously updated) remaining part of the dividend, which
// eventually becomes the remainder.
Handle<MutableBigInt> u;
- if (!SpecialLeftShift(dividend, shift, kAlwaysAddOneDigit).ToHandle(&u)) {
+ if (!SpecialLeftShift(isolate, dividend, shift, kAlwaysAddOneDigit)
+ .ToHandle(&u)) {
return false;
}
@@ -1587,14 +1618,15 @@ void MutableBigInt::InplaceRightShift(int shift) {
// Always copies the input, even when {shift} == 0.
// {shift} must be less than kDigitBits, {x} must be non-zero.
MaybeHandle<MutableBigInt> MutableBigInt::SpecialLeftShift(
- Handle<BigIntBase> x, int shift, SpecialLeftShiftMode mode) {
+ Isolate* isolate, Handle<BigIntBase> x, int shift,
+ SpecialLeftShiftMode mode) {
DCHECK_GE(shift, 0);
DCHECK_LT(shift, kDigitBits);
DCHECK_GT(x->length(), 0);
int n = x->length();
int result_length = mode == kAlwaysAddOneDigit ? n + 1 : n;
Handle<MutableBigInt> result;
- if (!New(x->GetIsolate(), result_length).ToHandle(&result)) {
+ if (!New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
if (shift == 0) {
@@ -1618,9 +1650,9 @@ MaybeHandle<MutableBigInt> MutableBigInt::SpecialLeftShift(
return result;
}
-MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Handle<BigIntBase> x,
+MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
+ Handle<BigIntBase> x,
Handle<BigIntBase> y) {
- Isolate* isolate = x->GetIsolate();
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
if (maybe_shift.IsNothing()) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
@@ -1665,9 +1697,9 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Handle<BigIntBase> x,
return MakeImmutable(result);
}
-Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Handle<BigIntBase> x,
+Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Isolate* isolate,
+ Handle<BigIntBase> x,
Handle<BigIntBase> y) {
- Isolate* isolate = x->GetIsolate();
int length = x->length();
bool sign = x->sign();
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
@@ -1729,7 +1761,7 @@ Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Handle<BigIntBase> x,
if (must_round_down) {
// Since the result is negative, rounding down means adding one to
// its absolute value. This cannot overflow.
- result = AbsoluteAddOne(result, true, *result).ToHandleChecked();
+ result = AbsoluteAddOne(isolate, result, true, *result).ToHandleChecked();
}
}
return MakeImmutable(result);
@@ -1887,13 +1919,13 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Handle<BigIntBase> x,
+MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Isolate* isolate,
+ Handle<BigIntBase> x,
int radix) {
STATIC_ASSERT(base::bits::IsPowerOfTwo(kDigitBits));
DCHECK(base::bits::IsPowerOfTwo(radix));
DCHECK(radix >= 2 && radix <= 32);
DCHECK(!x->is_zero());
- Isolate* isolate = x->GetIsolate();
const int length = x->length();
const bool sign = x->sign();
@@ -2017,7 +2049,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Handle<BigIntBase> x,
Handle<BigIntBase>* dividend = &x;
do {
digit_t chunk;
- AbsoluteDivSmall(*dividend, chunk_divisor, &rest, &chunk);
+ AbsoluteDivSmall(isolate, *dividend, chunk_divisor, &rest, &chunk);
DCHECK(!rest.is_null());
dividend = reinterpret_cast<Handle<BigIntBase>*>(&rest);
DisallowHeapAllocation no_gc;
@@ -2071,9 +2103,9 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Handle<BigIntBase> x,
return result;
}
-Handle<BigInt> BigInt::AsIntN(uint64_t n, Handle<BigInt> x) {
+Handle<BigInt> BigInt::AsIntN(Isolate* isolate, uint64_t n, Handle<BigInt> x) {
if (x->is_zero()) return x;
- if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
+ if (n == 0) return MutableBigInt::Zero(isolate);
uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
uint64_t x_length = static_cast<uint64_t>(x->length());
// If {x} has less than {n} bits, return it directly.
@@ -2093,35 +2125,37 @@ Handle<BigInt> BigInt::AsIntN(uint64_t n, Handle<BigInt> x) {
DCHECK_LE(n, kMaxInt);
int N = static_cast<int>(n);
if (!has_bit) {
- return MutableBigInt::TruncateToNBits(N, x);
+ return MutableBigInt::TruncateToNBits(isolate, N, x);
}
if (!x->sign()) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, true);
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, true);
}
// Negative numbers must subtract from 2^n, except for the special case
// described above.
if ((top_digit & (compare_digit - 1)) == 0) {
for (int i = static_cast<int>(needed_length) - 2; i >= 0; i--) {
if (x->digit(i) != 0) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, false);
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x,
+ false);
}
}
- return MutableBigInt::TruncateToNBits(N, x);
+ return MutableBigInt::TruncateToNBits(isolate, N, x);
}
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(N, x, false);
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, false);
}
-MaybeHandle<BigInt> BigInt::AsUintN(uint64_t n, Handle<BigInt> x) {
+MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
+ Handle<BigInt> x) {
if (x->is_zero()) return x;
- if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
+ if (n == 0) return MutableBigInt::Zero(isolate);
// If {x} is negative, simulate two's complement representation.
if (x->sign()) {
if (n > kMaxLengthBits) {
- THROW_NEW_ERROR(x->GetIsolate(),
- NewRangeError(MessageTemplate::kBigIntTooBig), BigInt);
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
}
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(static_cast<int>(n), x,
- false);
+ return MutableBigInt::TruncateAndSubFromPowerOfTwo(
+ isolate, static_cast<int>(n), x, false);
}
// If {x} is positive and has up to {n} bits, return it directly.
if (n >= kMaxLengthBits) return x;
@@ -2137,14 +2171,14 @@ MaybeHandle<BigInt> BigInt::AsUintN(uint64_t n, Handle<BigInt> x) {
}
// Otherwise, truncate.
DCHECK_LE(n, kMaxInt);
- return MutableBigInt::TruncateToNBits(static_cast<int>(n), x);
+ return MutableBigInt::TruncateToNBits(isolate, static_cast<int>(n), x);
}
-Handle<BigInt> MutableBigInt::TruncateToNBits(int n, Handle<BigInt> x) {
+Handle<BigInt> MutableBigInt::TruncateToNBits(Isolate* isolate, int n,
+ Handle<BigInt> x) {
// Only call this when there's something to do.
DCHECK_NE(n, 0);
DCHECK_GT(x->length(), n / kDigitBits);
- Isolate* isolate = x->GetIsolate();
int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
DCHECK_LE(needed_digits, x->length());
@@ -2168,12 +2202,12 @@ Handle<BigInt> MutableBigInt::TruncateToNBits(int n, Handle<BigInt> x) {
}
// Subtracts the least significant n bits of abs(x) from 2^n.
-Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(int n,
+Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(Isolate* isolate,
+ int n,
Handle<BigInt> x,
bool result_sign) {
DCHECK_NE(n, 0);
DCHECK_LE(n, kMaxLengthBits);
- Isolate* isolate = x->GetIsolate();
int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
DCHECK_LE(needed_digits, kMaxLength); // Follows from n <= kMaxLengthBits.
@@ -2459,16 +2493,20 @@ BigInt::digit_t MutableBigInt::digit_div(digit_t high, digit_t low,
static const digit_t kHalfDigitBase = 1ull << kHalfDigitBits;
// Adapted from Warren, Hacker's Delight, p. 152.
int s = base::bits::CountLeadingZeros(divisor);
+ DCHECK_NE(s, kDigitBits); // {divisor} is not 0.
divisor <<= s;
digit_t vn1 = divisor >> kHalfDigitBits;
digit_t vn0 = divisor & kHalfDigitMask;
- // {s} can be 0. "low >> kDigitBits == low" on x86, so we "&" it with
+ // {s} can be 0. {low >> kDigitBits} would be undefined behavior, so
+ // we mask the shift amount with {kShiftMask}, and the result with
// {s_zero_mask} which is 0 if s == 0 and all 1-bits otherwise.
STATIC_ASSERT(sizeof(intptr_t) == sizeof(digit_t));
+ const int kShiftMask = kDigitBits - 1;
digit_t s_zero_mask =
static_cast<digit_t>(static_cast<intptr_t>(-s) >> (kDigitBits - 1));
- digit_t un32 = (high << s) | ((low >> (kDigitBits - s)) & s_zero_mask);
+ digit_t un32 =
+ (high << s) | ((low >> ((kDigitBits - s) & kShiftMask)) & s_zero_mask);
digit_t un10 = low << s;
digit_t un1 = un10 >> kHalfDigitBits;
digit_t un0 = un10 & kHalfDigitMask;
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index e654738934..f8c5c3dbf6 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -85,10 +85,7 @@ class FreshlyAllocatedBigInt : public BigIntBase {
// (and no explicit operator is provided either).
public:
- inline static FreshlyAllocatedBigInt* cast(Object* object) {
- SLOW_DCHECK(object->IsBigInt());
- return reinterpret_cast<FreshlyAllocatedBigInt*>(object);
- }
+ inline static FreshlyAllocatedBigInt* cast(Object* object);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreshlyAllocatedBigInt);
@@ -101,30 +98,41 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
// Implementation of the Spec methods, see:
// https://tc39.github.io/proposal-bigint/#sec-numeric-types
// Sections 1.1.1 through 1.1.19.
- static Handle<BigInt> UnaryMinus(Handle<BigInt> x);
- static MaybeHandle<BigInt> BitwiseNot(Handle<BigInt> x);
- static MaybeHandle<BigInt> Exponentiate(Handle<BigInt> base,
+ static Handle<BigInt> UnaryMinus(Isolate* isolate, Handle<BigInt> x);
+ static MaybeHandle<BigInt> BitwiseNot(Isolate* isolate, Handle<BigInt> x);
+ static MaybeHandle<BigInt> Exponentiate(Isolate* isolate, Handle<BigInt> base,
Handle<BigInt> exponent);
- static MaybeHandle<BigInt> Multiply(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> Divide(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> Remainder(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> Add(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> Subtract(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> LeftShift(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> SignedRightShift(Handle<BigInt> x,
+ static MaybeHandle<BigInt> Multiply(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> Divide(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> Remainder(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> Add(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> Subtract(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> LeftShift(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> SignedRightShift(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y);
- static MaybeHandle<BigInt> UnsignedRightShift(Handle<BigInt> x,
+ static MaybeHandle<BigInt> UnsignedRightShift(Isolate* isolate,
+ Handle<BigInt> x,
Handle<BigInt> y);
// More convenient version of "bool LessThan(x, y)".
static ComparisonResult CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y);
static bool EqualToBigInt(BigInt* x, BigInt* y);
- static MaybeHandle<BigInt> BitwiseAnd(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> BitwiseXor(Handle<BigInt> x, Handle<BigInt> y);
- static MaybeHandle<BigInt> BitwiseOr(Handle<BigInt> x, Handle<BigInt> y);
+ static MaybeHandle<BigInt> BitwiseAnd(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> BitwiseXor(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
+ static MaybeHandle<BigInt> BitwiseOr(Isolate* isolate, Handle<BigInt> x,
+ Handle<BigInt> y);
// Other parts of the public interface.
- static MaybeHandle<BigInt> Increment(Handle<BigInt> x);
- static MaybeHandle<BigInt> Decrement(Handle<BigInt> x);
+ static MaybeHandle<BigInt> Increment(Isolate* isolate, Handle<BigInt> x);
+ static MaybeHandle<BigInt> Decrement(Isolate* isolate, Handle<BigInt> x);
bool ToBoolean() { return !is_zero(); }
uint32_t Hash() {
@@ -132,15 +140,18 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
return is_zero() ? 0 : ComputeIntegerHash(static_cast<uint32_t>(digit(0)));
}
- static bool EqualToString(Handle<BigInt> x, Handle<String> y);
+ static bool EqualToString(Isolate* isolate, Handle<BigInt> x,
+ Handle<String> y);
static bool EqualToNumber(Handle<BigInt> x, Handle<Object> y);
- static ComparisonResult CompareToString(Handle<BigInt> x, Handle<String> y);
+ static ComparisonResult CompareToString(Isolate* isolate, Handle<BigInt> x,
+ Handle<String> y);
static ComparisonResult CompareToNumber(Handle<BigInt> x, Handle<Object> y);
// Exposed for tests, do not call directly. Use CompareToNumber() instead.
static ComparisonResult CompareToDouble(Handle<BigInt> x, double y);
- static Handle<BigInt> AsIntN(uint64_t n, Handle<BigInt> x);
- static MaybeHandle<BigInt> AsUintN(uint64_t n, Handle<BigInt> x);
+ static Handle<BigInt> AsIntN(Isolate* isolate, uint64_t n, Handle<BigInt> x);
+ static MaybeHandle<BigInt> AsUintN(Isolate* isolate, uint64_t n,
+ Handle<BigInt> x);
static Handle<BigInt> FromInt64(Isolate* isolate, int64_t n);
static Handle<BigInt> FromUint64(Isolate* isolate, uint64_t n);
@@ -161,11 +172,12 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
return kHeaderSize + length * kDigitSize;
}
- static MaybeHandle<String> ToString(Handle<BigInt> bigint, int radix = 10);
+ static MaybeHandle<String> ToString(Isolate* isolate, Handle<BigInt> bigint,
+ int radix = 10);
// "The Number value for x", see:
// https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type
// Returns a Smi or HeapNumber.
- static Handle<Object> ToNumber(Handle<BigInt> x);
+ static Handle<Object> ToNumber(Isolate* isolate, Handle<BigInt> x);
// ECMAScript's NumberToBigInt
static MaybeHandle<BigInt> FromNumber(Isolate* isolate,
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index e484003ba8..57e5f2a565 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -19,10 +19,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(CodeDataContainer, CODE_DATA_CONTAINER_TYPE)
-
CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Code)
@@ -159,10 +155,6 @@ DependentCode::DependencyGroup DependentCode::group() {
return static_cast<DependencyGroup>(GroupField::decode(flags()));
}
-void DependentCode::set_group(DependentCode::DependencyGroup group) {
- set_flags(GroupField::update(flags(), static_cast<int>(group)));
-}
-
void DependentCode::set_object_at(int i, Object* object) {
set(kCodesStartIndex + i, object);
}
@@ -177,9 +169,8 @@ void DependentCode::copy(int from, int to) {
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
-#define CODE_ACCESSORS(name, type, offset) \
- ACCESSORS_CHECKED2(Code, name, type, offset, true, \
- !GetHeap()->InNewSpace(value))
+#define CODE_ACCESSORS(name, type, offset) \
+ ACCESSORS_CHECKED2(Code, name, type, offset, true, !Heap::InNewSpace(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
@@ -229,9 +220,10 @@ void Code::set_next_code_link(Object* value) {
}
int Code::InstructionSize() const {
-#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionSize();
-#endif
+ if (is_off_heap_trampoline()) {
+ DCHECK(FLAG_embedded_builtins);
+ return OffHeapInstructionSize();
+ }
return raw_instruction_size();
}
@@ -240,9 +232,10 @@ Address Code::raw_instruction_start() const {
}
Address Code::InstructionStart() const {
-#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionStart();
-#endif
+ if (is_off_heap_trampoline()) {
+ DCHECK(FLAG_embedded_builtins);
+ return OffHeapInstructionStart();
+ }
return raw_instruction_start();
}
@@ -251,9 +244,10 @@ Address Code::raw_instruction_end() const {
}
Address Code::InstructionEnd() const {
-#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionEnd();
-#endif
+ if (is_off_heap_trampoline()) {
+ DCHECK(FLAG_embedded_builtins);
+ return OffHeapInstructionEnd();
+ }
return raw_instruction_end();
}
@@ -318,12 +312,11 @@ int Code::relocation_size() const {
Address Code::entry() const { return raw_instruction_start(); }
bool Code::contains(Address inner_pointer) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsEmbeddedBuiltin(this)) {
+ if (is_off_heap_trampoline()) {
+ DCHECK(FLAG_embedded_builtins);
return (OffHeapInstructionStart() <= inner_pointer) &&
(inner_pointer < OffHeapInstructionEnd());
}
-#endif
return (address() <= inner_pointer) && (inner_pointer < address() + Size());
}
@@ -341,13 +334,15 @@ Code::Kind Code::kind() const {
}
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots) {
+ bool is_turbofanned, int stack_slots,
+ bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
static_assert(Code::NUMBER_OF_KINDS <= KindField::kMax + 1, "field overflow");
uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
- StackSlotsField::encode(stack_slots);
+ StackSlotsField::encode(stack_slots) |
+ IsOffHeapTrampoline::encode(is_off_heap_trampoline);
WRITE_UINT32_FIELD(this, kFlagsOffset, flags);
DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
@@ -441,6 +436,10 @@ inline void Code::set_is_exception_caught(bool value) {
code_data_container()->set_kind_specific_flags(updated);
}
+inline bool Code::is_off_heap_trampoline() const {
+ return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+}
+
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
if (is_promise_rejection()) return HandlerTable::PROMISE;
if (is_exception_caught()) return HandlerTable::CAUGHT;
@@ -534,6 +533,14 @@ Address Code::constant_pool() const {
}
Code* Code::GetCodeFromTargetAddress(Address address) {
+ {
+ // TODO(jgruber,v8:6666): Support embedded builtins here. We'd need to pass
+ // in the current isolate.
+ Address start = reinterpret_cast<Address>(Isolate::CurrentEmbeddedBlob());
+ Address end = start + Isolate::CurrentEmbeddedBlobSize();
+ CHECK(address < start || address >= end);
+ }
+
HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
// GetCodeFromTargetAddress might be called when marking objects during mark
// sweep. reinterpret_cast is therefore used instead of the more appropriate
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 0fb374e9ea..99a159a977 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -24,8 +24,10 @@ class Register;
}
// Code describes objects with on-the-fly generated machine code.
-class Code : public HeapObject {
+class Code : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
// Opaque data type for encapsulating code flags like kind, inline
// cache state, and arguments count.
typedef uint32_t Flags;
@@ -52,6 +54,8 @@ class Code : public HeapObject {
static const char* Kind2String(Kind kind);
#ifdef ENABLE_DISASSEMBLER
+ const char* GetName(Isolate* isolate) const;
+ void PrintBuiltinCode(Isolate* isolate, const char* name);
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress);
#endif
@@ -67,13 +71,11 @@ class Code : public HeapObject {
// off-heap instruction stream rather than the on-heap trampoline located
// at instruction_start.
inline int InstructionSize() const;
-#ifdef V8_EMBEDDED_BUILTINS
int OffHeapInstructionSize() const;
-#endif
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
- void InvalidateEmbeddedObjects();
+ void InvalidateEmbeddedObjects(Heap* heap);
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
@@ -182,6 +184,10 @@ class Code : public HeapObject {
// Use GetBuiltinCatchPrediction to access this.
inline void set_is_exception_caught(bool flag);
+ // [is_off_heap_trampoline]: For kind BUILTIN tells whether
+ // this is a trampoline to an off-heap builtin.
+ inline bool is_off_heap_trampoline() const;
+
// [constant_pool]: The constant pool for this function.
inline Address constant_pool() const;
@@ -203,7 +209,8 @@ class Code : public HeapObject {
// Initialize the flags field. Similar to clear_padding above this ensure that
// the snapshot content is deterministic.
inline void initialize_flags(Kind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots);
+ bool is_turbofanned, int stack_slots,
+ bool is_off_heap_trampoline);
// Convert a target address into a code object.
static inline Code* GetCodeFromTargetAddress(Address address);
@@ -221,9 +228,7 @@ class Code : public HeapObject {
// this differs from instruction_start (which would point to the off-heap
// trampoline instead).
inline Address InstructionStart() const;
-#ifdef V8_EMBEDDED_BUILTINS
Address OffHeapInstructionStart() const;
-#endif
// Returns the address right after the last instruction.
inline Address raw_instruction_end() const;
@@ -232,9 +237,7 @@ class Code : public HeapObject {
// objects this differs from instruction_end (which would point to the
// off-heap trampoline instead).
inline Address InstructionEnd() const;
-#ifdef V8_EMBEDDED_BUILTINS
Address OffHeapInstructionEnd() const;
-#endif
// Returns the size of the instructions, padding, relocation and unwinding
// information.
@@ -304,10 +307,10 @@ class Code : public HeapObject {
void Relocate(intptr_t delta);
// Migrate code described by desc.
- void CopyFrom(const CodeDesc& desc);
+ void CopyFrom(Heap* heap, const CodeDesc& desc);
// Migrate code from desc without flushing the instruction cache.
- void CopyFromNoFlush(const CodeDesc& desc);
+ void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);
// Flushes the instruction cache for the executable instructions of this code
// object.
@@ -340,12 +343,11 @@ class Code : public HeapObject {
#ifdef DEBUG
enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
- void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
+ void VerifyEmbeddedObjects(Isolate* isolate,
+ VerifyMode mode = kNoContextRetainingPointers);
#endif // DEBUG
-#ifdef V8_EMBEDDED_BUILTINS
- bool IsProcessIndependent();
-#endif
+ bool IsIsolateIndependent(Isolate* isolate);
inline bool CanContainWeakObjects();
@@ -415,11 +417,13 @@ class Code : public HeapObject {
V(HasUnwindingInfoField, bool, 1, _) \
V(KindField, Kind, 5, _) \
V(IsTurbofannedField, bool, 1, _) \
- V(StackSlotsField, int, 24, _)
+ V(StackSlotsField, int, 24, _) \
+ V(IsOffHeapTrampoline, bool, 1, _)
DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
#undef CODE_FLAGS_BIT_FIELDS
static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
- static_assert(StackSlotsField::kNext <= 32, "Code::flags field exhausted");
+ static_assert(IsOffHeapTrampoline::kNext <= 32,
+ "Code::flags field exhausted");
// KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
@@ -438,7 +442,8 @@ class Code : public HeapObject {
MarkedForDeoptimizationField::kShift;
static const int kArgumentsBits = 16;
- static const int kMaxArguments = (1 << kArgumentsBits) - 1;
+ // Reserve one argument count value as the "don't adapt arguments" sentinel.
+ static const int kMaxArguments = (1 << kArgumentsBits) - 2;
private:
friend class RelocIterator;
@@ -454,8 +459,11 @@ class Code : public HeapObject {
// pages within the heap, its header fields need to be immutable. There always
// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
// field {Code::code_data_container} itself is immutable.
-class CodeDataContainer : public HeapObject {
+class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
DECL_ACCESSORS(next_code_link, Object)
DECL_INT_ACCESSORS(kind_specific_flags)
@@ -493,8 +501,11 @@ class CodeDataContainer : public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
};
-class AbstractCode : public HeapObject {
+class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// All code kinds and INTERPRETED_FUNCTION.
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
@@ -586,6 +597,8 @@ class AbstractCode : public HeapObject {
class DependentCode : public FixedArray {
public:
+ DECL_CAST(DependentCode)
+
enum DependencyGroup {
// Group of code that embed a transition to this map, and depend on being
// deoptimized when the transition is replaced by a new version.
@@ -612,64 +625,63 @@ class DependentCode : public FixedArray {
kAllocationSiteTransitionChangedGroup
};
- static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
- static const int kNextLinkIndex = 0;
- static const int kFlagsIndex = 1;
- static const int kCodesStartIndex = 2;
+ // Register a code dependency of {cell} on {object}.
+ static void InstallDependency(Isolate* isolate, Handle<WeakCell> cell,
+ Handle<HeapObject> object,
+ DependencyGroup group);
bool Contains(DependencyGroup group, WeakCell* code_cell);
bool IsEmpty(DependencyGroup group);
- static Handle<DependentCode> InsertCompilationDependencies(
- Handle<DependentCode> entries, DependencyGroup group,
- Handle<Foreign> info);
-
- static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<WeakCell> code_cell);
-
- void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
- WeakCell* code_cell);
-
- void RemoveCompilationDependencies(DependentCode::DependencyGroup group,
- Foreign* info);
+ void RemoveCompilationDependencies(DependencyGroup group, Foreign* info);
- void DeoptimizeDependentCodeGroup(Isolate* isolate,
- DependentCode::DependencyGroup group);
+ void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group);
- bool MarkCodeForDeoptimization(Isolate* isolate,
- DependentCode::DependencyGroup group);
+ bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group);
- // The following low-level accessors should only be used by this class
- // and the mark compact collector.
- inline DependentCode* next_link();
- inline void set_next_link(DependentCode* next);
- inline int count();
- inline void set_count(int value);
+ // The following low-level accessors are exposed only for tests.
inline DependencyGroup group();
- inline void set_group(DependencyGroup group);
inline Object* object_at(int i);
- inline void set_object_at(int i, Object* object);
- inline void clear_at(int i);
- inline void copy(int from, int to);
- DECL_CAST(DependentCode)
+ inline int count();
+ inline DependentCode* next_link();
+ private:
static const char* DependencyGroupName(DependencyGroup group);
- private:
- static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Object> object);
- static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
+ // Get/Set {object}'s {DependentCode}.
+ static DependentCode* Get(Handle<HeapObject> object);
+ static void Set(Handle<HeapObject> object, Handle<DependentCode> dep);
+
+ static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group,
+ Handle<Object> object,
Handle<DependentCode> next);
- static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
+ static Handle<DependentCode> EnsureSpace(Isolate* isolate,
+ Handle<DependentCode> entries);
+ static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
+ Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<WeakCell> code_cell);
+
// Compact by removing cleared weak cells and return true if there was
// any cleared weak cell.
bool Compact();
+
static int Grow(int number_of_entries) {
if (number_of_entries < 5) return number_of_entries + 1;
return number_of_entries * 5 / 4;
}
+
+ static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
+ static const int kNextLinkIndex = 0;
+ static const int kFlagsIndex = 1;
+ static const int kCodesStartIndex = 2;
+
+ inline void set_next_link(DependentCode* next);
+ inline void set_count(int value);
+ inline void set_object_at(int i, Object* object);
+ inline void clear_at(int i);
+ inline void copy(int from, int to);
+
inline int flags();
inline void set_flags(int flags);
class GroupField : public BitField<int, 0, 3> {};
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 99edc50c96..cf80ec7076 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -44,7 +44,7 @@ uint32_t CompilationCacheShape::HashForObject(Isolate* isolate,
if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
FixedArray* val = FixedArray::cast(object);
- if (val->map() == val->GetHeap()->fixed_cow_array_map()) {
+ if (val->map() == val->GetReadOnlyRoots().fixed_cow_array_map()) {
DCHECK_EQ(4, val->length());
SharedFunctionInfo* shared = SharedFunctionInfo::cast(val->get(0));
String* source = String::cast(val->get(1));
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index e65dc22fb4..76deeb9684 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -66,8 +66,12 @@ class InfoCellPair {
// recompilation stub, or to "old" code. This avoids memory leaks due to
// premature caching of scripts and eval strings that are never needed later.
class CompilationCacheTable
- : public HashTable<CompilationCacheTable, CompilationCacheShape> {
+ : public HashTable<CompilationCacheTable, CompilationCacheShape>,
+ public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(Handle<String> src, Handle<SharedFunctionInfo> shared,
LanguageMode language_mode);
@@ -93,7 +97,7 @@ class CompilationCacheTable
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position);
static Handle<CompilationCacheTable> PutRegExp(
- Handle<CompilationCacheTable> cache, Handle<String> src,
+ Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
void Remove(Object* value);
void Age();
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index f0650479f7..ce9b5682c7 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -26,8 +26,8 @@ int DataHandler::data_field_count() const {
return (map()->instance_size() - kSizeWithData0) / kPointerSize;
}
-ACCESSORS_CHECKED(DataHandler, data1, Object, kData1Offset,
- map()->instance_size() >= kSizeWithData1)
+WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset,
+ map()->instance_size() >= kSizeWithData1)
ACCESSORS_CHECKED(DataHandler, data2, Object, kData2Offset,
map()->instance_size() >= kSizeWithData2)
ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index 8b3298207f..96fab2e6bc 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -31,7 +31,7 @@ class DataHandler : public Struct {
// [data1-3]: These are optional general-purpose fields whose content and
// presence depends on the handler kind.
- DECL_ACCESSORS(data1, Object)
+ DECL_ACCESSORS(data1, MaybeObject)
DECL_ACCESSORS(data2, Object)
DECL_ACCESSORS(data3, Object)
@@ -53,6 +53,8 @@ class DataHandler : public Struct {
DECL_CAST(DataHandler)
DECL_VERIFIER(DataHandler)
+
+ class BodyDescriptor;
};
} // namespace internal
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index db651d9f4e..548fb15705 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DEBUG_OBJECTS_INL_H_
#include "src/objects/debug-objects.h"
+#include "src/objects/shared-function-info.h"
#include "src/heap/heap-inl.h"
@@ -23,28 +24,39 @@ CAST_ACCESSOR(BreakPoint)
SMI_ACCESSORS(DebugInfo, flags, kFlagsOffset)
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset)
-ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
+ACCESSORS(DebugInfo, function_identifier, Object, kFunctionIdentifierOffset)
+ACCESSORS(DebugInfo, original_bytecode_array, Object,
+ kOriginalBytecodeArrayOffset)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
+BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state,
+ DebugInfo::SideEffectStateBits)
+BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, debug_is_blackboxed,
+ DebugInfo::DebugIsBlackboxedBit)
+BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, computed_debug_is_blackboxed,
+ DebugInfo::ComputedDebugIsBlackboxedBit)
+BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, debugging_id,
+ DebugInfo::DebuggingIdBits)
+
SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionOffset)
ACCESSORS(BreakPointInfo, break_points, Object, kBreakPointsOffset)
SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
-bool DebugInfo::HasDebugBytecodeArray() {
- return debug_bytecode_array()->IsBytecodeArray();
+bool DebugInfo::HasInstrumentedBytecodeArray() {
+ return original_bytecode_array()->IsBytecodeArray();
}
BytecodeArray* DebugInfo::OriginalBytecodeArray() {
- DCHECK(HasDebugBytecodeArray());
- return shared()->GetBytecodeArray();
+ DCHECK(HasInstrumentedBytecodeArray());
+ return BytecodeArray::cast(original_bytecode_array());
}
BytecodeArray* DebugInfo::DebugBytecodeArray() {
- DCHECK(HasDebugBytecodeArray());
- return BytecodeArray::cast(debug_bytecode_array());
+ DCHECK(HasInstrumentedBytecodeArray());
+ return shared()->GetDebugBytecodeArray();
}
} // namespace internal
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index f79aa5cea5..b77b6e136e 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -3,12 +3,15 @@
// found in the LICENSE file.
#include "src/objects/debug-objects.h"
+#include "src/debug/debug-evaluate.h"
#include "src/objects/debug-objects-inl.h"
namespace v8 {
namespace internal {
-bool DebugInfo::IsEmpty() const { return flags() == kNone; }
+bool DebugInfo::IsEmpty() const {
+ return flags() == kNone && debugger_hints() == 0;
+}
bool DebugInfo::HasBreakInfo() const { return (flags() & kHasBreakInfo) != 0; }
@@ -21,19 +24,20 @@ void DebugInfo::SetDebugExecutionMode(ExecutionMode value) {
: (flags() & ~kDebugExecutionMode));
}
-bool DebugInfo::ClearBreakInfo() {
- Isolate* isolate = GetIsolate();
-
- set_debug_bytecode_array(isolate->heap()->undefined_value());
- set_break_points(isolate->heap()->empty_fixed_array());
+void DebugInfo::ClearBreakInfo(Isolate* isolate) {
+ if (HasInstrumentedBytecodeArray()) {
+ // Reset function's bytecode array field to point to the original bytecode
+ // array.
+ shared()->SetDebugBytecodeArray(OriginalBytecodeArray());
+ set_original_bytecode_array(ReadOnlyRoots(isolate).undefined_value());
+ }
+ set_break_points(ReadOnlyRoots(isolate).empty_fixed_array());
int new_flags = flags();
new_flags &= ~kHasBreakInfo & ~kPreparedForDebugExecution;
new_flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
new_flags &= ~kDebugExecutionMode;
set_flags(new_flags);
-
- return new_flags == kNone;
}
void DebugInfo::SetBreakAtEntry() {
@@ -53,21 +57,21 @@ bool DebugInfo::CanBreakAtEntry() const {
}
// Check if there is a break point at this source position.
-bool DebugInfo::HasBreakPoint(int source_position) {
+bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
// Get the break point info object for this code offset.
- Object* break_point_info = GetBreakPointInfo(source_position);
+ Object* break_point_info = GetBreakPointInfo(isolate, source_position);
// If there is no break point info object or no break points in the break
// point info object there is no break point at this code offset.
- if (break_point_info->IsUndefined(GetIsolate())) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
+ if (break_point_info->IsUndefined(isolate)) return false;
+ return BreakPointInfo::cast(break_point_info)->GetBreakPointCount(isolate) >
+ 0;
}
// Get the break point info object for this source position.
-Object* DebugInfo::GetBreakPointInfo(int source_position) {
+Object* DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
- Isolate* isolate = GetIsolate();
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
BreakPointInfo* break_point_info =
@@ -77,35 +81,33 @@ Object* DebugInfo::GetBreakPointInfo(int source_position) {
}
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
-bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
+bool DebugInfo::ClearBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- Isolate* isolate = debug_info->GetIsolate();
-
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
- BreakPointInfo::ClearBreakPoint(break_point_info, break_point);
+ if (BreakPointInfo::HasBreakPoint(isolate, break_point_info, break_point)) {
+ BreakPointInfo::ClearBreakPoint(isolate, break_point_info, break_point);
return true;
}
}
return false;
}
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
+void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
+ int source_position,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- Isolate* isolate = debug_info->GetIsolate();
Handle<Object> break_point_info(
- debug_info->GetBreakPointInfo(source_position), isolate);
+ debug_info->GetBreakPointInfo(isolate, source_position), isolate);
if (!break_point_info->IsUndefined(isolate)) {
BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info), break_point);
+ isolate, Handle<BreakPointInfo>::cast(break_point_info), break_point);
return;
}
@@ -138,15 +140,15 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info =
isolate->factory()->NewBreakPointInfo(source_position);
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point);
+ BreakPointInfo::SetBreakPoint(isolate, new_break_point_info, break_point);
debug_info->break_points()->set(index, *new_break_point_info);
}
// Get the break point objects for a source position.
-Handle<Object> DebugInfo::GetBreakPoints(int source_position) {
+Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
+ int source_position) {
DCHECK(HasBreakInfo());
- Object* break_point_info = GetBreakPointInfo(source_position);
- Isolate* isolate = GetIsolate();
+ Object* break_point_info = GetBreakPointInfo(isolate, source_position);
if (break_point_info->IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -155,29 +157,29 @@ Handle<Object> DebugInfo::GetBreakPoints(int source_position) {
}
// Get the total number of break points.
-int DebugInfo::GetBreakPointCount() {
+int DebugInfo::GetBreakPointCount(Isolate* isolate) {
DCHECK(HasBreakInfo());
- Isolate* isolate = GetIsolate();
int count = 0;
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount();
+ count += break_point_info->GetBreakPointCount(isolate);
}
}
return count;
}
-Handle<Object> DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
+Handle<Object> DebugInfo::FindBreakPointInfo(Isolate* isolate,
+ Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- Isolate* isolate = debug_info->GetIsolate();
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
+ if (BreakPointInfo::HasBreakPoint(isolate, break_point_info,
+ break_point)) {
return break_point_info;
}
}
@@ -189,16 +191,23 @@ bool DebugInfo::HasCoverageInfo() const {
return (flags() & kHasCoverageInfo) != 0;
}
-bool DebugInfo::ClearCoverageInfo() {
+void DebugInfo::ClearCoverageInfo(Isolate* isolate) {
if (HasCoverageInfo()) {
- Isolate* isolate = GetIsolate();
-
- set_coverage_info(isolate->heap()->undefined_value());
+ set_coverage_info(ReadOnlyRoots(isolate).undefined_value());
int new_flags = flags() & ~kHasCoverageInfo;
set_flags(new_flags);
}
- return flags() == kNone;
+}
+
+DebugInfo::SideEffectState DebugInfo::GetSideEffectState(Isolate* isolate) {
+ if (side_effect_state() == kNotComputed) {
+ SideEffectState has_no_side_effect =
+ DebugEvaluate::FunctionGetSideEffectState(isolate,
+ handle(shared(), isolate));
+ set_side_effect_state(has_no_side_effect);
+ }
+ return static_cast<SideEffectState>(side_effect_state());
}
namespace {
@@ -208,23 +217,24 @@ bool IsEqual(BreakPoint* break_point1, BreakPoint* break_point2) {
} // namespace
// Remove the specified break point object.
-void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
+void BreakPointInfo::ClearBreakPoint(Isolate* isolate,
+ Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
- Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
if (break_point_info->break_points()->IsUndefined(isolate)) return;
// If there is a single break point clear it if it is the same.
if (!break_point_info->break_points()->IsFixedArray()) {
if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
*break_point)) {
- break_point_info->set_break_points(isolate->heap()->undefined_value());
+ break_point_info->set_break_points(
+ ReadOnlyRoots(isolate).undefined_value());
}
return;
}
// If there are multiple break points shrink the array
DCHECK(break_point_info->break_points()->IsFixedArray());
- Handle<FixedArray> old_array =
- Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
+ Handle<FixedArray> old_array = Handle<FixedArray>(
+ FixedArray::cast(break_point_info->break_points()), isolate);
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() - 1);
int found_count = 0;
@@ -241,10 +251,9 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
}
// Add the specified break point object.
-void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
+void BreakPointInfo::SetBreakPoint(Isolate* isolate,
+ Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
- Isolate* isolate = break_point_info->GetIsolate();
-
// If there was no break point objects before just set it.
if (break_point_info->break_points()->IsUndefined(isolate)) {
break_point_info->set_break_points(*break_point);
@@ -261,8 +270,8 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
return;
}
// If there was more than one break point before extend array.
- Handle<FixedArray> old_array =
- Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
+ Handle<FixedArray> old_array = Handle<FixedArray>(
+ FixedArray::cast(break_point_info->break_points()), isolate);
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
@@ -275,10 +284,10 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
break_point_info->set_break_points(*new_array);
}
-bool BreakPointInfo::HasBreakPoint(Handle<BreakPointInfo> break_point_info,
+bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
+ Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// No break point.
- Isolate* isolate = break_point_info->GetIsolate();
if (break_point_info->break_points()->IsUndefined(isolate)) {
return false;
}
@@ -298,9 +307,9 @@ bool BreakPointInfo::HasBreakPoint(Handle<BreakPointInfo> break_point_info,
}
// Get the number of break points.
-int BreakPointInfo::GetBreakPointCount() {
+int BreakPointInfo::GetBreakPointCount(Isolate* isolate) {
// No break point.
- if (break_points()->IsUndefined(GetIsolate())) return 0;
+ if (break_points()->IsUndefined(isolate)) return 0;
// Single break point.
if (!break_points()->IsFixedArray()) return 1;
// Multiple break points.
@@ -355,7 +364,7 @@ void CoverageInfo::Print(std::unique_ptr<char[]> function_name) {
DCHECK(FLAG_trace_block_coverage);
DisallowHeapAllocation no_gc;
- OFStream os(stdout);
+ StdoutStream os;
os << "Coverage info (";
if (strlen(function_name.get()) > 0) {
os << function_name.get();
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index f3e0256ae3..749489a1c1 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -19,8 +19,11 @@ class BytecodeArray;
// The DebugInfo class holds additional information for a function being
// debugged.
-class DebugInfo : public Struct {
+class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
enum Flag {
kNone = 0,
kHasBreakInfo = 1 << 0,
@@ -42,6 +45,9 @@ class DebugInfo : public Struct {
// Bit field containing various information collected for debugging.
DECL_INT_ACCESSORS(debugger_hints)
+ // Function identifier field from shared function info.
+ DECL_ACCESSORS(function_identifier, Object)
+
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
@@ -56,7 +62,10 @@ class DebugInfo : public Struct {
ExecutionMode DebugExecutionMode() const;
void SetDebugExecutionMode(ExecutionMode value);
- inline bool HasDebugBytecodeArray();
+ // Specifies whether the associated function has an instrumented bytecode
+ // array. If so, OriginalBytecodeArray returns the non-instrumented bytecode,
+ // and DebugBytecodeArray returns the instrumented bytecode.
+ inline bool HasInstrumentedBytecodeArray();
inline BytecodeArray* OriginalBytecodeArray();
inline BytecodeArray* DebugBytecodeArray();
@@ -66,9 +75,8 @@ class DebugInfo : public Struct {
bool HasBreakInfo() const;
- // Clears all fields related to break points. Returns true iff the
- // DebugInfo is now empty.
- bool ClearBreakInfo();
+ // Clears all fields related to break points.
+ void ClearBreakInfo(Isolate* isolate);
// Accessors to flag whether to break before entering the function.
// This is used to break for functions with no source, e.g. builtins.
@@ -76,40 +84,79 @@ class DebugInfo : public Struct {
void ClearBreakAtEntry();
bool BreakAtEntry() const;
- // The instrumented bytecode array for functions with break points.
- DECL_ACCESSORS(debug_bytecode_array, Object)
+ // The original uninstrumented bytecode array for functions with break
+ // points - the instrumented bytecode is held in the shared function info.
+ DECL_ACCESSORS(original_bytecode_array, Object)
// Fixed array holding status information for each active break point.
DECL_ACCESSORS(break_points, FixedArray)
// Check if there is a break point at a source position.
- bool HasBreakPoint(int source_position);
+ bool HasBreakPoint(Isolate* isolate, int source_position);
// Attempt to clear a break point. Return true if successful.
- static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
+ static bool ClearBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point);
// Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
+ static void SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
+ int source_position,
Handle<BreakPoint> break_point);
// Get the break point objects for a source position.
- Handle<Object> GetBreakPoints(int source_position);
+ Handle<Object> GetBreakPoints(Isolate* isolate, int source_position);
// Find the break point info holding this break point object.
- static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
+ static Handle<Object> FindBreakPointInfo(Isolate* isolate,
+ Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point);
// Get the number of break points for this function.
- int GetBreakPointCount();
+ int GetBreakPointCount(Isolate* isolate);
// Returns whether we should be able to break before entering the function.
// This is true for functions with no source, e.g. builtins.
bool CanBreakAtEntry() const;
+ // --- Debugger hint flags ---
+ // ---------------------------
+
+ // Indicates that the function should be skipped during stepping.
+ DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
+
+ // Indicates that |debug_is_blackboxed| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
+
+ // Indicates the side effect state.
+ DECL_INT_ACCESSORS(side_effect_state)
+
+ enum SideEffectState {
+ kNotComputed = 0,
+ kHasSideEffects = 1,
+ kRequiresRuntimeChecks = 2,
+ kHasNoSideEffect = 3,
+ };
+
+ SideEffectState GetSideEffectState(Isolate* isolate);
+
+ // Id assigned to the function for debugging.
+ // This could also be implemented as a weak hash table.
+ DECL_INT_ACCESSORS(debugging_id);
+
+// Bit positions in |debugger_hints|.
+#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
+ V(SideEffectStateBits, int, 2, _) \
+ V(DebugIsBlackboxedBit, bool, 1, _) \
+ V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
+ V(DebuggingIdBits, int, 20, _)
+
+ DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
+#undef DEBUGGER_HINTS_BIT_FIELDS
+
+ static const int kNoDebuggingId = 0;
+
// --- Block Coverage ---
// ----------------------
bool HasCoverageInfo() const;
- // Clears all fields related to block coverage. Returns true iff the
- // DebugInfo is now empty.
- bool ClearCoverageInfo();
+ // Clears all fields related to block coverage.
+ void ClearCoverageInfo(Isolate* isolate);
DECL_ACCESSORS(coverage_info, Object)
DECL_CAST(DebugInfo)
@@ -121,10 +168,12 @@ class DebugInfo : public Struct {
static const int kSharedFunctionInfoOffset = Struct::kHeaderSize;
static const int kDebuggerHintsOffset =
kSharedFunctionInfoOffset + kPointerSize;
- static const int kDebugBytecodeArrayOffset =
+ static const int kFunctionIdentifierOffset =
kDebuggerHintsOffset + kPointerSize;
+ static const int kOriginalBytecodeArrayOffset =
+ kFunctionIdentifierOffset + kPointerSize;
static const int kBreakPointsStateOffset =
- kDebugBytecodeArrayOffset + kPointerSize;
+ kOriginalBytecodeArrayOffset + kPointerSize;
static const int kFlagsOffset = kBreakPointsStateOffset + kPointerSize;
static const int kCoverageInfoOffset = kFlagsOffset + kPointerSize;
static const int kSize = kCoverageInfoOffset + kPointerSize;
@@ -133,7 +182,7 @@ class DebugInfo : public Struct {
private:
// Get the break point info object for a source position.
- Object* GetBreakPointInfo(int source_position);
+ Object* GetBreakPointInfo(Isolate* isolate, int source_position);
DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
};
@@ -149,16 +198,16 @@ class BreakPointInfo : public Tuple2 {
DECL_ACCESSORS(break_points, Object)
// Removes a break point.
- static void ClearBreakPoint(Handle<BreakPointInfo> info,
+ static void ClearBreakPoint(Isolate* isolate, Handle<BreakPointInfo> info,
Handle<BreakPoint> break_point);
// Set a break point.
- static void SetBreakPoint(Handle<BreakPointInfo> info,
+ static void SetBreakPoint(Isolate* isolate, Handle<BreakPointInfo> info,
Handle<BreakPoint> break_point);
// Check if break point info has this break point.
- static bool HasBreakPoint(Handle<BreakPointInfo> info,
+ static bool HasBreakPoint(Isolate* isolate, Handle<BreakPointInfo> info,
Handle<BreakPoint> break_point);
// Get the number of break points for this code offset.
- int GetBreakPointCount();
+ int GetBreakPointCount(Isolate* isolate);
int GetStatementPosition(Handle<DebugInfo> debug_info);
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index a4dc9adaac..c77e1000b7 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -44,7 +44,10 @@ class EnumCache : public Tuple2 {
// [4]: first value for constants | Smi(1) when not used
//
// [2 + number of descriptors * 3]: start of slack
-class DescriptorArray : public FixedArray {
+// The "value" fields store either values or field types. A field type is either
+// FieldType::None(), FieldType::Any() or a weak reference to a Map. All other
+// references are strong.
+class DescriptorArray : public WeakFixedArray {
public:
// Returns the number of descriptors in the array.
inline int number_of_descriptors() const;
@@ -66,12 +69,13 @@ class DescriptorArray : public FixedArray {
// Accessors for fetching instance descriptor at descriptor number.
inline Name* GetKey(int descriptor_number);
inline Object** GetKeySlot(int descriptor_number);
- inline Object* GetValue(int descriptor_number);
+ inline Object* GetStrongValue(int descriptor_number);
inline void SetValue(int descriptor_number, Object* value);
- inline Object** GetValueSlot(int descriptor_number);
+ inline MaybeObject* GetValue(int descriptor_number);
+ inline MaybeObject** GetValueSlot(int descriptor_number);
static inline int GetValueOffset(int descriptor_number);
- inline Object** GetDescriptorStartSlot(int descriptor_number);
- inline Object** GetDescriptorEndSlot(int descriptor_number);
+ inline MaybeObject** GetDescriptorStartSlot(int descriptor_number);
+ inline MaybeObject** GetDescriptorEndSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
inline FieldType* GetFieldType(int descriptor_number);
@@ -81,9 +85,8 @@ class DescriptorArray : public FixedArray {
inline void SetSortedKey(int pointer, int descriptor_number);
// Accessor for complete descriptor.
- inline void Get(int descriptor_number, Descriptor* desc);
inline void Set(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number, Name* key, Object* value,
+ inline void Set(int descriptor_number, Name* key, MaybeObject* value,
PropertyDetails details);
void Replace(int descriptor_number, Descriptor* descriptor);
@@ -96,22 +99,24 @@ class DescriptorArray : public FixedArray {
// array.
inline void Append(Descriptor* desc);
- static Handle<DescriptorArray> CopyUpTo(Handle<DescriptorArray> desc,
+ static Handle<DescriptorArray> CopyUpTo(Isolate* isolate,
+ Handle<DescriptorArray> desc,
int enumeration_index, int slack = 0);
static Handle<DescriptorArray> CopyUpToAddAttributes(
- Handle<DescriptorArray> desc, int enumeration_index,
+ Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
PropertyAttributes attributes, int slack = 0);
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
// Search the instance descriptors for given name.
- INLINE(int Search(Name* name, int number_of_own_descriptors));
+ V8_INLINE int Search(Name* name, int number_of_own_descriptors);
+ V8_INLINE int Search(Name* name, Map* map);
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- INLINE(int SearchWithCache(Isolate* isolate, Name* name, Map* map));
+ V8_INLINE int SearchWithCache(Isolate* isolate, Name* name, Map* map);
bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
@@ -147,12 +152,7 @@ class DescriptorArray : public FixedArray {
void PrintDescriptorDetails(std::ostream& os, int descriptor,
PropertyDetails::PrintMode mode);
-#if defined(DEBUG) || defined(OBJECT_PRINT)
- // For our gdb macros, we should perhaps change these in the future.
- void Print();
- void DescriptorArrayPrint(std::ostream& os);
-#endif
-
+ DECL_PRINTER(DescriptorArray)
DECL_VERIFIER(DescriptorArray)
#ifdef DEBUG
@@ -183,6 +183,9 @@ class DescriptorArray : public FixedArray {
}
private:
+ inline MaybeObject* get(int index) const;
+ inline void set(int index, MaybeObject* value);
+
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
void CopyFrom(int index, DescriptorArray* src);
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 4aca71d563..eac358c1cd 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -43,18 +43,18 @@ class Dictionary : public HashTable<Derived, Shape> {
}
// Set the details for entry.
- void DetailsAtPut(int entry, PropertyDetails value) {
- Shape::DetailsAtPut(static_cast<Derived*>(this), entry, value);
+ void DetailsAtPut(Isolate* isolate, int entry, PropertyDetails value) {
+ Shape::DetailsAtPut(isolate, static_cast<Derived*>(this), entry, value);
}
// Delete a property from the dictionary.
V8_WARN_UNUSED_RESULT static Handle<Derived> DeleteEntry(
- Handle<Derived> dictionary, int entry);
+ Isolate* isolate, Handle<Derived> dictionary, int entry);
// Attempt to shrink the dictionary after deletion of key.
V8_WARN_UNUSED_RESULT static inline Handle<Derived> Shrink(
- Handle<Derived> dictionary) {
- return DerivedHashTable::Shrink(dictionary);
+ Isolate* isolate, Handle<Derived> dictionary) {
+ return DerivedHashTable::Shrink(isolate, dictionary);
}
int NumberOfEnumerableProperties();
@@ -69,19 +69,18 @@ class Dictionary : public HashTable<Derived, Shape> {
Object* SlowReverseLookup(Object* value);
// Sets the entry to (key, value) pair.
- inline void ClearEntry(int entry);
- inline void SetEntry(int entry, Object* key, Object* value,
+ inline void ClearEntry(Isolate* isolate, int entry);
+ inline void SetEntry(Isolate* isolate, int entry, Object* key, Object* value,
PropertyDetails details);
- V8_WARN_UNUSED_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details,
- int* entry_out = nullptr);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
+ Isolate* isolate, Handle<Derived> dictionary, Key key,
+ Handle<Object> value, PropertyDetails details, int* entry_out = nullptr);
protected:
// Generic at put operation.
- V8_WARN_UNUSED_RESULT static Handle<Derived> AtPut(Handle<Derived> dictionary,
+ V8_WARN_UNUSED_RESULT static Handle<Derived> AtPut(Isolate* isolate,
+ Handle<Derived> dictionary,
Key key,
Handle<Object> value,
PropertyDetails details);
@@ -100,7 +99,7 @@ class BaseDictionaryShape : public BaseShape<Key> {
}
template <typename Dictionary>
- static inline void DetailsAtPut(Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
@@ -164,27 +163,27 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
static void CollectKeysTo(Handle<Derived> dictionary, KeyAccumulator* keys);
// Return the key indices sorted by its enumeration index.
- static Handle<FixedArray> IterationIndices(Handle<Derived> dictionary);
+ static Handle<FixedArray> IterationIndices(Isolate* isolate,
+ Handle<Derived> dictionary);
// Copies enumerable keys to preallocated fixed array.
// Does not throw for uninitialized exports in module namespace objects, so
// this has to be checked separately.
- static void CopyEnumKeysTo(Handle<Derived> dictionary,
+ static void CopyEnumKeysTo(Isolate* isolate, Handle<Derived> dictionary,
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator);
// Ensure enough space for n additional elements.
- static Handle<Derived> EnsureCapacity(Handle<Derived> dictionary, int n);
+ static Handle<Derived> EnsureCapacity(Isolate* isolate,
+ Handle<Derived> dictionary, int n);
V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
- Handle<Derived> dictionary, Key key, Handle<Object> value,
- PropertyDetails details, int* entry_out = nullptr);
-
- V8_WARN_UNUSED_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details,
- int* entry_out = nullptr);
+ Isolate* isolate, Handle<Derived> dictionary, Key key,
+ Handle<Object> value, PropertyDetails details, int* entry_out = nullptr);
+
+ V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
+ Isolate* isolate, Handle<Derived> dictionary, Key key,
+ Handle<Object> value, PropertyDetails details, int* entry_out = nullptr);
};
class NameDictionary
@@ -211,12 +210,12 @@ class GlobalDictionaryShape : public NameDictionaryShape {
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry);
template <typename Dictionary>
- static inline void DetailsAtPut(Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
PropertyDetails value);
static inline Object* Unwrap(Object* key);
- static inline bool IsKey(Isolate* isolate, Object* k);
- static inline bool IsLive(Isolate* isolate, Object* key);
+ static inline bool IsKey(ReadOnlyRoots roots, Object* k);
+ static inline bool IsLive(ReadOnlyRoots roots, Object* key);
static inline int GetMapRootIndex();
};
@@ -227,7 +226,7 @@ class GlobalDictionary
inline Object* ValueAt(int entry);
inline PropertyCell* CellAt(int entry);
- inline void SetEntry(int entry, Object* key, Object* value,
+ inline void SetEntry(Isolate* isolate, int entry, Object* key, Object* value,
PropertyDetails details);
inline Name* NameAt(int entry);
inline void ValueAtPut(int entry, Object* value);
@@ -262,7 +261,7 @@ class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
}
template <typename Dictionary>
- static inline void DetailsAtPut(Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
PropertyDetails value) {
UNREACHABLE();
}
@@ -283,7 +282,7 @@ class SimpleNumberDictionary
DECL_CAST(SimpleNumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
V8_WARN_UNUSED_RESULT static Handle<SimpleNumberDictionary> Set(
- Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Isolate* isolate, Handle<SimpleNumberDictionary> dictionary, uint32_t key,
Handle<Object> value);
static const int kEntryValueIndex = 1;
@@ -301,10 +300,12 @@ class NumberDictionary
: public Dictionary<NumberDictionary, NumberDictionaryShape> {
public:
DECL_CAST(NumberDictionary)
+ DECL_PRINTER(NumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
V8_WARN_UNUSED_RESULT static Handle<NumberDictionary> Set(
- Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value,
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value,
Handle<JSObject> dictionary_holder = Handle<JSObject>::null(),
PropertyDetails details = PropertyDetails::Empty());
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 1de1079717..45107ce7fe 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -5,7 +5,9 @@
#ifndef V8_OBJECTS_FIXED_ARRAY_INL_H_
#define V8_OBJECTS_FIXED_ARRAY_INL_H_
-#include "src/objects.h"
+#include "src/objects/fixed-array.h"
+
+#include "src/objects/bigint.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,12 +15,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-TYPE_CHECKER(FixedArrayOfWeakCells, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
-
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FixedArray)
@@ -50,7 +46,7 @@ Object** FixedArray::GetFirstElementAddress() {
}
bool FixedArray::ContainsOnlySmisOrHoles() {
- Object* the_hole = GetHeap()->the_hole_value();
+ Object* the_hole = GetReadOnlyRoots().the_hole_value();
Object** current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i) {
Object* candidate = *current++;
@@ -60,7 +56,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object* FixedArray::get(int index) const {
- SLOW_DCHECK(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
@@ -87,7 +83,7 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
}
void FixedArray::set(int index, Smi* value) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(index, this->length());
DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
@@ -95,58 +91,71 @@ void FixedArray::set(int index, Smi* value) {
}
void FixedArray::set(int index, Object* value) {
- DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
+ DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
}
void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
+ value, mode);
}
void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
Object* value) {
- DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
+ DCHECK_NE(array->map(), array->GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
DCHECK_LT(index, array->length());
- DCHECK(!array->GetHeap()->InNewSpace(value));
+ DCHECK(!Heap::InNewSpace(value));
RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- set_undefined(GetIsolate(), index);
+ set_undefined(GetReadOnlyRoots(), index);
}
void FixedArray::set_undefined(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index,
- isolate->heap()->undefined_value());
+ set_undefined(ReadOnlyRoots(isolate), index);
}
-void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
+void FixedArray::set_undefined(ReadOnlyRoots ro_roots, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, ro_roots.undefined_value());
+}
+
+void FixedArray::set_null(int index) { set_null(GetReadOnlyRoots(), index); }
void FixedArray::set_null(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
+ set_null(ReadOnlyRoots(isolate), index);
+}
+
+void FixedArray::set_null(ReadOnlyRoots ro_roots, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, ro_roots.null_value());
}
-void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
+void FixedArray::set_the_hole(int index) {
+ set_the_hole(GetReadOnlyRoots(), index);
+}
void FixedArray::set_the_hole(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
+ set_the_hole(ReadOnlyRoots(isolate), index);
+}
+
+void FixedArray::set_the_hole(ReadOnlyRoots ro_roots, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, ro_roots.the_hole_value());
}
void FixedArray::FillWithHoles(int from, int to) {
- Isolate* isolate = GetIsolate();
for (int i = from; i < to; i++) {
- set_the_hole(isolate, i);
+ set_the_hole(i);
}
}
@@ -159,16 +168,16 @@ Object** FixedArray::RawFieldOfElementAt(int index) {
}
double FixedDoubleArray::get_scalar(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
+ DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
+ map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
DCHECK(!is_the_hole(index));
return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
}
uint64_t FixedDoubleArray::get_representation(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
+ DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
+ map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
return READ_UINT64_FIELD(this, offset);
@@ -184,8 +193,8 @@ Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
}
void FixedDoubleArray::set(int index, double value) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
+ DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
+ map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) {
WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
@@ -200,8 +209,8 @@ void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
}
void FixedDoubleArray::set_the_hole(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
+ DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
+ map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
}
@@ -225,7 +234,7 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject* WeakFixedArray::Get(int index) const {
- SLOW_DCHECK(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
}
@@ -234,7 +243,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value) {
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- WEAK_WRITE_BARRIER(GetHeap(), this, offset, value);
+ WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
}
void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
@@ -242,7 +251,8 @@ void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
+ offset, value, mode);
}
MaybeObject** WeakFixedArray::data_start() {
@@ -253,8 +263,13 @@ MaybeObject** WeakFixedArray::RawFieldOfElementAt(int index) {
return HeapObject::RawMaybeWeakField(this, OffsetOfElementAt(index));
}
+MaybeObject** WeakFixedArray::GetFirstElementAddress() {
+ return reinterpret_cast<MaybeObject**>(
+ FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
MaybeObject* WeakArrayList::Get(int index) const {
- SLOW_DCHECK(index >= 0 && index < this->capacity());
+ DCHECK(index >= 0 && index < this->capacity());
return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
}
@@ -263,7 +278,8 @@ void WeakArrayList::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->capacity());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
+ offset, value, mode);
}
MaybeObject** WeakArrayList::data_start() {
@@ -334,7 +350,7 @@ void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
}
void ArrayList::Clear(int index, Object* undefined) {
- DCHECK(undefined->IsUndefined(GetIsolate()));
+ DCHECK(undefined->IsUndefined());
FixedArray::cast(this)->set(kFirstIndex + index, undefined,
SKIP_WRITE_BARRIER);
}
@@ -405,12 +421,12 @@ byte* ByteArray::GetDataStartAddress() {
template <class T>
PodArray<T>* PodArray<T>::cast(Object* object) {
- SLOW_DCHECK(object->IsByteArray());
+ DCHECK(object->IsByteArray());
return reinterpret_cast<PodArray<T>*>(object);
}
template <class T>
const PodArray<T>* PodArray<T>::cast(const Object* object) {
- SLOW_DCHECK(object->IsByteArray());
+ DCHECK(object->IsByteArray());
return reinterpret_cast<const PodArray<T>*>(object);
}
@@ -689,7 +705,7 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined(GetIsolate()));
+ DCHECK(value->IsUndefined());
}
set(index, cast_value);
}
@@ -761,18 +777,18 @@ STATIC_CONST_MEMBER_DEFINITION const InstanceType
template <class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
+ DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
return reinterpret_cast<FixedTypedArray<Traits>*>(object);
}
template <class Traits>
const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
const Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
+ DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
return reinterpret_cast<FixedTypedArray<Traits>*>(object);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 16aa97d3fa..362064edf7 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -104,7 +104,8 @@ class FixedArray : public FixedArrayBase {
Handle<T> GetValueChecked(Isolate* isolate, int index) const;
// Return a grown copy if the index is bigger than the array's length.
- static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
+ static Handle<FixedArray> SetAndGrow(Isolate* isolate,
+ Handle<FixedArray> array, int index,
Handle<Object> value,
PretenureFlag pretenure = NOT_TENURED);
@@ -135,8 +136,13 @@ class FixedArray : public FixedArrayBase {
inline void FillWithHoles(int from, int to);
- // Shrink length and insert filler objects.
- void Shrink(int length);
+ // Shrink the array and insert filler objects. {new_length} must be > 0.
+ void Shrink(Isolate* isolate, int new_length);
+ // If {new_length} is 0, return the canonical empty FixedArray. Otherwise
+ // like above.
+ static Handle<FixedArray> ShrinkOrEmpty(Isolate* isolate,
+ Handle<FixedArray> array,
+ int new_length);
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
@@ -186,6 +192,10 @@ class FixedArray : public FixedArrayBase {
private:
STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
+ inline void set_undefined(ReadOnlyRoots ro_roots, int index);
+ inline void set_null(ReadOnlyRoots ro_roots, int index);
+ inline void set_the_hole(ReadOnlyRoots ro_roots, int index);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -276,8 +286,7 @@ class WeakFixedArray : public HeapObject {
inline MaybeObject** RawFieldOfElementAt(int index);
- // Shrink length and insert filler objects.
- void Shrink(int new_length);
+ inline MaybeObject** GetFirstElementAddress();
DECL_PRINTER(WeakFixedArray)
DECL_VERIFIER(WeakFixedArray)
@@ -291,11 +300,12 @@ class WeakFixedArray : public HeapObject {
static const int kMaxLength =
(FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
- private:
+ protected:
static int OffsetOfElementAt(int index) {
return kHeaderSize + index * kPointerSize;
}
+ private:
friend class Heap;
static const int kFirstIndex = 1;
@@ -314,13 +324,14 @@ class WeakArrayList : public HeapObject {
DECL_VERIFIER(WeakArrayList)
DECL_PRINTER(WeakArrayList)
- static Handle<WeakArrayList> Add(Handle<WeakArrayList> array,
- Handle<HeapObject> obj1, Smi* obj2);
+ static Handle<WeakArrayList> AddToEnd(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ MaybeObjectHandle value);
inline MaybeObject* Get(int index) const;
// Set the element at index to obj. The underlying array must be large enough.
- // If you need to grow the WeakArrayList, use the static Add() methods
+ // If you need to grow the WeakArrayList, use the static AddToEnd() method
// instead.
inline void Set(int index, MaybeObject* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -351,14 +362,16 @@ class WeakArrayList : public HeapObject {
static const int kMaxCapacity =
(FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
+ protected:
+ static Handle<WeakArrayList> EnsureSpace(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ int length);
+
private:
static int OffsetOfElementAt(int index) {
return kHeaderSize + index * kPointerSize;
}
- static Handle<WeakArrayList> EnsureSpace(Handle<WeakArrayList> array,
- int length);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(WeakArrayList);
};
@@ -368,7 +381,8 @@ class FixedArrayOfWeakCells : public FixedArray {
// If |maybe_array| is not a FixedArrayOfWeakCells, a fresh one will be
// allocated. This function does not check if the value exists already,
// callers must ensure this themselves if necessary.
- static Handle<FixedArrayOfWeakCells> Add(Handle<Object> maybe_array,
+ static Handle<FixedArrayOfWeakCells> Add(Isolate* isolate,
+ Handle<Object> maybe_array,
Handle<HeapObject> value,
int* assigned_index = nullptr);
@@ -381,7 +395,7 @@ class FixedArrayOfWeakCells : public FixedArray {
};
template <class CompactionCallback>
- void Compact();
+ void Compact(Isolate* isolate);
inline Object* Get(int index) const;
inline void Clear(int index);
@@ -420,8 +434,8 @@ class FixedArrayOfWeakCells : public FixedArray {
Isolate* isolate, int size,
Handle<FixedArrayOfWeakCells> initialize_from);
- static void Set(Handle<FixedArrayOfWeakCells> array, int index,
- Handle<HeapObject> value);
+ static void Set(Isolate* isolate, Handle<FixedArrayOfWeakCells> array,
+ int index, Handle<HeapObject> value);
inline void clear(int index);
inline int last_used_index() const;
@@ -443,9 +457,10 @@ class FixedArrayOfWeakCells : public FixedArray {
// underlying FixedArray starting at kFirstIndex.
class ArrayList : public FixedArray {
public:
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj);
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2);
+ static Handle<ArrayList> Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj);
+ static Handle<ArrayList> Add(Isolate* isolate, Handle<ArrayList> array,
+ Handle<Object> obj1, Handle<Object> obj2);
static Handle<ArrayList> New(Isolate* isolate, int size);
// Returns the number of elements in the list, not the allocated size, which
@@ -468,12 +483,13 @@ class ArrayList : public FixedArray {
// Return a copy of the list of size Length() without the first entry. The
// number returned by Length() is stored in the first entry.
- static Handle<FixedArray> Elements(Handle<ArrayList> array);
+ static Handle<FixedArray> Elements(Isolate* isolate, Handle<ArrayList> array);
bool IsFull();
DECL_CAST(ArrayList)
private:
- static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
+ static Handle<ArrayList> EnsureSpace(Isolate* isolate,
+ Handle<ArrayList> array, int length);
static const int kLengthIndex = 0;
static const int kFirstIndex = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 55b7838484..08ed8fe6af 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -41,7 +41,7 @@ class FrameArray : public FixedArray {
inline bool IsAsmJsWasmFrame(int frame_ix) const;
inline int FrameCount() const;
- void ShrinkToFit();
+ void ShrinkToFit(Isolate* isolate);
// Flags.
enum Flag {
@@ -95,7 +95,8 @@ class FrameArray : public FixedArray {
return kFirstIndex + frame_count * kElementsPerFrame;
}
- static Handle<FrameArray> EnsureSpace(Handle<FrameArray> array, int length);
+ static Handle<FrameArray> EnsureSpace(Isolate* isolate,
+ Handle<FrameArray> array, int length);
friend class Factory;
DISALLOW_IMPLICIT_CONSTRUCTORS(FrameArray);
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index aef7d7230b..11aa0392c0 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -58,26 +58,25 @@ int BaseShape<Key>::GetMapRootIndex() {
return Heap::kHashTableMapRootIndex;
}
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Key key) {
- return FindEntry(GetIsolate(), key);
+int EphemeronHashTableShape::GetMapRootIndex() {
+ return Heap::kEphemeronHashTableMapRootIndex;
}
template <typename Derived, typename Shape>
int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
- return FindEntry(isolate, key, Shape::Hash(isolate, key));
+ return FindEntry(ReadOnlyRoots(isolate), key, Shape::Hash(isolate, key));
}
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
+int HashTable<Derived, Shape>::FindEntry(ReadOnlyRoots roots, Key key,
int32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
- Object* undefined = isolate->heap()->undefined_value();
- Object* the_hole = isolate->heap()->the_hole_value();
+ Object* undefined = roots.undefined_value();
+ Object* the_hole = roots.the_hole_value();
USE(the_hole);
while (true) {
Object* element = KeyAt(entry);
@@ -93,28 +92,27 @@ int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::IsKey(Isolate* isolate, Object* k) {
- return Shape::IsKey(isolate, k);
+bool HashTable<Derived, Shape>::IsKey(ReadOnlyRoots roots, Object* k) {
+ return Shape::IsKey(roots, k);
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::ToKey(Isolate* isolate, int entry,
+bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, int entry,
Object** out_k) {
Object* k = KeyAt(entry);
- if (!IsKey(isolate, k)) return false;
+ if (!IsKey(roots, k)) return false;
*out_k = Shape::Unwrap(k);
return true;
}
template <typename KeyT>
-bool BaseShape<KeyT>::IsKey(Isolate* isolate, Object* key) {
- return IsLive(isolate, key);
+bool BaseShape<KeyT>::IsKey(ReadOnlyRoots roots, Object* key) {
+ return IsLive(roots, key);
}
template <typename KeyT>
-bool BaseShape<KeyT>::IsLive(Isolate* isolate, Object* k) {
- Heap* heap = isolate->heap();
- return k != heap->the_hole_value() && k != heap->undefined_value();
+bool BaseShape<KeyT>::IsLive(ReadOnlyRoots roots, Object* k) {
+ return k != roots.the_hole_value() && k != roots.undefined_value();
}
template <typename Derived, typename Shape>
@@ -131,13 +129,13 @@ const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
}
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
- return FindEntry(isolate, key, hash) != kNotFound;
+ return FindEntry(ReadOnlyRoots(isolate), key, hash) != kNotFound;
}
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
Object* hash = key->GetHash();
if (!hash->IsSmi()) return false;
- return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
+ return FindEntry(ReadOnlyRoots(isolate), key, Smi::ToInt(hash)) != kNotFound;
}
} // namespace internal
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 8268ad3bc4..f83f3274b4 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -58,8 +58,8 @@ class BaseShape {
static inline int GetMapRootIndex();
static const bool kNeedsHoleCheck = true;
static Object* Unwrap(Object* key) { return key; }
- static inline bool IsKey(Isolate* isolate, Object* key);
- static inline bool IsLive(Isolate* isolate, Object* key);
+ static inline bool IsKey(ReadOnlyRoots roots, Object* key);
+ static inline bool IsLive(ReadOnlyRoots roots, Object* key);
};
class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
static inline int ComputeCapacity(int at_least_space_for);
// Compute the probe offset (quadratic probing).
- INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
+ V8_INLINE static uint32_t GetProbeOffset(uint32_t n) {
return (n + n * n) >> 1;
}
@@ -144,18 +144,17 @@ class HashTable : public HashTableBase {
void IterateElements(ObjectVisitor* visitor);
// Find entry for key otherwise return kNotFound.
- inline int FindEntry(Key key);
- inline int FindEntry(Isolate* isolate, Key key, int32_t hash);
+ inline int FindEntry(ReadOnlyRoots roots, Key key, int32_t hash);
int FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
- void Rehash();
+ void Rehash(Isolate* isolate);
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
- static bool IsKey(Isolate* isolate, Object* k);
+ static bool IsKey(ReadOnlyRoots roots, Object* k);
- inline bool ToKey(Isolate* isolate, int entry, Object** out_k);
+ inline bool ToKey(ReadOnlyRoots roots, int entry, Object** out_k);
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
@@ -185,7 +184,8 @@ class HashTable : public HashTableBase {
// Ensure enough space for n additional elements.
V8_WARN_UNUSED_RESULT static Handle<Derived> EnsureCapacity(
- Handle<Derived> table, int n, PretenureFlag pretenure = NOT_TENURED);
+ Isolate* isolate, Handle<Derived> table, int n,
+ PretenureFlag pretenure = NOT_TENURED);
// Returns true if this table has sufficient capacity for adding n elements.
bool HasSufficientCapacityToAdd(int number_of_additional_elements);
@@ -202,7 +202,7 @@ class HashTable : public HashTableBase {
// Attempt to shrink hash table after removal of key.
V8_WARN_UNUSED_RESULT static Handle<Derived> Shrink(
- Handle<Derived> table, int additionalCapacity = 0);
+ Isolate* isolate, Handle<Derived> table, int additionalCapacity = 0);
private:
// Ensure that kMaxRegularCapacity yields a non-large object dictionary.
@@ -226,12 +226,13 @@ class HashTable : public HashTableBase {
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
- uint32_t EntryForProbe(Object* k, int probe, uint32_t expected);
+ uint32_t EntryForProbe(Isolate* isolate, Object* k, int probe,
+ uint32_t expected);
void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
// Rehashes this hash-table into the new table.
- void Rehash(Derived* new_table);
+ void Rehash(Isolate* isolate, Derived* new_table);
};
// HashTableKey is an abstract superclass for virtual key behavior.
@@ -269,54 +270,74 @@ class ObjectHashTableShape : public BaseShape<Handle<Object>> {
static const bool kNeedsHoleCheck = false;
};
-// ObjectHashTable maps keys that are arbitrary objects to object values by
-// using the identity hash of the key for hashing purposes.
-class ObjectHashTable
- : public HashTable<ObjectHashTable, ObjectHashTableShape> {
- typedef HashTable<ObjectHashTable, ObjectHashTableShape> DerivedHashTable;
-
+template <typename Derived, typename Shape>
+class ObjectHashTableBase : public HashTable<Derived, Shape> {
public:
- DECL_CAST(ObjectHashTable)
-
- // Attempt to shrink hash table after removal of key.
- V8_WARN_UNUSED_RESULT static inline Handle<ObjectHashTable> Shrink(
- Handle<ObjectHashTable> table);
-
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Handle<Object> key);
Object* Lookup(Handle<Object> key, int32_t hash);
- Object* Lookup(Isolate* isolate, Handle<Object> key, int32_t hash);
+ Object* Lookup(ReadOnlyRoots roots, Handle<Object> key, int32_t hash);
// Returns the value at entry.
Object* ValueAt(int entry);
+ // Overwrite all keys and values with the hole value.
+ static void FillEntriesWithHoles(Handle<Derived>);
+
// Adds (or overwrites) the value associated with the given key.
- static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
- Handle<Object> key, Handle<Object> value);
- static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
- Handle<Object> key, Handle<Object> value,
- int32_t hash);
+ static Handle<Derived> Put(Handle<Derived> table, Handle<Object> key,
+ Handle<Object> value);
+ static Handle<Derived> Put(Isolate* isolate, Handle<Derived> table,
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash);
// Returns an ObjectHashTable (possibly |table|) where |key| has been removed.
- static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table,
- Handle<Object> key, bool* was_present);
- static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table,
- Handle<Object> key, bool* was_present,
- int32_t hash);
+ static Handle<Derived> Remove(Isolate* isolate, Handle<Derived> table,
+ Handle<Object> key, bool* was_present);
+ static Handle<Derived> Remove(Isolate* isolate, Handle<Derived> table,
+ Handle<Object> key, bool* was_present,
+ int32_t hash);
// Returns the index to the value of an entry.
static inline int EntryToValueIndex(int entry) {
- return EntryToIndex(entry) + ObjectHashTableShape::kEntryValueIndex;
+ return HashTable<Derived, Shape>::EntryToIndex(entry) +
+ Shape::kEntryValueIndex;
}
protected:
- friend class MarkCompactCollector;
-
void AddEntry(int entry, Object* key, Object* value);
void RemoveEntry(int entry);
};
+// ObjectHashTable maps keys that are arbitrary objects to object values by
+// using the identity hash of the key for hashing purposes.
+class ObjectHashTable
+ : public ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape> {
+ public:
+ DECL_CAST(ObjectHashTable)
+ DECL_PRINTER(ObjectHashTable)
+};
+
+class EphemeronHashTableShape : public ObjectHashTableShape {
+ public:
+ static inline int GetMapRootIndex();
+};
+
+// EphemeronHashTable is similar to ObjectHashTable but gets special treatment
+// by the GC. The GC treats its entries as ephemerons: both key and value are
+// weak references, however if the key is strongly reachable its corresponding
+// value is also kept alive.
+class EphemeronHashTable
+ : public ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape> {
+ public:
+ DECL_CAST(EphemeronHashTable)
+ DECL_PRINTER(EphemeronHashTable)
+
+ protected:
+ friend class MarkCompactCollector;
+};
+
class ObjectHashSetShape : public ObjectHashTableShape {
public:
static const int kPrefixSize = 0;
@@ -325,7 +346,7 @@ class ObjectHashSetShape : public ObjectHashTableShape {
class ObjectHashSet : public HashTable<ObjectHashSet, ObjectHashSetShape> {
public:
- static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> set,
+ static Handle<ObjectHashSet> Add(Isolate* isolate, Handle<ObjectHashSet> set,
Handle<Object> key);
inline bool Has(Isolate* isolate, Handle<Object> key, int32_t hash);
diff --git a/deps/v8/src/objects/intl-objects-inl.h b/deps/v8/src/objects/intl-objects-inl.h
new file mode 100644
index 0000000000..1fa2d66f94
--- /dev/null
+++ b/deps/v8/src/objects/intl-objects-inl.h
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_INTL_OBJECTS_INL_H_
+#define V8_OBJECTS_INTL_OBJECTS_INL_H_
+
+#include "src/objects/intl-objects.h"
+
+namespace v8 {
+namespace internal {
+
+inline Intl::Type Intl::TypeFromInt(int type_int) {
+ STATIC_ASSERT(Intl::Type::kNumberFormat == 0);
+ DCHECK_LE(Intl::Type::kNumberFormat, type_int);
+ DCHECK_GT(Intl::Type::kTypeCount, type_int);
+ return static_cast<Intl::Type>(type_int);
+}
+
+inline Intl::Type Intl::TypeFromSmi(Smi* type) {
+ return TypeFromInt(Smi::ToInt(type));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_INTL_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 60c3a0721b..67f691a336 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -7,15 +7,18 @@
#endif // V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
+#include "src/objects/intl-objects-inl.h"
#include <memory>
#include "src/api.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
+#include "src/intl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/string.h"
#include "src/property-descriptor.h"
#include "unicode/brkiter.h"
#include "unicode/bytestream.h"
@@ -32,6 +35,7 @@
#include "unicode/numsys.h"
#include "unicode/plurrule.h"
#include "unicode/rbbi.h"
+#include "unicode/regex.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
@@ -39,6 +43,7 @@
#include "unicode/ucurr.h"
#include "unicode/unum.h"
#include "unicode/upluralrules.h"
+#include "unicode/ures.h"
#include "unicode/uvernum.h"
#include "unicode/uversion.h"
@@ -56,7 +61,7 @@ bool ExtractStringSetting(Isolate* isolate, Handle<JSObject> options,
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
- JSReceiver::GetProperty(options, str).ToHandleChecked();
+ JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
if (object->IsString()) {
v8::String::Utf8Value utf8_string(
v8_isolate, v8::Utils::ToLocal(Handle<String>::cast(object)));
@@ -70,7 +75,7 @@ bool ExtractIntegerSetting(Isolate* isolate, Handle<JSObject> options,
const char* key, int32_t* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
- JSReceiver::GetProperty(options, str).ToHandleChecked();
+ JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
if (object->IsNumber()) {
return object->ToInt32(value);
}
@@ -81,9 +86,9 @@ bool ExtractBooleanSetting(Isolate* isolate, Handle<JSObject> options,
const char* key, bool* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
- JSReceiver::GetProperty(options, str).ToHandleChecked();
+ JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
if (object->IsBoolean()) {
- *value = object->BooleanValue();
+ *value = object->BooleanValue(isolate);
return true;
}
return false;
@@ -163,7 +168,7 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::UnicodeString pattern;
date_format->toPattern(pattern);
JSObject::SetProperty(
- resolved, factory->intl_pattern_symbol(),
+ isolate, resolved, factory->intl_pattern_symbol(),
factory
->NewStringFromTwoByte(Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
@@ -178,9 +183,9 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
// key values. intl.js maps them to BCP47 values for key "ca".
// TODO(jshin): Consider doing it here, instead.
const char* calendar_name = calendar->getType();
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
- factory->NewStringFromAsciiChecked(calendar_name),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("calendar"),
+ factory->NewStringFromAsciiChecked(calendar_name), LanguageMode::kSloppy)
.Assert();
const icu::TimeZone& tz = calendar->getTimeZone();
@@ -200,11 +205,11 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("timeZone"),
+ isolate, resolved, factory->NewStringFromStaticChars("timeZone"),
factory->NewStringFromStaticChars("UTC"), LanguageMode::kSloppy)
.Assert();
} else {
- JSObject::SetProperty(resolved,
+ JSObject::SetProperty(isolate, resolved,
factory->NewStringFromStaticChars("timeZone"),
factory
->NewStringFromTwoByte(Vector<const uint16_t>(
@@ -226,11 +231,11 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ isolate, resolved, factory->NewStringFromStaticChars("numberingSystem"),
factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
.Assert();
} else {
- JSObject::SetProperty(resolved,
+ JSObject::SetProperty(isolate, resolved,
factory->NewStringFromStaticChars("numberingSystem"),
factory->undefined_value(), LanguageMode::kSloppy)
.Assert();
@@ -243,15 +248,15 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
.Assert();
}
}
@@ -376,19 +381,22 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
Factory* factory = isolate->factory();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("minimumIntegerDigits"),
+ isolate, resolved,
+ factory->NewStringFromStaticChars("minimumIntegerDigits"),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("minimumFractionDigits"),
+ isolate, resolved,
+ factory->NewStringFromStaticChars("minimumFractionDigits"),
factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("maximumFractionDigits"),
+ isolate, resolved,
+ factory->NewStringFromStaticChars("maximumFractionDigits"),
factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
LanguageMode::kSloppy)
.Assert();
@@ -399,7 +407,8 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
CHECK(maybe.IsJust());
if (maybe.FromJust()) {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
+ isolate, resolved,
+ factory->NewStringFromStaticChars("minimumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
LanguageMode::kSloppy)
.Assert();
@@ -410,7 +419,8 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
CHECK(maybe.IsJust());
if (maybe.FromJust()) {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
+ isolate, resolved,
+ factory->NewStringFromStaticChars("maximumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
LanguageMode::kSloppy)
.Assert();
@@ -422,15 +432,15 @@ void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
.Assert();
}
}
@@ -444,7 +454,7 @@ void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("currency"),
+ isolate, resolved, factory->NewStringFromStaticChars("currency"),
factory
->NewStringFromTwoByte(Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
@@ -463,18 +473,18 @@ void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("numberingSystem"),
+ isolate, resolved, factory->NewStringFromStaticChars("numberingSystem"),
factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
.Assert();
} else {
- JSObject::SetProperty(resolved,
+ JSObject::SetProperty(isolate, resolved,
factory->NewStringFromStaticChars("numberingSystem"),
factory->undefined_value(), LanguageMode::kSloppy)
.Assert();
}
delete numbering_system;
- JSObject::SetProperty(resolved,
+ JSObject::SetProperty(isolate, resolved,
factory->NewStringFromStaticChars("useGrouping"),
factory->ToBoolean(number_format->isGroupingUsed()),
LanguageMode::kSloppy)
@@ -553,7 +563,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
UErrorCode status = U_ZERO_ERROR;
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("numeric"),
+ isolate, resolved, factory->NewStringFromStaticChars("numeric"),
factory->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
LanguageMode::kSloppy)
@@ -562,19 +572,19 @@ void SetResolvedCollatorSettings(Isolate* isolate,
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
+ isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
factory->NewStringFromStaticChars("lower"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
+ isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
factory->NewStringFromStaticChars("upper"), LanguageMode::kSloppy)
.Assert();
break;
default:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
+ isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
factory->NewStringFromStaticChars("false"), LanguageMode::kSloppy)
.Assert();
}
@@ -582,19 +592,19 @@ void SetResolvedCollatorSettings(Isolate* isolate,
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("strength"),
+ isolate, resolved, factory->NewStringFromStaticChars("strength"),
factory->NewStringFromStaticChars("primary"), LanguageMode::kSloppy)
.Assert();
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("case"), LanguageMode::kSloppy)
.Assert();
} else {
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("base"), LanguageMode::kSloppy)
.Assert();
}
@@ -602,50 +612,50 @@ void SetResolvedCollatorSettings(Isolate* isolate,
}
case UCOL_SECONDARY:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("strength"),
+ isolate, resolved, factory->NewStringFromStaticChars("strength"),
factory->NewStringFromStaticChars("secondary"), LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("accent"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("strength"),
+ isolate, resolved, factory->NewStringFromStaticChars("strength"),
factory->NewStringFromStaticChars("tertiary"), LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
// put them into variant.
- JSObject::SetProperty(resolved,
+ JSObject::SetProperty(isolate, resolved,
factory->NewStringFromStaticChars("strength"),
factory->NewStringFromStaticChars("quaternary"),
LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
break;
default:
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("strength"),
+ isolate, resolved, factory->NewStringFromStaticChars("strength"),
factory->NewStringFromStaticChars("identical"), LanguageMode::kSloppy)
.Assert();
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
+ isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
.Assert();
}
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
+ isolate, resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
status) == UCOL_SHIFTED),
LanguageMode::kSloppy)
@@ -657,15 +667,15 @@ void SetResolvedCollatorSettings(Isolate* isolate,
uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
.Assert();
}
}
@@ -726,7 +736,8 @@ bool SetResolvedPluralRulesSettings(Isolate* isolate,
Handle<JSObject> pluralCategories = Handle<JSObject>::cast(
JSObject::GetProperty(
- resolved, factory->NewStringFromStaticChars("pluralCategories"))
+ isolate, resolved,
+ factory->NewStringFromStaticChars("pluralCategories"))
.ToHandleChecked());
UErrorCode status = U_ZERO_ERROR;
@@ -796,15 +807,15 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
FALSE, &status);
if (U_SUCCESS(status)) {
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
.Assert();
} else {
// This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- LanguageMode::kSloppy)
+ JSObject::SetProperty(
+ isolate, resolved, factory->NewStringFromStaticChars("locale"),
+ factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
.Assert();
}
}
@@ -905,7 +916,8 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
icu::DecimalFormat* NumberFormat::UnpackNumberFormat(Isolate* isolate,
Handle<JSObject> obj) {
- return reinterpret_cast<icu::DecimalFormat*>(obj->GetEmbedderField(0));
+ return reinterpret_cast<icu::DecimalFormat*>(
+ obj->GetEmbedderField(NumberFormat::kDecimalFormatIndex));
}
void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -952,7 +964,7 @@ bool Collator::InitializeCollator(Isolate* isolate,
}
Handle<Managed<icu::Collator>> managed =
- Managed<icu::Collator>::FromRawPtr(isolate, collator);
+ Managed<icu::Collator>::FromRawPtr(isolate, 0, collator);
collator_holder->SetEmbedderField(0, *managed);
return true;
@@ -1077,5 +1089,527 @@ void V8BreakIterator::DeleteBreakIterator(
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
+// Build the shortened locale; eg, convert xx_Yyyy_ZZ to xx_ZZ.
+bool Intl::RemoveLocaleScriptTag(const std::string& icu_locale,
+ std::string* locale_less_script) {
+ icu::Locale new_locale = icu::Locale::createCanonical(icu_locale.c_str());
+ const char* icu_script = new_locale.getScript();
+ if (icu_script == NULL || strlen(icu_script) == 0) {
+ *locale_less_script = std::string();
+ return false;
+ }
+
+ const char* icu_language = new_locale.getLanguage();
+ const char* icu_country = new_locale.getCountry();
+ icu::Locale short_locale = icu::Locale(icu_language, icu_country);
+ const char* icu_name = short_locale.getName();
+ *locale_less_script = std::string(icu_name);
+ return true;
+}
+
+std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
+ const icu::Locale* icu_available_locales = nullptr;
+ int32_t count = 0;
+ std::set<std::string> locales;
+
+ switch (service) {
+ case IcuService::kBreakIterator:
+ icu_available_locales = icu::BreakIterator::getAvailableLocales(count);
+ break;
+ case IcuService::kCollator:
+ icu_available_locales = icu::Collator::getAvailableLocales(count);
+ break;
+ case IcuService::kDateFormat:
+ icu_available_locales = icu::DateFormat::getAvailableLocales(count);
+ break;
+ case IcuService::kNumberFormat:
+ icu_available_locales = icu::NumberFormat::getAvailableLocales(count);
+ break;
+ case IcuService::kPluralRules:
+ // TODO(littledan): For PluralRules, filter out locales that
+ // don't support PluralRules.
+ // PluralRules is missing an appropriate getAvailableLocales method,
+ // so we should filter from all locales, but it's not clear how; see
+ // https://ssl.icu-project.org/trac/ticket/12756
+ icu_available_locales = icu::Locale::getAvailableLocales(count);
+ break;
+ case IcuService::kResourceBundle: {
+ UErrorCode status = U_ZERO_ERROR;
+ UEnumeration* en = ures_openAvailableLocales(nullptr, &status);
+ int32_t length = 0;
+ const char* locale_str = uenum_next(en, &length, &status);
+ while (U_SUCCESS(status) && (locale_str != nullptr)) {
+ std::string locale(locale_str, length);
+ std::replace(locale.begin(), locale.end(), '_', '-');
+ locales.insert(locale);
+ std::string shortened_locale;
+ if (Intl::RemoveLocaleScriptTag(locale_str, &shortened_locale)) {
+ std::replace(shortened_locale.begin(), shortened_locale.end(), '_',
+ '-');
+ locales.insert(shortened_locale);
+ }
+ locale_str = uenum_next(en, &length, &status);
+ }
+ uenum_close(en);
+ return locales;
+ }
+ case IcuService::kRelativeDateTimeFormatter: {
+ // ICU RelativeDateTimeFormatter does not provide a getAvailableLocales()
+ // interface, because RelativeDateTimeFormatter depends on
+ // 1. NumberFormat and 2. ResourceBundle, return the
+ // intersection of these two set.
+ // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20009
+ // TODO(ftang): change to call ICU's getAvailableLocales() after it is
+ // added.
+ std::set<std::string> number_format_set(
+ Intl::GetAvailableLocales(IcuService::kNumberFormat));
+ std::set<std::string> resource_bundle_set(
+ Intl::GetAvailableLocales(IcuService::kResourceBundle));
+ set_intersection(resource_bundle_set.begin(), resource_bundle_set.end(),
+ number_format_set.begin(), number_format_set.end(),
+ std::inserter(locales, locales.begin()));
+ return locales;
+ }
+ }
+
+ UErrorCode error = U_ZERO_ERROR;
+ char result[ULOC_FULLNAME_CAPACITY];
+
+ for (int32_t i = 0; i < count; ++i) {
+ const char* icu_name = icu_available_locales[i].getName();
+
+ error = U_ZERO_ERROR;
+ // No need to force strict BCP47 rules.
+ uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+ if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
+ // This shouldn't happen, but lets not break the user.
+ continue;
+ }
+ std::string locale(result);
+ locales.insert(locale);
+
+ std::string shortened_locale;
+ if (Intl::RemoveLocaleScriptTag(icu_name, &shortened_locale)) {
+ std::replace(shortened_locale.begin(), shortened_locale.end(), '_', '-');
+ locales.insert(shortened_locale);
+ }
+ }
+
+ return locales;
+}
+
+bool Intl::IsObjectOfType(Isolate* isolate, Handle<Object> input,
+ Intl::Type expected_type) {
+ if (!input->IsJSObject()) return false;
+ Handle<JSObject> obj = Handle<JSObject>::cast(input);
+
+ Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
+ Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
+
+ if (!tag->IsSmi()) return false;
+
+ Intl::Type type = Intl::TypeFromSmi(Smi::cast(*tag));
+ return type == expected_type;
+}
+
+namespace {
+
+// In ECMA 402 v1, Intl constructors supported a mode of operation
+// where calling them with an existing object as a receiver would
+// transform the receiver into the relevant Intl instance with all
+// internal slots. In ECMA 402 v2, this capability was removed, to
+// avoid adding internal slots on existing objects. In ECMA 402 v3,
+// the capability was re-added as "normative optional" in a mode
+// which chains the underlying Intl instance on any object, when the
+// constructor is called
+//
+// See ecma402/#legacy-constructor.
+MaybeHandle<Object> LegacyUnwrapReceiver(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSFunction> constructor,
+ Intl::Type type) {
+ bool has_initialized_slot = Intl::IsObjectOfType(isolate, receiver, type);
+
+ Handle<Object> obj_is_instance_of;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj_is_instance_of,
+ Object::InstanceOf(isolate, receiver, constructor),
+ Object);
+ bool is_instance_of = obj_is_instance_of->BooleanValue(isolate);
+
+ // 2. If receiver does not have an [[Initialized...]] internal slot
+ // and ? InstanceofOperator(receiver, constructor) is true, then
+ if (!has_initialized_slot && is_instance_of) {
+ // 2. a. Let new_receiver be ? Get(receiver, %Intl%.[[FallbackSymbol]]).
+ Handle<Object> new_receiver;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, new_receiver,
+ JSReceiver::GetProperty(isolate, receiver,
+ isolate->factory()->intl_fallback_symbol()),
+ Object);
+ return new_receiver;
+ }
+
+ return receiver;
+}
+
+} // namespace
+
+MaybeHandle<JSObject> Intl::UnwrapReceiver(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSFunction> constructor,
+ Intl::Type type,
+ Handle<String> method_name,
+ bool check_legacy_constructor) {
+ Handle<Object> new_receiver = receiver;
+ if (check_legacy_constructor) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, new_receiver,
+ LegacyUnwrapReceiver(isolate, receiver, constructor, type), JSObject);
+ }
+
+ // 3. If Type(new_receiver) is not Object or nf does not have an
+ // [[Initialized...]] internal slot, then
+ if (!Intl::IsObjectOfType(isolate, new_receiver, type)) {
+ // 3. a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ method_name, receiver),
+ JSObject);
+ }
+
+ // The above IsObjectOfType returns true only for JSObjects, which
+ // makes this cast safe.
+ return Handle<JSObject>::cast(new_receiver);
+}
+
+MaybeHandle<JSObject> NumberFormat::Unwrap(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ const char* method_name) {
+ Handle<Context> native_context =
+ Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(native_context->intl_number_format_function()), isolate);
+ Handle<String> method_name_str =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+
+ return Intl::UnwrapReceiver(isolate, receiver, constructor,
+ Intl::Type::kNumberFormat, method_name_str, true);
+}
+
+MaybeHandle<Object> NumberFormat::FormatNumber(
+ Isolate* isolate, Handle<JSObject> number_format_holder, double value) {
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ CHECK_NOT_NULL(number_format);
+
+ icu::UnicodeString result;
+ number_format->format(value, result);
+
+ return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+namespace {
+
+// TODO(bstell): Make all these a constexpr on the Intl class.
+void BuildLanguageTagRegexps(Isolate* isolate) {
+ std::string alpha = "[a-zA-Z]";
+ std::string digit = "[0-9]";
+ std::string alphanum = "(" + alpha + "|" + digit + ")";
+ std::string regular =
+ "(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|"
+ "zh-min|zh-min-nan|zh-xiang)";
+ std::string irregular =
+ "(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|"
+ "i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|"
+ "i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)";
+ std::string grandfathered = "(" + irregular + "|" + regular + ")";
+ std::string private_use = "(x(-" + alphanum + "{1,8})+)";
+
+ std::string singleton = "(" + digit + "|[A-WY-Za-wy-z])";
+ std::string language_singleton_regexp = "^" + singleton + "$";
+
+ std::string extension = "(" + singleton + "(-" + alphanum + "{2,8})+)";
+
+ std::string variant = "(" + alphanum + "{5,8}|(" + digit + alphanum + "{3}))";
+ std::string language_variant_regexp = "^" + variant + "$";
+
+ std::string region = "(" + alpha + "{2}|" + digit + "{3})";
+ std::string script = "(" + alpha + "{4})";
+ std::string ext_lang = "(" + alpha + "{3}(-" + alpha + "{3}){0,2})";
+ std::string language = "(" + alpha + "{2,3}(-" + ext_lang + ")?|" + alpha +
+ "{4}|" + alpha + "{5,8})";
+ std::string lang_tag = language + "(-" + script + ")?(-" + region + ")?(-" +
+ variant + ")*(-" + extension + ")*(-" + private_use +
+ ")?";
+
+ std::string language_tag =
+ "^(" + lang_tag + "|" + private_use + "|" + grandfathered + ")$";
+ std::string language_tag_regexp = std::string(language_tag);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::RegexMatcher* language_singleton_regexp_matcher = new icu::RegexMatcher(
+ icu::UnicodeString::fromUTF8(language_singleton_regexp), 0, status);
+ CHECK(U_SUCCESS(status));
+ icu::RegexMatcher* language_tag_regexp_matcher = new icu::RegexMatcher(
+ icu::UnicodeString::fromUTF8(language_tag_regexp), 0, status);
+ CHECK(U_SUCCESS(status));
+ icu::RegexMatcher* language_variant_regexp_matcher = new icu::RegexMatcher(
+ icu::UnicodeString::fromUTF8(language_variant_regexp), 0, status);
+ CHECK(U_SUCCESS(status));
+
+ isolate->set_language_tag_regexp_matchers(language_singleton_regexp_matcher,
+ language_tag_regexp_matcher,
+ language_variant_regexp_matcher);
+}
+
+icu::RegexMatcher* GetLanguageSingletonRegexMatcher(Isolate* isolate) {
+ icu::RegexMatcher* language_singleton_regexp_matcher =
+ isolate->language_singleton_regexp_matcher();
+ if (language_singleton_regexp_matcher == nullptr) {
+ BuildLanguageTagRegexps(isolate);
+ language_singleton_regexp_matcher =
+ isolate->language_singleton_regexp_matcher();
+ }
+ return language_singleton_regexp_matcher;
+}
+
+icu::RegexMatcher* GetLanguageTagRegexMatcher(Isolate* isolate) {
+ icu::RegexMatcher* language_tag_regexp_matcher =
+ isolate->language_tag_regexp_matcher();
+ if (language_tag_regexp_matcher == nullptr) {
+ BuildLanguageTagRegexps(isolate);
+ language_tag_regexp_matcher = isolate->language_tag_regexp_matcher();
+ }
+ return language_tag_regexp_matcher;
+}
+
+icu::RegexMatcher* GetLanguageVariantRegexMatcher(Isolate* isolate) {
+ icu::RegexMatcher* language_variant_regexp_matcher =
+ isolate->language_variant_regexp_matcher();
+ if (language_variant_regexp_matcher == nullptr) {
+ BuildLanguageTagRegexps(isolate);
+ language_variant_regexp_matcher =
+ isolate->language_variant_regexp_matcher();
+ }
+ return language_variant_regexp_matcher;
+}
+
+} // anonymous namespace
+
+MaybeHandle<JSObject> Intl::ResolveLocale(Isolate* isolate, const char* service,
+ Handle<Object> requestedLocales,
+ Handle<Object> options) {
+ Handle<String> service_str =
+ isolate->factory()->NewStringFromAsciiChecked(service);
+
+ Handle<JSFunction> resolve_locale_function = isolate->resolve_locale();
+
+ Handle<Object> result;
+ Handle<Object> undefined_value(ReadOnlyRoots(isolate).undefined_value(),
+ isolate);
+ Handle<Object> args[] = {service_str, requestedLocales, options};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, resolve_locale_function, undefined_value,
+ arraysize(args), args),
+ JSObject);
+
+ return Handle<JSObject>::cast(result);
+}
+
+Maybe<bool> Intl::GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
+ const char* property,
+ std::vector<const char*> values,
+ const char* service,
+ std::unique_ptr<char[]>* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ if (value->IsUndefined(isolate)) {
+ return Just(false);
+ }
+
+ // 2. c. Let value be ? ToString(value).
+ Handle<String> value_str;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_str, Object::ToString(isolate, value), Nothing<bool>());
+ std::unique_ptr<char[]> value_cstr = value_str->ToCString();
+
+ // 2. d. if values is not undefined, then
+ if (values.size() > 0) {
+ // 2. d. i. If values does not contain an element equal to value,
+ // throw a RangeError exception.
+ for (size_t i = 0; i < values.size(); i++) {
+ if (strcmp(values.at(i), value_cstr.get()) == 0) {
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+ }
+ }
+
+ Handle<String> service_str =
+ isolate->factory()->NewStringFromAsciiChecked(service);
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kValueOutOfRange, value, service_str,
+ property_str),
+ Nothing<bool>());
+ }
+
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+}
+
+V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ const char* service, bool* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ // 2. If value is not undefined, then
+ if (!value->IsUndefined(isolate)) {
+ // 2. b. i. Let value be ToBoolean(value).
+ *result = value->BooleanValue(isolate);
+
+ // 2. e. return value
+ return Just(true);
+ }
+
+ return Just(false);
+}
+
+// TODO(bstell): enable this anonymous namespace once
+// IsStructurallyValidLanguageTag called.
+// namespace {
+
+char AsciiToLower(char c) {
+ if (c < 'A' || c > 'Z') {
+ return c;
+ }
+ return c | (1 << 5);
+}
+
+/**
+ * Check the structural Validity of the language tag per ECMA 402 6.2.2:
+ * - Well-formed per RFC 5646 2.1
+ * - There are no duplicate variant subtags
+ * - There are no duplicate singleton (extension) subtags
+ *
+ * One extra-check is done (from RFC 5646 2.2.9): the tag is compared
+ * against the list of grandfathered tags. However, subtags for
+ * primary/extended language, script, region, variant are not checked
+ * against the IANA language subtag registry.
+ *
+ * ICU is too permissible and lets invalid tags, like
+ * hant-cmn-cn, through.
+ *
+ * Returns false if the language tag is invalid.
+ */
+bool IsStructurallyValidLanguageTag(Isolate* isolate,
+ const std::string& locale_in) {
+ if (!String::IsAscii(locale_in.c_str(),
+ static_cast<int>(locale_in.length()))) {
+ return false;
+ }
+ std::string locale(locale_in);
+ icu::RegexMatcher* language_tag_regexp_matcher =
+ GetLanguageTagRegexMatcher(isolate);
+
+ // Check if it's well-formed, including grandfathered tags.
+ language_tag_regexp_matcher->reset(
+ icu::UnicodeString(locale.c_str(), -1, US_INV));
+ UErrorCode status = U_ZERO_ERROR;
+ bool is_valid_lang_tag = language_tag_regexp_matcher->matches(status);
+ if (!is_valid_lang_tag || V8_UNLIKELY(U_FAILURE(status))) {
+ return false;
+ }
+
+ std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
+
+ // Just return if it's a x- form. It's all private.
+ if (locale.find("x-") == 0) {
+ return true;
+ }
+
+ // Check if there are any duplicate variants or singletons (extensions).
+
+ // Remove private use section.
+ locale = locale.substr(0, locale.find("-x-"));
+
+ // Skip language since it can match variant regex, so we start from 1.
+ // We are matching i-klingon here, but that's ok, since i-klingon-klingon
+ // is not valid and would fail LANGUAGE_TAG_RE test.
+ size_t pos = 0;
+ std::vector<std::string> parts;
+ while ((pos = locale.find("-")) != std::string::npos) {
+ std::string token = locale.substr(0, pos);
+ parts.push_back(token);
+ locale = locale.substr(pos + 1);
+ }
+ if (locale.length() != 0) {
+ parts.push_back(locale);
+ }
+
+ icu::RegexMatcher* language_variant_regexp_matcher =
+ GetLanguageVariantRegexMatcher(isolate);
+
+ icu::RegexMatcher* language_singleton_regexp_matcher =
+ GetLanguageSingletonRegexMatcher(isolate);
+
+ std::vector<std::string> variants;
+ std::vector<std::string> extensions;
+ for (const auto& value : parts) {
+ language_variant_regexp_matcher->reset(
+ icu::UnicodeString::fromUTF8(value.c_str()));
+ bool is_language_variant = language_variant_regexp_matcher->matches(status);
+ if (V8_UNLIKELY(U_FAILURE(status))) {
+ return false;
+ }
+ if (is_language_variant && extensions.size() == 0) {
+ if (std::find(variants.begin(), variants.end(), value) ==
+ variants.end()) {
+ variants.push_back(value);
+ } else {
+ return false;
+ }
+ }
+
+ language_singleton_regexp_matcher->reset(
+ icu::UnicodeString(value.c_str(), -1, US_INV));
+ bool is_language_singleton =
+ language_singleton_regexp_matcher->matches(status);
+ if (V8_UNLIKELY(U_FAILURE(status))) {
+ return false;
+ }
+ if (is_language_singleton) {
+ if (std::find(extensions.begin(), extensions.end(), value) ==
+ extensions.end()) {
+ extensions.push_back(value);
+ } else {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+// } // anonymous namespace
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 84cf85c6da..d9cfd67965 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -9,6 +9,11 @@
#ifndef V8_OBJECTS_INTL_OBJECTS_H_
#define V8_OBJECTS_INTL_OBJECTS_H_
+#include <set>
+#include <string>
+
+#include "src/contexts.h"
+#include "src/intl.h"
#include "src/objects.h"
#include "unicode/uversion.h"
@@ -67,9 +72,47 @@ class NumberFormat {
// holds the pointer gets garbage collected.
static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
+ // The UnwrapNumberFormat abstract operation gets the underlying
+ // NumberFormat operation for various methods which implement
+ // ECMA-402 v1 semantics for supporting initializing existing Intl
+ // objects.
+ //
+ // ecma402/#sec-unwrapnumberformat
+ static MaybeHandle<JSObject> Unwrap(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ const char* method_name);
+
+ // ecm402/#sec-formatnumber
+ static MaybeHandle<Object> FormatNumber(Isolate* isolate,
+ Handle<JSObject> number_format_holder,
+ double value);
+
// Layout description.
- static const int kDecimalFormat = JSObject::kHeaderSize;
- static const int kSize = kDecimalFormat + kPointerSize;
+#define NUMBER_FORMAT_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kDecimalFormat, kPointerSize) \
+ V(kBoundFormat, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, NUMBER_FORMAT_FIELDS)
+#undef NUMBER_FORMAT_FIELDS
+
+ // ContextSlot defines the context structure for the bound
+ // NumberFormat.prototype.format function.
+ enum ContextSlot {
+ // The number format instance that the function holding this
+ // context is bound to.
+ kNumberFormat = Context::MIN_CONTEXT_SLOTS,
+
+ kLength
+ };
+
+ // TODO(gsathya): Remove this and use regular accessors once
+ // NumberFormat is a sub class of JSObject.
+ //
+ // This needs to be consistent with the above LayoutDescription.
+ static const int kDecimalFormatIndex = 0;
+ static const int kBoundFormatIndex = 1;
private:
NumberFormat();
@@ -159,6 +202,105 @@ class V8BreakIterator {
V8BreakIterator();
};
+class Intl {
+ public:
+ enum Type {
+ kNumberFormat = 0,
+ kCollator,
+ kDateTimeFormat,
+ kPluralRules,
+ kBreakIterator,
+ kLocale,
+
+ kTypeCount
+ };
+
+ inline static Intl::Type TypeFromInt(int type);
+ inline static Intl::Type TypeFromSmi(Smi* type);
+
+ // Checks if the given object has the expected_type based by looking
+ // up a private symbol on the object.
+ //
+ // TODO(gsathya): This should just be an instance type check once we
+ // move all the Intl objects to C++.
+ static bool IsObjectOfType(Isolate* isolate, Handle<Object> object,
+ Intl::Type expected_type);
+
+ // Gets the ICU locales for a given service. If there is a locale with a
+ // script tag then the locales also include a locale without the script; eg,
+ // pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India) would include
+ // pa_IN.
+ static std::set<std::string> GetAvailableLocales(const IcuService& service);
+
+ // If locale has a script tag then return true and the locale without the
+ // script else return false and an empty string
+ static bool RemoveLocaleScriptTag(const std::string& icu_locale,
+ std::string* locale_less_script);
+
+ // Returns the underlying Intl receiver for various methods which
+ // implement ECMA-402 v1 semantics for supporting initializing
+ // existing Intl objects.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> UnwrapReceiver(
+ Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<JSFunction> constructor, Intl::Type type,
+ Handle<String> method_name /* TODO(gsathya): Make this char const* */,
+ bool check_legacy_constructor = false);
+
+ // The ResolveLocale abstract operation compares a BCP 47 language
+ // priority list requestedLocales against the locales in
+ // availableLocales and determines the best available language to
+ // meet the request. availableLocales, requestedLocales, and
+ // relevantExtensionKeys must be provided as List values, options
+ // and localeData as Records.
+ //
+ // #ecma402/sec-partitiondatetimepattern
+ //
+ // Returns a JSObject with two properties:
+ // (1) locale
+ // (2) extension
+ //
+ // To access either, use JSObject::GetDataProperty.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolveLocale(
+ Isolate* isolate, const char* service, Handle<Object> requestedLocales,
+ Handle<Object> options);
+
+ // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+ // ecma402/#sec-getoption
+ //
+ // This is specialized for the case when type is string.
+ //
+ // Instead of passing undefined for the values argument as the spec
+ // defines, pass in an empty vector.
+ //
+ // Returns true if options object has the property and stores the
+ // result in value. Returns false if the value is not found. The
+ // caller is required to use fallback value appropriately in this
+ // case.
+ //
+ // service is a string denoting the type of Intl object; used when
+ // printing the error message.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ std::vector<const char*> values, const char* service,
+ std::unique_ptr<char[]>* result);
+
+ // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+ // ecma402/#sec-getoption
+ //
+ // This is specialized for the case when type is boolean.
+ //
+ // Returns true if options object has the property and stores the
+ // result in value. Returns false if the value is not found. The
+ // caller is required to use fallback value appropriately in this
+ // case.
+ //
+ // service is a string denoting the type of Intl object; used when
+ // printing the error message.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetBoolOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ const char* service, bool* result);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 2500acfe98..04e484c803 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -14,10 +14,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
-TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
-
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
@@ -51,12 +47,13 @@ void JSArray::SetContent(Handle<JSArray> array,
EnsureCanContainElements(array, storage, storage->length(),
ALLOW_COPIED_DOUBLE_ELEMENTS);
- DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
- IsDoubleElementsKind(array->GetElementsKind())) ||
- ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
- (IsObjectElementsKind(array->GetElementsKind()) ||
- (IsSmiElementsKind(array->GetElementsKind()) &&
- Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
+ DCHECK(
+ (storage->map() == array->GetReadOnlyRoots().fixed_double_array_map() &&
+ IsDoubleElementsKind(array->GetElementsKind())) ||
+ ((storage->map() != array->GetReadOnlyRoots().fixed_double_array_map()) &&
+ (IsObjectElementsKind(array->GetElementsKind()) ||
+ (IsSmiElementsKind(array->GetElementsKind()) &&
+ Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
array->set_elements(*storage);
array->set_length(Smi::FromInt(storage->length()));
}
@@ -107,12 +104,6 @@ void* JSArrayBuffer::allocation_base() const {
return backing_store();
}
-ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
- using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
- return is_wasm_memory() ? AllocationMode::kReservation
- : AllocationMode::kNormal;
-}
-
bool JSArrayBuffer::is_wasm_memory() const {
bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
DCHECK_EQ(is_wasm_memory,
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 6df0af17cb..28cfe71f5d 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -94,6 +94,9 @@ class JSArray : public JSObject {
AllocationMemento::kSize) >>
kDoubleSizeLog2;
+ // Valid array indices range from +0 <= i < 2^32 - 1 (kMaxUInt32).
+ static const uint32_t kMaxArrayIndex = kMaxUInt32 - 1;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -170,23 +173,17 @@ class JSArrayBuffer : public JSObject {
void Neuter();
- inline ArrayBuffer::Allocator::AllocationMode allocation_mode() const;
-
struct Allocation {
- using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
-
Allocation(void* allocation_base, size_t length, void* backing_store,
- AllocationMode mode, bool is_wasm_memory)
+ bool is_wasm_memory)
: allocation_base(allocation_base),
length(length),
backing_store(backing_store),
- mode(mode),
is_wasm_memory(is_wasm_memory) {}
void* allocation_base;
size_t length;
void* backing_store;
- AllocationMode mode;
bool is_wasm_memory;
};
@@ -196,6 +193,10 @@ class JSArrayBuffer : public JSObject {
// Sets whether the buffer is tracked by the WasmMemoryTracker.
void set_is_wasm_memory(bool is_wasm_memory);
+ // Removes the backing store from the WasmMemoryTracker and sets
+ // |is_wasm_memory| to false.
+ void StopTrackingWasmMemory(Isolate* isolate);
+
void FreeBackingStoreFromMainThread();
static void FreeBackingStore(Isolate* isolate, Allocation allocation);
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 7ad24bcf12..12bba94eaa 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -18,12 +18,6 @@ ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
-ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
-
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
@@ -37,7 +31,7 @@ Object* JSMapIterator::CurrentValue() {
OrderedHashMap* table(OrderedHashMap::cast(this->table()));
int index = Smi::ToInt(this->index());
Object* value = table->ValueAt(index);
- DCHECK(!value->IsTheHole(table->GetIsolate()));
+ DCHECK(!value->IsTheHole());
return value;
}
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 0777ccf1bd..5b9fcf1c29 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -31,7 +31,7 @@ class JSSet : public JSCollection {
DECL_CAST(JSSet)
static void Initialize(Handle<JSSet> set, Isolate* isolate);
- static void Clear(Handle<JSSet> set);
+ static void Clear(Isolate* isolate, Handle<JSSet> set);
// Dispatched behavior.
DECL_PRINTER(JSSet)
@@ -60,7 +60,7 @@ class JSMap : public JSCollection {
DECL_CAST(JSMap)
static void Initialize(Handle<JSMap> map, Isolate* isolate);
- static void Clear(Handle<JSMap> map);
+ static void Clear(Isolate* isolate, Handle<JSMap> map);
// Dispatched behavior.
DECL_PRINTER(JSMap)
@@ -95,9 +95,6 @@ class JSWeakCollection : public JSObject {
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
- // [next]: linked list of encountered weak maps during GC.
- DECL_ACCESSORS(next, Object)
-
static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
Handle<Object> value, int32_t hash);
@@ -107,22 +104,16 @@ class JSWeakCollection : public JSObject {
int max_entries);
static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
- // Visiting policy defines whether the table and next collection fields
- // should be visited or not.
- enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
+ static const int kSize = kTableOffset + kPointerSize;
// Iterates the function object according to the visiting policy.
- template <BodyVisitingPolicy>
class BodyDescriptorImpl;
// Visit the whole object.
- typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
+ typedef BodyDescriptorImpl BodyDescriptor;
- // Don't visit table and next collection fields.
- typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 4261824e77..5e78c0bc3f 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -11,12 +11,14 @@
#include <map>
#include <memory>
#include <string>
+#include <vector>
#include "src/api.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
#include "unicode/locid.h"
#include "unicode/unistr.h"
@@ -32,28 +34,12 @@ namespace internal {
namespace {
-// Extracts value of a given property key in the Object.
-Maybe<bool> ExtractStringSetting(Isolate* isolate, Handle<JSReceiver> options,
- const char* key, icu::UnicodeString* setting) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
-
- // JSReceiver::GetProperty could throw an exception and return an empty
- // MaybeHandle<Object>().
- // Returns Nothing<bool> on exception.
- Handle<Object> object;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, object, JSReceiver::GetProperty(options, str), Nothing<bool>());
-
- if (object->IsString()) {
- v8::String::Utf8Value utf8_string(
- v8_isolate, v8::Utils::ToLocal(Handle<String>::cast(object)));
- *setting = icu::UnicodeString::fromUTF8(*utf8_string);
- return Just(true);
- }
-
- return Just(false);
-}
+struct OptionData {
+ const char* name;
+ const char* key;
+ const std::vector<const char*>* possible_values;
+ bool is_bool_value;
+};
// Inserts tags from options into locale string.
Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
@@ -62,38 +48,52 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
CHECK(isolate);
CHECK(icu_locale);
- static const std::array<std::pair<const char*, const char*>, 6>
- kOptionToUnicodeTagMap = {{{"calendar", "ca"},
- {"collation", "co"},
- {"hourCycle", "hc"},
- {"caseFirst", "kf"},
- {"numeric", "kn"},
- {"numberingSystem", "nu"}}};
+ static std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
+ "h24"};
+ static std::vector<const char*> case_first_values = {"upper", "lower",
+ "false"};
+ static std::vector<const char*> empty_values = {};
+ static const std::array<OptionData, 6> kOptionToUnicodeTagMap = {
+ {{"calendar", "ca", &empty_values, false},
+ {"collation", "co", &empty_values, false},
+ {"hourCycle", "hc", &hour_cycle_values, false},
+ {"caseFirst", "kf", &case_first_values, false},
+ {"numeric", "kn", &empty_values, true},
+ {"numberingSystem", "nu", &empty_values, false}}};
+
+ // TODO(cira): Pass in values as per the spec to make this to be
+ // spec compliant.
for (const auto& option_to_bcp47 : kOptionToUnicodeTagMap) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString value_unicode;
-
- Maybe<bool> found = ExtractStringSetting(
- isolate, options, option_to_bcp47.first, &value_unicode);
- // Return on exception.
- MAYBE_RETURN(found, Nothing<bool>());
- if (!found.FromJust()) {
- // Skip this key, user didn't specify it in options.
- continue;
+ std::unique_ptr<char[]> value_str = nullptr;
+ bool value_bool = false;
+ Maybe<bool> maybe_found =
+ option_to_bcp47.is_bool_value
+ ? Intl::GetBoolOption(isolate, options, option_to_bcp47.name,
+ "locale", &value_bool)
+ : Intl::GetStringOption(isolate, options, option_to_bcp47.name,
+ *(option_to_bcp47.possible_values),
+ "locale", &value_str);
+ if (maybe_found.IsNothing()) return maybe_found;
+
+ // TODO(cira): Use fallback value if value is not found to make
+ // this spec compliant.
+ if (!maybe_found.FromJust()) continue;
+
+ if (option_to_bcp47.is_bool_value) {
+ value_str = value_bool ? isolate->factory()->true_string()->ToCString()
+ : isolate->factory()->false_string()->ToCString();
}
- DCHECK(found.FromJust());
-
- std::string value_string;
- value_unicode.toUTF8String(value_string);
+ DCHECK_NOT_NULL(value_str.get());
// Convert bcp47 key and value into legacy ICU format so we can use
// uloc_setKeywordValue.
- const char* key = uloc_toLegacyKey(option_to_bcp47.second);
- if (!key) return Just(false);
+ const char* key = uloc_toLegacyKey(option_to_bcp47.key);
+ DCHECK_NOT_NULL(key);
// Overwrite existing, or insert new key-value to the locale string.
- const char* value = uloc_toLegacyType(key, value_string.c_str());
+ const char* value = uloc_toLegacyType(key, value_str.get());
+ UErrorCode status = U_ZERO_ERROR;
if (value) {
// TODO(cira): ICU puts artificial limit on locale length, while BCP47
// doesn't. Switch to C++ API when it's ready.
@@ -137,17 +137,11 @@ bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
if (bcp47_key) {
const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
if (bcp47_value) {
- // It's either Boolean value.
- if (strncmp(bcp47_key, "kn", 2) == 0) {
- bool numeric = strcmp(bcp47_value, "true") == 0 ? true : false;
- Handle<Object> numeric_handle = factory->ToBoolean(numeric);
- locale_holder->set_numeric(*numeric_handle);
- continue;
- }
- // Or a string.
Handle<String> bcp47_handle =
factory->NewStringFromAsciiChecked(bcp47_value);
- if (strncmp(bcp47_key, "ca", 2) == 0) {
+ if (strncmp(bcp47_key, "kn", 2) == 0) {
+ locale_holder->set_numeric(*bcp47_handle);
+ } else if (strncmp(bcp47_key, "ca", 2) == 0) {
locale_holder->set_calendar(*bcp47_handle);
} else if (strncmp(bcp47_key, "kf", 2) == 0) {
locale_holder->set_case_first(*bcp47_handle);
@@ -168,10 +162,11 @@ bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
}
} // namespace
-bool JSLocale::InitializeLocale(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options) {
+MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
+ Handle<JSLocale> locale_holder,
+ Handle<String> locale,
+ Handle<JSReceiver> options) {
+ static const char* const kMethod = "Intl.Locale";
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
UErrorCode status = U_ZERO_ERROR;
@@ -180,28 +175,54 @@ bool JSLocale::InitializeLocale(Isolate* isolate,
char icu_canonical[ULOC_FULLNAME_CAPACITY];
v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() == 0) return false;
+ if (bcp47_locale.length() == 0) return MaybeHandle<JSLocale>();
int icu_length = uloc_forLanguageTag(
*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY, nullptr, &status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING ||
icu_length == 0) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, icu_result);
- if (error.IsNothing() || !error.FromJust()) {
- return false;
+ MAYBE_RETURN(error, MaybeHandle<JSLocale>());
+ if (!error.FromJust()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
+ DCHECK(error.FromJust());
uloc_canonicalize(icu_result, icu_canonical, ULOC_FULLNAME_CAPACITY, &status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
if (!PopulateLocaleWithUnicodeTags(isolate, icu_canonical, locale_holder)) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
// Extract language, script and region parts.
@@ -215,7 +236,13 @@ bool JSLocale::InitializeLocale(Isolate* isolate,
uloc_getCountry(icu_canonical, icu_region, ULOC_COUNTRY_CAPACITY, &status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
Factory* factory = isolate->factory();
@@ -244,7 +271,13 @@ bool JSLocale::InitializeLocale(Isolate* isolate,
uloc_toLanguageTag(icu_base_name, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
&status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
Handle<String> base_name = factory->NewStringFromAsciiChecked(bcp47_result);
locale_holder->set_base_name(*base_name);
@@ -253,13 +286,19 @@ bool JSLocale::InitializeLocale(Isolate* isolate,
uloc_toLanguageTag(icu_canonical, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
&status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- return false;
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters,
+ isolate->factory()->NewStringFromAsciiChecked(kMethod),
+ locale_holder),
+ JSLocale);
+ return MaybeHandle<JSLocale>();
}
Handle<String> locale_handle =
factory->NewStringFromAsciiChecked(bcp47_result);
locale_holder->set_locale(*locale_handle);
- return true;
+ return locale_holder;
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 6d0d2b3d59..74d64bf486 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -26,9 +26,10 @@ class JSLocale : public JSObject {
public:
// Initializes locale object with properties derived from input locale string
// and options.
- static bool InitializeLocale(Isolate* isolate, Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options);
+ static MaybeHandle<JSLocale> InitializeLocale(Isolate* isolate,
+ Handle<JSLocale> locale_holder,
+ Handle<String> locale,
+ Handle<JSReceiver> options);
DECL_CAST(JSLocale)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index afe297b880..3eb1a4fff1 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -14,7 +14,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
CAST_ACCESSOR(JSPromise)
ACCESSORS(JSPromise, reactions_or_result, Object, kReactionsOrResultOffset)
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index 434bae8933..c52e19ce49 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -45,6 +45,9 @@ class JSPromise : public JSObject {
// block in an async function.
DECL_BOOLEAN_ACCESSORS(handled_hint)
+ int async_task_id() const;
+ void set_async_task_id(int id);
+
static const char* Status(Promise::PromiseState status);
Promise::PromiseState status() const;
void set_status(Promise::PromiseState status);
@@ -77,6 +80,7 @@ class JSPromise : public JSObject {
static const int kStatusBits = 2;
static const int kHasHandlerBit = 2;
static const int kHandledHintBit = 3;
+ class AsyncTaskIdField : public BitField<int, kHandledHintBit + 1, 22> {};
static const int kStatusShift = 0;
static const int kStatusMask = 0x3;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 697c81eb42..c6f31e81af 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -15,8 +15,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
-
CAST_ACCESSOR(JSRegExp)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
@@ -26,7 +24,7 @@ ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
JSRegExp::Type JSRegExp::TypeTag() {
Object* data = this->data();
- if (data->IsUndefined(GetIsolate())) return JSRegExp::NOT_COMPILED;
+ if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
return static_cast<JSRegExp::Type>(smi->value());
}
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index 1d6a64ec0c..ca099d48e5 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -13,8 +13,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE)
-
ACCESSORS(JSRegExpStringIterator, iterating_regexp, Object,
kIteratingRegExpOffset)
ACCESSORS(JSRegExpStringIterator, iterating_string, String,
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 6a0c4e3391..7c8841ee79 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -55,7 +55,8 @@ class JSRegExp : public JSObject {
DECL_ACCESSORS(last_index, Object)
DECL_ACCESSORS(source, Object)
- V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
+ V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Isolate* isolate,
+ Handle<String> source,
Flags flags);
static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
new file mode 100644
index 0000000000..3d2a5c8e09
--- /dev/null
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
+#define V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-relative-time-format.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Base relative time format accessors.
+ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset);
+ACCESSORS(JSRelativeTimeFormat, formatter, Foreign, kFormatterOffset);
+
+// TODO(ftang): Use bit field accessor for style and numeric later.
+
+inline void JSRelativeTimeFormat::set_style(Style style) {
+ DCHECK_GT(Style::COUNT, style);
+ int value = static_cast<int>(style);
+ WRITE_FIELD(this, kStyleOffset, Smi::FromInt(value));
+}
+
+inline JSRelativeTimeFormat::Style JSRelativeTimeFormat::style() const {
+ Object* value = READ_FIELD(this, kStyleOffset);
+ int style = Smi::ToInt(value);
+ DCHECK_LE(0, style);
+ DCHECK_GT(static_cast<int>(Style::COUNT), style);
+ return static_cast<Style>(style);
+}
+
+inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
+ DCHECK_GT(Numeric::COUNT, numeric);
+ int value = static_cast<int>(numeric);
+ WRITE_FIELD(this, kNumericOffset, Smi::FromInt(value));
+}
+
+inline JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::numeric() const {
+ Object* value = READ_FIELD(this, kNumericOffset);
+ int numeric = Smi::ToInt(value);
+ DCHECK_LE(0, numeric);
+ DCHECK_GT(static_cast<int>(Numeric::COUNT), numeric);
+ return static_cast<Numeric>(numeric);
+}
+
+CAST_ACCESSOR(JSRelativeTimeFormat);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
new file mode 100644
index 0000000000..c70db1b339
--- /dev/null
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -0,0 +1,225 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-relative-time-format.h"
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-relative-time-format-inl.h"
+#include "unicode/numfmt.h"
+#include "unicode/reldatefmt.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+UDateRelativeDateTimeFormatterStyle getIcuStyle(
+ JSRelativeTimeFormat::Style style) {
+ switch (style) {
+ case JSRelativeTimeFormat::Style::LONG:
+ return UDAT_STYLE_LONG;
+ case JSRelativeTimeFormat::Style::SHORT:
+ return UDAT_STYLE_SHORT;
+ case JSRelativeTimeFormat::Style::NARROW:
+ return UDAT_STYLE_NARROW;
+ case JSRelativeTimeFormat::Style::COUNT:
+ UNREACHABLE();
+ }
+}
+
+JSRelativeTimeFormat::Style getStyle(const char* str) {
+ if (strcmp(str, "long") == 0) return JSRelativeTimeFormat::Style::LONG;
+ if (strcmp(str, "short") == 0) return JSRelativeTimeFormat::Style::SHORT;
+ if (strcmp(str, "narrow") == 0) return JSRelativeTimeFormat::Style::NARROW;
+ UNREACHABLE();
+}
+
+JSRelativeTimeFormat::Numeric getNumeric(const char* str) {
+ if (strcmp(str, "auto") == 0) return JSRelativeTimeFormat::Numeric::AUTO;
+ if (strcmp(str, "always") == 0) return JSRelativeTimeFormat::Numeric::ALWAYS;
+ UNREACHABLE();
+}
+
+} // namespace
+
+MaybeHandle<JSRelativeTimeFormat>
+JSRelativeTimeFormat::InitializeRelativeTimeFormat(
+ Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
+ Handle<Object> input_locales, Handle<Object> input_options) {
+ Factory* factory = isolate->factory();
+
+ // 4. If options is undefined, then
+ Handle<JSReceiver> options;
+ if (input_options->IsUndefined(isolate)) {
+ // a. Let options be ObjectCreate(null).
+ options = isolate->factory()->NewJSObjectWithNullProto();
+ // 5. Else
+ } else {
+ // a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, input_options),
+ JSRelativeTimeFormat);
+ }
+
+ // 10. Let r be ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
+ // requestedLocales, opt,
+ // %RelativeTimeFormat%.[[RelevantExtensionKeys]],
+ // localeData).
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, r,
+ Intl::ResolveLocale(isolate, "relativetimeformat",
+ input_locales, options),
+ JSRelativeTimeFormat);
+ Handle<Object> locale_obj =
+ JSObject::GetDataProperty(r, factory->locale_string());
+ Handle<String> locale;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, locale,
+ Object::ToString(isolate, locale_obj),
+ JSRelativeTimeFormat);
+
+ // 11. Let locale be r.[[Locale]].
+ // 12. Set relativeTimeFormat.[[Locale]] to locale.
+ relative_time_format_holder->set_locale(*locale);
+
+ // 14. Let s be ? GetOption(options, "style", "string",
+ // «"long", "short", "narrow"», "long").
+ std::unique_ptr<char[]> style_str = nullptr;
+ std::vector<const char*> style_values = {"long", "short", "narrow"};
+ Maybe<bool> maybe_found_style =
+ Intl::GetStringOption(isolate, options, "style", style_values,
+ "Intl.RelativeTimeFormat", &style_str);
+ Style style_enum = Style::LONG;
+ MAYBE_RETURN(maybe_found_style, MaybeHandle<JSRelativeTimeFormat>());
+ if (maybe_found_style.FromJust()) {
+ DCHECK_NOT_NULL(style_str.get());
+ style_enum = getStyle(style_str.get());
+ }
+
+ // 15. Set relativeTimeFormat.[[Style]] to s.
+ relative_time_format_holder->set_style(style_enum);
+
+ // 16. Let numeric be ? GetOption(options, "numeric", "string",
+ // «"always", "auto"», "always").
+ std::unique_ptr<char[]> numeric_str = nullptr;
+ std::vector<const char*> numeric_values = {"always", "auto"};
+ Maybe<bool> maybe_found_numeric =
+ Intl::GetStringOption(isolate, options, "numeric", numeric_values,
+ "Intl.RelativeTimeFormat", &numeric_str);
+ Numeric numeric_enum = Numeric::ALWAYS;
+ MAYBE_RETURN(maybe_found_numeric, MaybeHandle<JSRelativeTimeFormat>());
+ if (maybe_found_numeric.FromJust()) {
+ DCHECK_NOT_NULL(numeric_str.get());
+ numeric_enum = getNumeric(numeric_str.get());
+ }
+
+ // 17. Set relativeTimeFormat.[[Numeric]] to numeric.
+ relative_time_format_holder->set_numeric(numeric_enum);
+
+ std::unique_ptr<char[]> locale_name = locale->ToCString();
+ icu::Locale icu_locale(locale_name.get());
+ UErrorCode status = U_ZERO_ERROR;
+
+ // 25. Let relativeTimeFormat.[[NumberFormat]] be
+ // ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
+ icu::NumberFormat* number_format =
+ icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status);
+ if (U_FAILURE(status) || number_format == nullptr) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kRelativeDateTimeFormatterBadParameters),
+ JSRelativeTimeFormat);
+ }
+ // 23. Perform ! CreateDataPropertyOrThrow(nfOptions, "useGrouping", false).
+ number_format->setGroupingUsed(false);
+
+ // 24. Perform ! CreateDataPropertyOrThrow(nfOptions,
+ // "minimumIntegerDigits", 2).
+ // Ref: https://github.com/tc39/proposal-intl-relative-time/issues/80
+ number_format->setMinimumIntegerDigits(2);
+
+ // Change UDISPCTX_CAPITALIZATION_NONE to other values if
+ // ECMA402 later include option to change capitalization.
+ // Ref: https://github.com/tc39/proposal-intl-relative-time/issues/11
+ icu::RelativeDateTimeFormatter* icu_formatter =
+ new icu::RelativeDateTimeFormatter(icu_locale, number_format,
+ getIcuStyle(style_enum),
+ UDISPCTX_CAPITALIZATION_NONE, status);
+
+ if (U_FAILURE(status) || (icu_formatter == nullptr)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kRelativeDateTimeFormatterBadParameters),
+ JSRelativeTimeFormat);
+ }
+ Handle<Managed<icu::RelativeDateTimeFormatter>> managed_formatter =
+ Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
+ icu_formatter);
+
+ // 30. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
+ relative_time_format_holder->set_formatter(*managed_formatter);
+ // 31. Return relativeTimeFormat.
+ return relative_time_format_holder;
+}
+
+Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
+ Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ Handle<String> locale(format_holder->locale(), isolate);
+ JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
+ NONE);
+ JSObject::AddProperty(isolate, result, factory->style_string(),
+ format_holder->StyleAsString(), NONE);
+ JSObject::AddProperty(isolate, result, factory->numeric_string(),
+ format_holder->NumericAsString(), NONE);
+ return result;
+}
+
+icu::RelativeDateTimeFormatter* JSRelativeTimeFormat::UnpackFormatter(
+ Isolate* isolate, Handle<JSRelativeTimeFormat> holder) {
+ return Managed<icu::RelativeDateTimeFormatter>::cast(holder->formatter())
+ ->raw();
+}
+
+Handle<String> JSRelativeTimeFormat::StyleAsString() const {
+ switch (style()) {
+ case Style::LONG:
+ return GetReadOnlyRoots().long_string_handle();
+ case Style::SHORT:
+ return GetReadOnlyRoots().short_string_handle();
+ case Style::NARROW:
+ return GetReadOnlyRoots().narrow_string_handle();
+ case Style::COUNT:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSRelativeTimeFormat::NumericAsString() const {
+ switch (numeric()) {
+ case Numeric::ALWAYS:
+ return GetReadOnlyRoots().always_string_handle();
+ case Numeric::AUTO:
+ return GetReadOnlyRoots().auto_string_handle();
+ case Numeric::COUNT:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
new file mode 100644
index 0000000000..70d1cdacf4
--- /dev/null
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_H_
+#define V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "unicode/uversion.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class RelativeDateTimeFormatter;
+}
+
+namespace v8 {
+namespace internal {
+
+class JSRelativeTimeFormat : public JSObject {
+ public:
+ // Initializes relative time format object with properties derived from input
+ // locales and options.
+ static MaybeHandle<JSRelativeTimeFormat> InitializeRelativeTimeFormat(
+ Isolate* isolate,
+ Handle<JSRelativeTimeFormat> relative_time_format_holder,
+ Handle<Object> locales, Handle<Object> options);
+
+ static Handle<JSObject> ResolvedOptions(
+ Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder);
+
+ // Unpacks formatter object from corresponding JavaScript object.
+ static icu::RelativeDateTimeFormatter* UnpackFormatter(
+ Isolate* isolate,
+ Handle<JSRelativeTimeFormat> relative_time_format_holder);
+ Handle<String> StyleAsString() const;
+ Handle<String> NumericAsString() const;
+
+ DECL_CAST(JSRelativeTimeFormat)
+
+ // RelativeTimeFormat accessors.
+ DECL_ACCESSORS(locale, String)
+ // TODO(ftang): Style requires only 3 bits and Numeric requires only 2 bits
+ // but here we're using 64 bits for each. We should fold these two fields into
+ // a single Flags field and use BIT_FIELD_ACCESSORS to access it.
+ //
+ // Style: identifying the relative time format style used.
+ //
+ // ecma402/#sec-properties-of-intl-relativetimeformat-instances
+
+ enum class Style {
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW, // Use the shortest possible form.
+ COUNT
+ };
+ inline void set_style(Style style);
+ inline Style style() const;
+
+ // Numeric: identifying whether numerical descriptions are always used, or
+ // used only when no more specific version is available (e.g., "1 day ago" vs
+ // "yesterday").
+ //
+ // ecma402/#sec-properties-of-intl-relativetimeformat-instances
+ enum class Numeric {
+ ALWAYS, // numerical descriptions are always used ("1 day ago")
+ AUTO, // numerical descriptions are used only when no more specific
+ // version is available ("yesterday")
+ COUNT
+ };
+ inline void set_numeric(Numeric numeric);
+ inline Numeric numeric() const;
+
+ DECL_ACCESSORS(formatter, Foreign)
+ DECL_PRINTER(JSRelativeTimeFormat)
+ DECL_VERIFIER(JSRelativeTimeFormat)
+
+ // Layout description.
+ static const int kJSRelativeTimeFormatOffset = JSObject::kHeaderSize;
+ static const int kLocaleOffset = kJSRelativeTimeFormatOffset + kPointerSize;
+ static const int kStyleOffset = kLocaleOffset + kPointerSize;
+ static const int kNumericOffset = kStyleOffset + kPointerSize;
+ static const int kFormatterOffset = kNumericOffset + kPointerSize;
+ static const int kSize = kFormatterOffset + kPointerSize;
+
+ // Constant to access field
+ static const int kFormatterField = 3;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSRelativeTimeFormat);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_H_
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index fa9fcedaab..4d247cfd29 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
+ FixedArray::OffsetOfElementAt(kLiteralTypeOffset));
+
CAST_ACCESSOR(ClassBoilerplate)
BIT_FIELD_ACCESSORS(ClassBoilerplate, flags, install_class_name_accessor,
@@ -43,6 +46,23 @@ ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex));
+SMI_ACCESSORS(ArrayBoilerplateDescription, flags, kFlagsOffset);
+
+ACCESSORS(ArrayBoilerplateDescription, constant_elements, FixedArrayBase,
+ kConstantElementsOffset);
+
+ElementsKind ArrayBoilerplateDescription::elements_kind() const {
+ return static_cast<ElementsKind>(flags());
+}
+
+void ArrayBoilerplateDescription::set_elements_kind(ElementsKind kind) {
+ set_flags(kind);
+}
+
+bool ArrayBoilerplateDescription::is_empty() const {
+ return constant_elements()->length() == 0;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index e47a444ce0..29d7c42b5d 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -15,24 +15,34 @@
namespace v8 {
namespace internal {
-Object* BoilerplateDescription::name(int index) const {
+Object* ObjectBoilerplateDescription::name(int index) const {
// get() already checks for out of bounds access, but we do not want to allow
// access to the last element, if it is the number of properties.
DCHECK_NE(size(), index);
- return get(2 * index);
+ return get(2 * index + kDescriptionStartIndex);
}
-Object* BoilerplateDescription::value(int index) const {
- return get(2 * index + 1);
+Object* ObjectBoilerplateDescription::value(int index) const {
+ return get(2 * index + 1 + kDescriptionStartIndex);
}
-int BoilerplateDescription::size() const {
- DCHECK_EQ(0, (length() - (this->has_number_of_properties() ? 1 : 0)) % 2);
+void ObjectBoilerplateDescription::set_key_value(int index, Object* key,
+ Object* value) {
+ DCHECK_LT(index, size());
+ DCHECK_GE(index, 0);
+ set(2 * index + kDescriptionStartIndex, key);
+ set(2 * index + 1 + kDescriptionStartIndex, value);
+}
+
+int ObjectBoilerplateDescription::size() const {
+ DCHECK_EQ(0, (length() - kDescriptionStartIndex -
+ (this->has_number_of_properties() ? 1 : 0)) %
+ 2);
// Rounding is intended.
- return length() / 2;
+ return (length() - kDescriptionStartIndex) / 2;
}
-int BoilerplateDescription::backing_store_size() const {
+int ObjectBoilerplateDescription::backing_store_size() const {
if (has_number_of_properties()) {
// If present, the last entry contains the number of properties.
return Smi::ToInt(this->get(length() - 1));
@@ -42,8 +52,8 @@ int BoilerplateDescription::backing_store_size() const {
return size();
}
-void BoilerplateDescription::set_backing_store_size(Isolate* isolate,
- int backing_store_size) {
+void ObjectBoilerplateDescription::set_backing_store_size(
+ Isolate* isolate, int backing_store_size) {
DCHECK(has_number_of_properties());
DCHECK_NE(size(), backing_store_size);
Handle<Object> backing_store_size_obj =
@@ -51,8 +61,8 @@ void BoilerplateDescription::set_backing_store_size(Isolate* isolate,
set(length() - 1, *backing_store_size_obj);
}
-bool BoilerplateDescription::has_number_of_properties() const {
- return length() % 2 != 0;
+bool ObjectBoilerplateDescription::has_number_of_properties() const {
+ return (length() - kDescriptionStartIndex) % 2 != 0;
}
namespace {
@@ -99,7 +109,7 @@ void AddToDescriptorArrayTemplate(
} else {
DCHECK(value_kind == ClassBoilerplate::kGetter ||
value_kind == ClassBoilerplate::kSetter);
- Object* raw_accessor = descriptor_array_template->GetValue(entry);
+ Object* raw_accessor = descriptor_array_template->GetStrongValue(entry);
AccessorPair* pair;
if (raw_accessor->IsAccessorPair()) {
pair = AccessorPair::cast(raw_accessor);
@@ -118,18 +128,19 @@ void AddToDescriptorArrayTemplate(
}
Handle<NameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- Handle<NameDictionary> dictionary, Handle<Name> name, Handle<Object> value,
- PropertyDetails details, int* entry_out = nullptr) {
+ Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ Handle<Object> value, PropertyDetails details, int* entry_out = nullptr) {
return NameDictionary::AddNoUpdateNextEnumerationIndex(
- dictionary, name, value, details, entry_out);
+ isolate, dictionary, name, value, details, entry_out);
}
Handle<NumberDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- Handle<NumberDictionary> dictionary, uint32_t element, Handle<Object> value,
- PropertyDetails details, int* entry_out = nullptr) {
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t element,
+ Handle<Object> value, PropertyDetails details, int* entry_out = nullptr) {
// NumberDictionary does not maintain the enumeration order, so it's
// a normal Add().
- return NumberDictionary::Add(dictionary, element, value, details, entry_out);
+ return NumberDictionary::Add(isolate, dictionary, element, value, details,
+ entry_out);
}
void DictionaryUpdateMaxNumberKey(Handle<NameDictionary> dictionary,
@@ -188,7 +199,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// Add value to the dictionary without updating next enumeration index.
Handle<Dictionary> dict = DictionaryAddNoUpdateNextEnumerationIndex(
- dictionary, key, value_handle, details, &entry);
+ isolate, dictionary, key, value_handle, details, &entry);
// It is crucial to avoid dictionary reallocations because it may remove
// potential gaps in enumeration indices values that are necessary for
// inserting computed properties into right places in the enumeration order.
@@ -215,7 +226,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// so overwrite both.
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
- dictionary->DetailsAtPut(entry, details);
+ dictionary->DetailsAtPut(isolate, entry, details);
dictionary->ValueAtPut(entry, value);
} else {
@@ -240,7 +251,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
if (existing_value_index < key_index) {
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
- dictionary->DetailsAtPut(entry, details);
+ dictionary->DetailsAtPut(isolate, entry, details);
dictionary->ValueAtPut(entry, value);
}
}
@@ -260,9 +271,9 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
} else {
Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
pair->set(component, value);
- PropertyDetails details(kAccessor, DONT_ENUM,
- PropertyCellType::kNoCell);
- dictionary->DetailsAtPut(entry, details);
+ PropertyDetails details(kAccessor, DONT_ENUM, PropertyCellType::kNoCell,
+ enum_order);
+ dictionary->DetailsAtPut(isolate, entry, details);
dictionary->ValueAtPut(entry, *pair);
}
}
@@ -324,7 +335,7 @@ class ObjectDescriptor {
temp_handle_ = handle(Smi::kZero, isolate);
}
- void AddConstant(Handle<Name> name, Handle<Object> value,
+ void AddConstant(Isolate* isolate, Handle<Name> name, Handle<Object> value,
PropertyAttributes attribs) {
bool is_accessor = value->IsAccessorInfo();
DCHECK(!value->IsAccessorPair());
@@ -334,7 +345,7 @@ class ObjectDescriptor {
next_enumeration_index_++);
properties_dictionary_template_ =
DictionaryAddNoUpdateNextEnumerationIndex(
- properties_dictionary_template_, name, value, details);
+ isolate, properties_dictionary_template_, name, value, details);
} else {
Descriptor d = is_accessor
? Descriptor::AccessorConstant(name, value, attribs)
@@ -384,10 +395,8 @@ class ObjectDescriptor {
if (HasDictionaryProperties()) {
properties_dictionary_template_->SetNextEnumerationIndex(
next_enumeration_index_);
-
- isolate->heap()->RightTrimFixedArray(
- *computed_properties_,
- computed_properties_->length() - current_computed_index_);
+ computed_properties_ = FixedArray::ShrinkOrEmpty(
+ isolate, computed_properties_, current_computed_index_);
} else {
DCHECK(descriptor_array_template_->IsSortedNoDuplicates());
}
@@ -459,14 +468,14 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
// Add length_accessor.
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- static_desc.AddConstant(factory->length_string(),
+ static_desc.AddConstant(isolate, factory->length_string(),
factory->function_length_accessor(), attribs);
}
{
// Add prototype_accessor.
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- static_desc.AddConstant(factory->prototype_string(),
+ static_desc.AddConstant(isolate, factory->prototype_string(),
factory->function_prototype_accessor(), attribs);
}
if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
@@ -474,15 +483,16 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
Handle<Object> value(
Smi::FromInt(ClassBoilerplate::kPrototypeArgumentIndex), isolate);
- static_desc.AddConstant(factory->home_object_symbol(), value, attribs);
+ static_desc.AddConstant(isolate, factory->home_object_symbol(), value,
+ attribs);
}
{
Handle<Smi> start_position(Smi::FromInt(expr->start_position()), isolate);
Handle<Smi> end_position(Smi::FromInt(expr->end_position()), isolate);
Handle<Tuple2> class_positions =
factory->NewTuple2(start_position, end_position, NOT_TENURED);
- static_desc.AddConstant(factory->class_positions_symbol(), class_positions,
- DONT_ENUM);
+ static_desc.AddConstant(isolate, factory->class_positions_symbol(),
+ class_positions, DONT_ENUM);
}
//
@@ -492,7 +502,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
{
Handle<Object> value(
Smi::FromInt(ClassBoilerplate::kConstructorArgumentIndex), isolate);
- instance_desc.AddConstant(factory->constructor_string(), value, DONT_ENUM);
+ instance_desc.AddConstant(isolate, factory->constructor_string(), value,
+ DONT_ENUM);
}
//
@@ -558,7 +569,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
// Set class name accessor if the "name" method was not added yet.
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- static_desc.AddConstant(factory->name_string(),
+ static_desc.AddConstant(isolate, factory->name_string(),
factory->function_name_accessor(), attribs);
}
}
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index c5c2c765c9..43a176017d 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -16,16 +16,18 @@ namespace internal {
class ClassLiteral;
-// BoilerplateDescription is a list of properties consisting of name value
+// ObjectBoilerplateDescription is a list of properties consisting of name value
// pairs. In addition to the properties, it provides the projected number
// of properties in the backing store. This number includes properties with
// computed names that are not
// in the list.
-class BoilerplateDescription : public FixedArray {
+class ObjectBoilerplateDescription : public FixedArray {
public:
Object* name(int index) const;
Object* value(int index) const;
+ void set_key_value(int index, Object* key, Object* value);
+
// The number of boilerplate properties.
int size() const;
@@ -34,30 +36,48 @@ class BoilerplateDescription : public FixedArray {
void set_backing_store_size(Isolate* isolate, int backing_store_size);
- DECL_CAST(BoilerplateDescription)
- DECL_PRINTER(BoilerplateDescription)
+ // Used to encode ObjectLiteral::Flags for nested object literals
+ // Stored as the first element of the fixed array
+ DECL_INT_ACCESSORS(flags)
+ static const int kLiteralTypeOffset = 0;
+ static const int kDescriptionStartIndex = 1;
+
+ DECL_CAST(ObjectBoilerplateDescription)
+ DECL_VERIFIER(ObjectBoilerplateDescription)
+ DECL_PRINTER(ObjectBoilerplateDescription)
private:
bool has_number_of_properties() const;
};
-// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
-// expressions. Used to communicate with the runtime for literal boilerplate
-// creation within the {Runtime_CreateArrayLiteral} method.
-class ConstantElementsPair : public Tuple2 {
+class ArrayBoilerplateDescription : public Struct {
public:
- DECL_INT_ACCESSORS(elements_kind)
- DECL_ACCESSORS(constant_values, FixedArrayBase)
+ // store constant_elements of a fixed array
+ DECL_ACCESSORS(constant_elements, FixedArrayBase)
+
+ inline ElementsKind elements_kind() const;
+ inline void set_elements_kind(ElementsKind kind);
inline bool is_empty() const;
- DECL_CAST(ConstantElementsPair)
+ DECL_CAST(ArrayBoilerplateDescription)
+ // Dispatched behavior.
+ DECL_PRINTER(ArrayBoilerplateDescription)
+ DECL_VERIFIER(ArrayBoilerplateDescription)
+ void BriefPrintDetails(std::ostream& os);
+
+#define ARRAY_BOILERPLATE_DESCRIPTION_FIELDS(V) \
+ V(kFlagsOffset, kPointerSize) \
+ V(kConstantElementsOffset, kPointerSize) \
+ V(kSize, 0)
- static const int kElementsKindOffset = kValue1Offset;
- static const int kConstantValuesOffset = kValue2Offset;
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ ARRAY_BOILERPLATE_DESCRIPTION_FIELDS)
+#undef ARRAY_BOILERPLATE_DESCRIPTION_FIELDS
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
+ DECL_INT_ACCESSORS(flags)
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayBoilerplateDescription);
};
class ClassBoilerplate : public FixedArray {
diff --git a/deps/v8/src/objects/managed.cc b/deps/v8/src/objects/managed.cc
index 2e2cfe24ab..034a2085f9 100644
--- a/deps/v8/src/objects/managed.cc
+++ b/deps/v8/src/objects/managed.cc
@@ -13,8 +13,10 @@ namespace {
void ManagedObjectFinalizerSecondPass(const v8::WeakCallbackInfo<void>& data) {
auto destructor =
reinterpret_cast<ManagedPtrDestructor*>(data.GetParameter());
+ int64_t adjustment = 0 - static_cast<int64_t>(destructor->estimated_size_);
destructor->destructor_(destructor->shared_ptr_ptr_);
delete destructor;
+ data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(adjustment);
}
} // namespace
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index 3cf1dcd3af..4c3d67bbfc 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -16,14 +16,21 @@ namespace internal {
// Implements a doubly-linked lists of destructors for the isolate.
struct ManagedPtrDestructor {
+ // Estimated size of external memory associated with the managed object.
+ // This is used to adjust the garbage collector's heuristics upon
+ // allocation and deallocation of a managed object.
+ size_t estimated_size_ = 0;
ManagedPtrDestructor* prev_ = nullptr;
ManagedPtrDestructor* next_ = nullptr;
void* shared_ptr_ptr_ = nullptr;
void (*destructor_)(void* shared_ptr) = nullptr;
Object** global_handle_location_ = nullptr;
- ManagedPtrDestructor(void* shared_ptr_ptr, void (*destructor)(void*))
- : shared_ptr_ptr_(shared_ptr_ptr), destructor_(destructor) {}
+ ManagedPtrDestructor(size_t estimated_size, void* shared_ptr_ptr,
+ void (*destructor)(void*))
+ : estimated_size_(estimated_size),
+ shared_ptr_ptr_(shared_ptr_ptr),
+ destructor_(destructor) {}
};
// The GC finalizer of a managed object, which does not depend on
@@ -53,30 +60,40 @@ class Managed : public Foreign {
// Allocate a new {CppType} and wrap it in a {Managed<CppType>}.
template <typename... Args>
- static Handle<Managed<CppType>> Allocate(Isolate* isolate, Args&&... args) {
+ static Handle<Managed<CppType>> Allocate(Isolate* isolate,
+ size_t estimated_size,
+ Args&&... args) {
CppType* ptr = new CppType(std::forward<Args>(args)...);
- return FromSharedPtr(isolate, std::shared_ptr<CppType>(ptr));
+ return FromSharedPtr(isolate, estimated_size,
+ std::shared_ptr<CppType>(ptr));
}
// Create a {Managed<CppType>} from an existing raw {CppType*}. The returned
// object will now own the memory pointed to by {CppType}.
- static Handle<Managed<CppType>> FromRawPtr(Isolate* isolate, CppType* ptr) {
- return FromSharedPtr(isolate, std::shared_ptr<CppType>(ptr));
+ static Handle<Managed<CppType>> FromRawPtr(Isolate* isolate,
+ size_t estimated_size,
+ CppType* ptr) {
+ return FromSharedPtr(isolate, estimated_size,
+ std::shared_ptr<CppType>(ptr));
}
// Create a {Managed<CppType>} from an existing {std::unique_ptr<CppType>}.
// The returned object will now own the memory pointed to by {CppType}, and
// the unique pointer will be released.
static Handle<Managed<CppType>> FromUniquePtr(
- Isolate* isolate, std::unique_ptr<CppType> unique_ptr) {
- return FromSharedPtr(isolate, std::move(unique_ptr));
+ Isolate* isolate, size_t estimated_size,
+ std::unique_ptr<CppType> unique_ptr) {
+ return FromSharedPtr(isolate, estimated_size, std::move(unique_ptr));
}
// Create a {Managed<CppType>} from an existing {std::shared_ptr<CppType>}.
static Handle<Managed<CppType>> FromSharedPtr(
- Isolate* isolate, std::shared_ptr<CppType> shared_ptr) {
+ Isolate* isolate, size_t estimated_size,
+ std::shared_ptr<CppType> shared_ptr) {
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(estimated_size);
auto destructor = new ManagedPtrDestructor(
- new std::shared_ptr<CppType>(shared_ptr), Destructor);
+ estimated_size, new std::shared_ptr<CppType>(shared_ptr), Destructor);
Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 696dea91ea..05d2416996 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -11,6 +11,7 @@
#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/prototype-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
#include "src/property.h"
@@ -70,8 +71,6 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
Map::ConstructionCounterBits)
-TYPE_CHECKER(Map, MAP_TYPE)
-
InterceptorInfo* Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
FunctionTemplateInfo* info = GetFunctionTemplateInfo();
@@ -88,8 +87,9 @@ bool Map::IsInplaceGeneralizableField(PropertyConstness constness,
Representation representation,
FieldType* field_type) {
if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
- (constness == kConst)) {
- // kConst -> kMutable field generalization may happen in-place.
+ (constness == PropertyConstness::kConst)) {
+ // VariableMode::kConst -> PropertyConstness::kMutable field generalization
+ // may happen in-place.
return true;
}
if (representation.IsHeapObject() && !field_type->IsAny()) {
@@ -119,9 +119,9 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
// do not have fields that can be generalized in-place (without creation
// of a new map).
if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
- // The constness is either already kMutable or should become kMutable if
- // it was kConst.
- *constness = kMutable;
+ // The constness is either already PropertyConstness::kMutable or should
+ // become PropertyConstness::kMutable if it was VariableMode::kConst.
+ *constness = PropertyConstness::kMutable;
}
if (representation->IsHeapObject()) {
// The field type is either already Any or should become Any if it was
@@ -181,17 +181,17 @@ void Map::SetEnumLength(int length) {
FixedArrayBase* Map::GetInitialElements() const {
FixedArrayBase* result = nullptr;
if (has_fast_elements() || has_fast_string_wrapper_elements()) {
- result = GetHeap()->empty_fixed_array();
+ result = GetReadOnlyRoots().empty_fixed_array();
} else if (has_fast_sloppy_arguments_elements()) {
- result = GetHeap()->empty_sloppy_arguments_elements();
+ result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
} else if (has_fixed_typed_array_elements()) {
- result = GetHeap()->EmptyFixedTypedArrayForMap(this);
+ result = GetReadOnlyRoots().EmptyFixedTypedArrayForMap(this);
} else if (has_dictionary_elements()) {
- result = GetHeap()->empty_slow_element_dictionary();
+ result = GetReadOnlyRoots().empty_slow_element_dictionary();
} else {
UNREACHABLE();
}
- DCHECK(!GetHeap()->InNewSpace(result));
+ DCHECK(!Heap::InNewSpace(result));
return result;
}
@@ -268,9 +268,11 @@ int Map::GetInObjectPropertyOffset(int index) const {
}
Handle<Map> Map::AddMissingTransitionsForTesting(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> split_map,
+ Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
- return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
+ return AddMissingTransitions(isolate, split_map, descriptors,
+ full_layout_descriptor);
}
InstanceType Map::instance_type() const {
@@ -480,11 +482,11 @@ bool Map::CanBeDeprecated() const {
return false;
}
-void Map::NotifyLeafMapLayoutChange() {
+void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
if (is_stable()) {
mark_unstable();
dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(), DependentCode::kPrototypeCheckGroup);
+ isolate, DependentCode::kPrototypeCheckGroup);
}
}
@@ -498,7 +500,9 @@ bool Map::CanTransition() const {
return IsJSObject(instance_type());
}
-bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
+bool Map::IsBooleanMap() const {
+ return this == GetReadOnlyRoots().boolean_map();
+}
bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
@@ -530,9 +534,10 @@ bool Map::IsJSDataViewMap() const {
Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
+ DCHECK(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
+ kPrototypeOffset, value, mode);
}
LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
@@ -632,13 +637,16 @@ Object* Map::GetBackPointer() const {
if (object->IsMap()) {
return object;
}
- return GetIsolate()->heap()->undefined_value();
+ return GetReadOnlyRoots().undefined_value();
}
Map* Map::ElementsTransitionMap() {
DisallowHeapAllocation no_gc;
- return TransitionsAccessor(this, &no_gc)
- .SearchSpecial(GetHeap()->elements_transition_symbol());
+ // TODO(delphick): While it's safe to pass nullptr for Isolate* here as
+ // SearchSpecial doesn't need it, this is really ugly. Perhaps factor out a
+ // base class for methods not requiring an Isolate?
+ return TransitionsAccessor(nullptr, this, &no_gc)
+ .SearchSpecial(GetReadOnlyRoots().elements_transition_symbol());
}
Object* Map::prototype_info() const {
@@ -649,14 +657,15 @@ Object* Map::prototype_info() const {
void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
CHECK(is_prototype_map());
WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, Map::kTransitionsOrPrototypeInfoOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
+ Map::kTransitionsOrPrototypeInfoOffset, value,
+ mode);
}
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
CHECK(value->IsMap());
- CHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ CHECK(GetBackPointer()->IsUndefined());
CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
@@ -701,8 +710,9 @@ void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
set_constructor_or_backpointer(constructor, mode);
}
-Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
- return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
+Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map) {
+ return CopyInitialMap(isolate, map, map->instance_size(),
+ map->GetInObjectProperties(),
map->UnusedPropertyFields());
}
@@ -710,14 +720,14 @@ bool Map::IsInobjectSlackTrackingInProgress() const {
return construction_counter() != Map::kNoSlackTracking;
}
-void Map::InobjectSlackTrackingStep() {
+void Map::InobjectSlackTrackingStep(Isolate* isolate) {
// Slack tracking should only be performed on an initial map.
- DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ DCHECK(GetBackPointer()->IsUndefined());
if (!IsInobjectSlackTrackingInProgress()) return;
int counter = construction_counter();
set_construction_counter(counter - 1);
if (counter == kSlackTrackingCounterEnd) {
- CompleteInobjectSlackTracking();
+ CompleteInobjectSlackTracking(isolate);
}
}
@@ -742,8 +752,9 @@ bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
- ->NormalizedMapCacheVerify();
+ NormalizedMapCache* cache =
+ reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj));
+ cache->NormalizedMapCacheVerify(cache->GetIsolate());
}
#endif
return true;
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 995b626563..09afb83a8f 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -16,47 +16,52 @@
namespace v8 {
namespace internal {
-#define VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataObject) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(FreeSpace) \
- V(JSApiObject) \
- V(JSArrayBuffer) \
- V(JSFunction) \
- V(JSObject) \
- V(JSObjectFast) \
- V(JSWeakCollection) \
- V(Map) \
- V(NativeContext) \
- V(Oddball) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(ShortcutCandidate) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Struct) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(WasmInstanceObject) \
- V(WeakCell) \
+#define VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(DataHandler) \
+ V(DataObject) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PreParsedScopeData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(ShortcutCandidate) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Struct) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(WasmInstanceObject) \
+ V(WeakCell) \
V(WeakArray)
// For data objects, JS objects and structs along with generic visitor which
@@ -321,11 +326,11 @@ class Map : public HeapObject {
inline bool IsInobjectSlackTrackingInProgress() const;
// Does the tracking step.
- inline void InobjectSlackTrackingStep();
+ inline void InobjectSlackTrackingStep(Isolate* isolate);
// Completes inobject slack tracking for the transition tree starting at this
// initial map.
- void CompleteInobjectSlackTracking();
+ void CompleteInobjectSlackTracking(Isolate* isolate);
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -397,7 +402,7 @@ class Map : public HeapObject {
// Returns true if the current map doesn't have DICTIONARY_ELEMENTS but if a
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
- bool DictionaryElementsInPrototypeChainOnly();
+ bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
inline Map* ElementsTransitionMap();
@@ -441,8 +446,8 @@ class Map : public HeapObject {
static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
Handle<JSReceiver> prototype, Isolate* isolate);
- Map* FindRootMap() const;
- Map* FindFieldOwner(int descriptor) const;
+ Map* FindRootMap(Isolate* isolate) const;
+ Map* FindFieldOwner(Isolate* isolate, int descriptor) const;
inline int GetInObjectPropertyOffset(int index) const;
@@ -466,12 +471,12 @@ class Map : public HeapObject {
int target_inobject, int target_unused,
int* old_number_of_fields) const;
// TODO(ishell): moveit!
- static Handle<Map> GeneralizeAllFields(Handle<Map> map);
+ static Handle<Map> GeneralizeAllFields(Isolate* isolate, Handle<Map> map);
V8_WARN_UNUSED_RESULT static Handle<FieldType> GeneralizeFieldType(
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
- static void GeneralizeField(Handle<Map> map, int modify_index,
- PropertyConstness new_constness,
+ static void GeneralizeField(Isolate* isolate, Handle<Map> map,
+ int modify_index, PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type);
// Returns true if |descriptor|'th property is a field that may be generalized
@@ -485,28 +490,31 @@ class Map : public HeapObject {
// optimized code to more general elements kind.
// This generalization is necessary in order to ensure that elements kind
// transitions performed by stubs / optimized code don't silently transition
- // kMutable fields back to kConst state or fields with HeapObject
- // representation and "Any" type back to "Class" type.
+ // PropertyConstness::kMutable fields back to VariableMode::kConst state or
+ // fields with HeapObject representation and "Any" type back to "Class" type.
static inline void GeneralizeIfCanHaveTransitionableFastElementsKind(
Isolate* isolate, InstanceType instance_type,
PropertyConstness* constness, Representation* representation,
Handle<FieldType>* field_type);
- static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
+ static Handle<Map> ReconfigureProperty(Isolate* isolate, Handle<Map> map,
+ int modify_index,
PropertyKind new_kind,
PropertyAttributes new_attributes,
Representation new_representation,
Handle<FieldType> new_field_type);
- static Handle<Map> ReconfigureElementsKind(Handle<Map> map,
+ static Handle<Map> ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
ElementsKind new_elements_kind);
- static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
+ static Handle<Map> PrepareForDataProperty(Isolate* isolate,
+ Handle<Map> old_map,
int descriptor_number,
PropertyConstness constness,
Handle<Object> value);
- static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
+ static Handle<Map> Normalize(Isolate* isolate, Handle<Map> map,
+ PropertyNormalizationMode mode,
const char* reason);
// Tells whether the map is used for JSObjects in dictionary mode (ie
@@ -522,7 +530,8 @@ class Map : public HeapObject {
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
// TODO(jkummerow): make set_prototype private.
- static void SetPrototype(Handle<Map> map, Handle<Object> prototype,
+ static void SetPrototype(Isolate* isolate, Handle<Map> map,
+ Handle<Object> prototype,
bool enable_prototype_setup_mode = true);
// [constructor]: points back to the function or FunctionTemplateInfo
@@ -627,51 +636,55 @@ class Map : public HeapObject {
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns MaybeHandle<Map>() if no updated map
// is found.
- static MaybeHandle<Map> TryUpdate(Handle<Map> map) V8_WARN_UNUSED_RESULT;
+ static MaybeHandle<Map> TryUpdate(Isolate* isolate,
+ Handle<Map> map) V8_WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
// gathering type feedback. Use TryUpdate in those cases instead.
- static Handle<Map> Update(Handle<Map> map);
+ static Handle<Map> Update(Isolate* isolate, Handle<Map> map);
- static inline Handle<Map> CopyInitialMap(Handle<Map> map);
- static Handle<Map> CopyInitialMap(Handle<Map> map, int instance_size,
- int in_object_properties,
+ static inline Handle<Map> CopyInitialMap(Isolate* isolate, Handle<Map> map);
+ static Handle<Map> CopyInitialMap(Isolate* isolate, Handle<Map> map,
+ int instance_size, int in_object_properties,
int unused_property_fields);
static Handle<Map> CopyInitialMapNormalized(
- Handle<Map> map,
+ Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES);
- static Handle<Map> CopyDropDescriptors(Handle<Map> map);
- static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
+ static Handle<Map> CopyDropDescriptors(Isolate* isolate, Handle<Map> map);
+ static Handle<Map> CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag);
- static Handle<Object> WrapFieldType(Handle<FieldType> type);
- static FieldType* UnwrapFieldType(Object* wrapped_type);
+ static MaybeObjectHandle WrapFieldType(Handle<FieldType> type);
+ static FieldType* UnwrapFieldType(MaybeObject* wrapped_type);
V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithField(
- Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
- PropertyAttributes attributes, PropertyConstness constness,
- Representation representation, TransitionFlag flag);
+ Isolate* isolate, Handle<Map> map, Handle<Name> name,
+ Handle<FieldType> type, PropertyAttributes attributes,
+ PropertyConstness constness, Representation representation,
+ TransitionFlag flag);
V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithConstant(
- Handle<Map> map, Handle<Name> name, Handle<Object> constant,
- PropertyAttributes attributes, TransitionFlag flag);
+ Isolate* isolate, Handle<Map> map, Handle<Name> name,
+ Handle<Object> constant, PropertyAttributes attributes,
+ TransitionFlag flag);
// Returns a new map with all transitions dropped from the given map and
// the ElementsKind set.
- static Handle<Map> TransitionElementsTo(Handle<Map> map,
+ static Handle<Map> TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind to_kind);
- static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
+ static Handle<Map> AsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind);
- static Handle<Map> CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
- TransitionFlag flag);
+ static Handle<Map> CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind, TransitionFlag flag);
- static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
+ static Handle<Map> AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
Handle<SharedFunctionInfo> shared_info);
- static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
+ static Handle<Map> CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
PropertyAttributes attrs_to_add,
Handle<Symbol> transition_marker,
const char* reason);
@@ -682,7 +695,7 @@ class Map : public HeapObject {
// transitions to avoid an explosion in the number of maps for objects used as
// dictionaries.
inline bool TooManyFastProperties(StoreFromKeyed store_mode) const;
- static Handle<Map> TransitionToDataProperty(Handle<Map> map,
+ static Handle<Map> TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
@@ -692,7 +705,8 @@ class Map : public HeapObject {
Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
Handle<Object> getter, Handle<Object> setter,
PropertyAttributes attributes);
- static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
+ static Handle<Map> ReconfigureExistingProperty(Isolate* isolate,
+ Handle<Map> map,
int descriptor,
PropertyKind kind,
PropertyAttributes attributes);
@@ -702,11 +716,13 @@ class Map : public HeapObject {
// Returns a copy of the map, prepared for inserting into the transition
// tree (if the |map| owns descriptors then the new one will share
// descriptors with |map|).
- static Handle<Map> CopyForTransition(Handle<Map> map, const char* reason);
+ static Handle<Map> CopyForTransition(Isolate* isolate, Handle<Map> map,
+ const char* reason);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- static Handle<Map> Copy(Handle<Map> map, const char* reason);
+ static Handle<Map> Copy(Isolate* isolate, Handle<Map> map,
+ const char* reason);
static Handle<Map> Create(Isolate* isolate, int inobject_properties);
// Returns the next free property index (only valid for FAST MODE).
@@ -719,15 +735,18 @@ class Map : public HeapObject {
static inline int SlackForArraySize(int old_size, int size_limit);
- static void EnsureDescriptorSlack(Handle<Map> map, int slack);
+ static void EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map,
+ int slack);
// Returns the map to be used for instances when the given {prototype} is
// passed to an Object.create call. Might transition the given {prototype}.
- static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
+ static Handle<Map> GetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype);
// Similar to {GetObjectCreateMap} but does not transition {prototype} and
// fails gracefully by returning an empty handle instead.
- static MaybeHandle<Map> TryGetObjectCreateMap(Handle<HeapObject> prototype);
+ static MaybeHandle<Map> TryGetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype);
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -735,7 +754,8 @@ class Map : public HeapObject {
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or |nullptr| if no match is
// found at all.
- Map* FindElementsKindTransitionedMap(MapHandles const& candidates);
+ Map* FindElementsKindTransitionedMap(Isolate* isolate,
+ MapHandles const& candidates);
inline static bool IsJSObject(InstanceType type);
@@ -755,33 +775,28 @@ class Map : public HeapObject {
inline bool IsJSGlobalObjectMap() const;
inline bool IsJSTypedArrayMap() const;
inline bool IsJSDataViewMap() const;
-
inline bool IsSpecialReceiverMap() const;
-
inline bool IsCustomElementsReceiverMap() const;
- static void AddDependentCode(Handle<Map> map,
- DependentCode::DependencyGroup group,
- Handle<Code> code);
+ bool IsMapInArrayPrototypeChain(Isolate* isolate) const;
- bool IsMapInArrayPrototypeChain() const;
-
- static Handle<WeakCell> WeakCellForMap(Handle<Map> map);
+ static Handle<WeakCell> WeakCellForMap(Isolate* isolate, Handle<Map> map);
// Dispatched behavior.
DECL_PRINTER(Map)
DECL_VERIFIER(Map)
#ifdef VERIFY_HEAP
- void DictionaryMapVerify();
+ void DictionaryMapVerify(Isolate* isolate);
#endif
DECL_PRIMITIVE_ACCESSORS(visitor_id, VisitorId)
- static Handle<Map> TransitionToPrototype(Handle<Map> map,
+ static Handle<Map> TransitionToPrototype(Isolate* isolate, Handle<Map> map,
Handle<Object> prototype);
- static Handle<Map> TransitionToImmutableProto(Handle<Map> map);
+ static Handle<Map> TransitionToImmutableProto(Isolate* isolate,
+ Handle<Map> map);
static const int kMaxPreAllocatedPropertyFields = 255;
@@ -828,16 +843,17 @@ class Map : public HeapObject {
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
- void PrintMapDetails(std::ostream& os, JSObject* holder = nullptr);
+ void PrintMapDetails(std::ostream& os);
static inline Handle<Map> AddMissingTransitionsForTesting(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> split_map,
+ Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor);
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
// the descriptor array.
- inline void NotifyLeafMapLayoutChange();
+ inline void NotifyLeafMapLayoutChange(Isolate* isolate);
static VisitorId GetVisitorId(Map* map);
@@ -867,81 +883,81 @@ class Map : public HeapObject {
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind);
+ Map* LookupElementsTransitionMap(Isolate* isolate,
+ ElementsKind elements_kind);
// Tries to replay property transitions starting from this (root) map using
// the descriptor array of the |map|. The |root_map| is expected to have
// proper elements kind and therefore elements kinds transitions are not
// taken by this function. Returns |nullptr| if matching transition map is
// not found.
- Map* TryReplayPropertyTransitions(Map* map);
+ Map* TryReplayPropertyTransitions(Isolate* isolate, Map* map);
- static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
- Handle<Name> name, SimpleTransitionFlag flag);
+ static void ConnectTransition(Isolate* isolate, Handle<Map> parent,
+ Handle<Map> child, Handle<Name> name,
+ SimpleTransitionFlag flag);
bool EquivalentToForTransition(const Map* other) const;
bool EquivalentToForElementsKindTransition(const Map* other) const;
- static Handle<Map> RawCopy(Handle<Map> map, int instance_size,
- int inobject_properties);
- static Handle<Map> ShareDescriptor(Handle<Map> map,
+ static Handle<Map> RawCopy(Isolate* isolate, Handle<Map> map,
+ int instance_size, int inobject_properties);
+ static Handle<Map> ShareDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor);
static Handle<Map> AddMissingTransitions(
- Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor);
static void InstallDescriptors(
- Handle<Map> parent_map, Handle<Map> child_map, int new_descriptor,
- Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> parent_map, Handle<Map> child_map,
+ int new_descriptor, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor);
- static Handle<Map> CopyAddDescriptor(Handle<Map> map, Descriptor* descriptor,
+ static Handle<Map> CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
+ Descriptor* descriptor,
TransitionFlag flag);
static Handle<Map> CopyReplaceDescriptors(
- Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
MaybeHandle<Name> maybe_name, const char* reason,
SimpleTransitionFlag simple_flag);
- static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
+ static Handle<Map> CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor, int index,
TransitionFlag flag);
- static V8_WARN_UNUSED_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
- Handle<Map> map, int descriptor, PropertyKind kind,
- PropertyAttributes attributes, const char** reason);
-
- static Handle<Map> CopyNormalized(Handle<Map> map,
+ static Handle<Map> CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode);
// TODO(ishell): Move to MapUpdater.
- static Handle<Map> CopyGeneralizeAllFields(
- Handle<Map> map, ElementsKind elements_kind, int modify_index,
- PropertyKind kind, PropertyAttributes attributes, const char* reason);
+ static Handle<Map> CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
+ ElementsKind elements_kind,
+ int modify_index,
+ PropertyKind kind,
+ PropertyAttributes attributes,
+ const char* reason);
- void DeprecateTransitionTree();
+ void DeprecateTransitionTree(Isolate* isolate);
- void ReplaceDescriptors(DescriptorArray* new_descriptors,
+ void ReplaceDescriptors(Isolate* isolate, DescriptorArray* new_descriptors,
LayoutDescriptor* new_layout_descriptor);
// Update field type of the given descriptor to new representation and new
// type. The type must be prepared for storing in descriptor array:
// it must be either a simple type or a map wrapped in a weak cell.
- void UpdateFieldType(int descriptor_number, Handle<Name> name,
- PropertyConstness new_constness,
+ void UpdateFieldType(Isolate* isolate, int descriptor_number,
+ Handle<Name> name, PropertyConstness new_constness,
Representation new_representation,
- Handle<Object> new_wrapped_type);
+ MaybeObjectHandle new_wrapped_type);
// TODO(ishell): Move to MapUpdater.
- void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
- PropertyAttributes attributes);
+ void PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
+ PropertyKind kind, PropertyAttributes attributes);
// TODO(ishell): Move to MapUpdater.
- void PrintGeneralization(FILE* file, const char* reason, int modify_index,
- int split, int descriptors, bool constant_to_field,
- Representation old_representation,
- Representation new_representation,
- MaybeHandle<FieldType> old_field_type,
- MaybeHandle<Object> old_value,
- MaybeHandle<FieldType> new_field_type,
- MaybeHandle<Object> new_value);
+ void PrintGeneralization(
+ Isolate* isolate, FILE* file, const char* reason, int modify_index,
+ int split, int descriptors, bool constant_to_field,
+ Representation old_representation, Representation new_representation,
+ MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
+ MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -953,8 +969,11 @@ class Map : public HeapObject {
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
-class NormalizedMapCache : public FixedArray {
+class NormalizedMapCache : public FixedArray, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
static Handle<NormalizedMapCache> New(Isolate* isolate);
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
@@ -969,6 +988,7 @@ class NormalizedMapCache : public FixedArray {
static inline bool IsNormalizedMapCache(const HeapObject* obj);
DECL_VERIFIER(NormalizedMapCache)
+
private:
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index bb9a4c5790..0c04550673 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -104,6 +104,8 @@ Object* MaybeObject::GetHeapObjectOrSmi() {
return GetHeapObject();
}
+bool MaybeObject::IsObject() { return IsSmi() || IsStrongHeapObject(); }
+
Object* MaybeObject::ToObject() {
DCHECK(!HasWeakHeapObjectTag(this));
return reinterpret_cast<Object*>(this);
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index efbf3862f4..0d8751b652 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -43,6 +43,7 @@ class MaybeObject {
inline HeapObject* GetHeapObject();
inline Object* GetHeapObjectOrSmi();
+ inline bool IsObject();
inline Object* ToObject();
static MaybeObject* FromSmi(Smi* smi) {
@@ -58,7 +59,7 @@ class MaybeObject {
static inline MaybeObject* MakeWeak(MaybeObject* object);
#ifdef VERIFY_HEAP
- static void VerifyMaybeObjectPointer(MaybeObject* p);
+ static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p);
#endif
// Prints this object without details.
@@ -71,7 +72,6 @@ class MaybeObject {
#ifdef OBJECT_PRINT
void Print();
-
void Print(std::ostream& os);
#else
void Print() { ShortPrint(); }
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 02202ad72c..1fffd01005 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -35,7 +35,6 @@ ModuleInfo* Module::info() const {
: GetSharedFunctionInfo()->scope_info()->ModuleDescriptorInfo();
}
-TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
CAST_ACCESSOR(JSModuleNamespace)
ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
@@ -50,10 +49,41 @@ SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
CAST_ACCESSOR(ModuleInfo)
-bool HeapObject::IsModuleInfo() const {
- return map() == GetHeap()->module_info_map();
+FixedArray* ModuleInfo::module_requests() const {
+ return FixedArray::cast(get(kModuleRequestsIndex));
}
+FixedArray* ModuleInfo::special_exports() const {
+ return FixedArray::cast(get(kSpecialExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_exports() const {
+ return FixedArray::cast(get(kRegularExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_imports() const {
+ return FixedArray::cast(get(kRegularImportsIndex));
+}
+
+FixedArray* ModuleInfo::namespace_imports() const {
+ return FixedArray::cast(get(kNamespaceImportsIndex));
+}
+
+FixedArray* ModuleInfo::module_request_positions() const {
+ return FixedArray::cast(get(kModuleRequestPositionsIndex));
+}
+
+#ifdef DEBUG
+bool ModuleInfo::Equals(ModuleInfo* other) const {
+ return regular_exports() == other->regular_exports() &&
+ regular_imports() == other->regular_imports() &&
+ special_exports() == other->special_exports() &&
+ namespace_imports() == other->namespace_imports() &&
+ module_requests() == other->module_requests() &&
+ module_request_positions() == other->module_request_positions();
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index f1f9955946..8672d43264 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -12,6 +12,7 @@
#include "src/ast/modules.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
@@ -120,20 +121,18 @@ int ImportIndex(int cell_index) {
} // anonymous namespace
-void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
+void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
+ Handle<String> name,
Handle<ModuleInfoEntry> entry) {
- Isolate* isolate = module->GetIsolate();
Handle<ObjectHashTable> exports(module->exports(), isolate);
DCHECK(exports->Lookup(name)->IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, entry);
module->set_exports(*exports);
}
-void Module::CreateExport(Handle<Module> module, int cell_index,
- Handle<FixedArray> names) {
+void Module::CreateExport(Isolate* isolate, Handle<Module> module,
+ int cell_index, Handle<FixedArray> names) {
DCHECK_LT(0, names->length());
- Isolate* isolate = module->GetIsolate();
-
Handle<Cell> cell =
isolate->factory()->NewCell(isolate->factory()->undefined_value());
module->regular_exports()->set(ExportIndex(cell_index), *cell);
@@ -164,8 +163,8 @@ Cell* Module::GetCell(int cell_index) {
return Cell::cast(cell);
}
-Handle<Object> Module::LoadVariable(Handle<Module> module, int cell_index) {
- Isolate* isolate = module->GetIsolate();
+Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
+ int cell_index) {
return handle(module->GetCell(cell_index)->value(), isolate);
}
@@ -177,9 +176,9 @@ void Module::StoreVariable(Handle<Module> module, int cell_index,
}
#ifdef DEBUG
-void Module::PrintStatusTransition(Status new_status) {
+void Module::PrintStatusTransition(Isolate* isolate, Status new_status) {
if (FLAG_trace_module_status) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
<< " for ";
script()->GetNameOrSourceURL()->Print(os);
@@ -190,35 +189,33 @@ void Module::PrintStatusTransition(Status new_status) {
}
#endif // DEBUG
-void Module::SetStatus(Status new_status) {
+void Module::SetStatus(Isolate* isolate, Status new_status) {
DisallowHeapAllocation no_alloc;
DCHECK_LE(status(), new_status);
DCHECK_NE(new_status, Module::kErrored);
#ifdef DEBUG
- PrintStatusTransition(new_status);
+ PrintStatusTransition(isolate, new_status);
#endif // DEBUG
set_status(new_status);
}
-void Module::ResetGraph(Handle<Module> module) {
+void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
DCHECK_NE(module->status(), kInstantiating);
DCHECK_NE(module->status(), kEvaluating);
if (module->status() != kPreInstantiating) return;
- Isolate* isolate = module->GetIsolate();
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- Reset(module);
+ Reset(isolate, module);
for (int i = 0; i < requested_modules->length(); ++i) {
Handle<Object> descendant(requested_modules->get(i), isolate);
if (descendant->IsModule()) {
- ResetGraph(Handle<Module>::cast(descendant));
+ ResetGraph(isolate, Handle<Module>::cast(descendant));
} else {
DCHECK(descendant->IsUndefined(isolate));
}
}
}
-void Module::Reset(Handle<Module> module) {
- Isolate* isolate = module->GetIsolate();
+void Module::Reset(Isolate* isolate, Handle<Module> module) {
Factory* factory = isolate->factory();
DCHECK(module->status() == kPreInstantiating ||
@@ -243,7 +240,7 @@ void Module::Reset(Handle<Module> module) {
module->set_code(JSFunction::cast(module->code())->shared());
}
#ifdef DEBUG
- module->PrintStatusTransition(kUninstantiated);
+ module->PrintStatusTransition(isolate, kUninstantiated);
#endif // DEBUG
module->set_status(kUninstantiated);
module->set_exports(*exports);
@@ -254,17 +251,15 @@ void Module::Reset(Handle<Module> module) {
module->set_dfs_ancestor_index(-1);
}
-void Module::RecordError() {
+void Module::RecordError(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
- Isolate* isolate = GetIsolate();
-
DCHECK(exception()->IsTheHole(isolate));
Object* the_exception = isolate->pending_exception();
DCHECK(!the_exception->IsTheHole(isolate));
set_code(info());
#ifdef DEBUG
- PrintStatusTransition(Module::kErrored);
+ PrintStatusTransition(isolate, Module::kErrored);
#endif // DEBUG
set_status(Module::kErrored);
set_exception(the_exception);
@@ -273,7 +268,7 @@ void Module::RecordError() {
Object* Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
- DCHECK(!exception()->IsTheHole(GetIsolate()));
+ DCHECK(!exception()->IsTheHole());
return exception();
}
@@ -301,31 +296,29 @@ SharedFunctionInfo* Module::GetSharedFunctionInfo() const {
UNREACHABLE();
}
-MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
+MaybeHandle<Cell> Module::ResolveImport(Isolate* isolate, Handle<Module> module,
Handle<String> name, int module_request,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
Handle<String> specifier(
String::cast(module->info()->module_requests()->get(module_request)),
isolate);
- MaybeHandle<Cell> result = Module::ResolveExport(
- requested_module, specifier, name, loc, must_resolve, resolve_set);
+ MaybeHandle<Cell> result =
+ Module::ResolveExport(isolate, requested_module, specifier, name, loc,
+ must_resolve, resolve_set);
DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
return result;
}
-MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
+MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Handle<String> module_specifier,
Handle<String> export_name,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
DCHECK_GE(module->status(), kPreInstantiating);
DCHECK_NE(module->status(), kEvaluating);
-
- Isolate* isolate = module->GetIsolate();
Handle<Object> object(module->exports()->Lookup(export_name), isolate);
if (object->IsCell()) {
// Already resolved (e.g. because it's a local export).
@@ -364,8 +357,8 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (!ResolveImport(module, import_name, entry->module_request(), new_loc,
- true, resolve_set)
+ if (!ResolveImport(isolate, module, import_name, entry->module_request(),
+ new_loc, true, resolve_set)
.ToHandle(&cell)) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Cell>();
@@ -382,16 +375,16 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
}
DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(
- module, module_specifier, export_name, loc, must_resolve, resolve_set);
+ return Module::ResolveExportUsingStarExports(isolate, module,
+ module_specifier, export_name,
+ loc, must_resolve, resolve_set);
}
MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> module_specifier,
+ Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- Isolate* isolate = module->GetIsolate();
- if (!export_name->Equals(isolate->heap()->default_string())) {
+ if (!export_name->Equals(ReadOnlyRoots(isolate).default_string())) {
// Go through all star exports looking for the given name. If multiple star
// exports provide the name, make sure they all map it to the same cell.
Handle<Cell> unique_cell;
@@ -408,8 +401,8 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (ResolveImport(module, export_name, entry->module_request(), new_loc,
- false, resolve_set)
+ if (ResolveImport(isolate, module, export_name, entry->module_request(),
+ new_loc, false, resolve_set)
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
@@ -443,11 +436,12 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
return MaybeHandle<Cell>();
}
-bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
+bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
+ v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
#ifdef DEBUG
if (FLAG_trace_module_status) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Instantiating module ";
module->script()->GetNameOrSourceURL()->Print(os);
#ifndef OBJECT_PRINT
@@ -456,18 +450,16 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
}
#endif // DEBUG
- if (!PrepareInstantiate(module, context, callback)) {
- ResetGraph(module);
+ if (!PrepareInstantiate(isolate, module, context, callback)) {
+ ResetGraph(isolate, module);
return false;
}
-
- Isolate* isolate = module->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
ZoneForwardList<Handle<Module>> stack(&zone);
unsigned dfs_index = 0;
- if (!FinishInstantiate(module, &stack, &dfs_index, &zone)) {
+ if (!FinishInstantiate(isolate, module, &stack, &dfs_index, &zone)) {
for (auto& descendant : stack) {
- Reset(descendant);
+ Reset(isolate, descendant);
}
DCHECK_EQ(module->status(), kUninstantiated);
return false;
@@ -478,15 +470,13 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
return true;
}
-bool Module::PrepareInstantiate(Handle<Module> module,
+bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
DCHECK_NE(module->status(), kEvaluating);
DCHECK_NE(module->status(), kInstantiating);
if (module->status() >= kPreInstantiating) return true;
- module->SetStatus(kPreInstantiating);
-
- Isolate* isolate = module->GetIsolate();
+ module->SetStatus(isolate, kPreInstantiating);
STACK_CHECK(isolate, false);
// Obtain requested modules.
@@ -510,7 +500,7 @@ bool Module::PrepareInstantiate(Handle<Module> module,
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- if (!PrepareInstantiate(requested_module, context, callback)) {
+ if (!PrepareInstantiate(isolate, requested_module, context, callback)) {
return false;
}
}
@@ -521,7 +511,7 @@ bool Module::PrepareInstantiate(Handle<Module> module,
int cell_index = module_info->RegularExportCellIndex(i);
Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
isolate);
- CreateExport(module, cell_index, export_names);
+ CreateExport(isolate, module, cell_index, export_names);
}
// Partially set up indirect exports.
@@ -535,16 +525,16 @@ bool Module::PrepareInstantiate(Handle<Module> module,
ModuleInfoEntry::cast(special_exports->get(i)), isolate);
Handle<Object> export_name(entry->export_name(), isolate);
if (export_name->IsUndefined(isolate)) continue; // Star export.
- CreateIndirectExport(module, Handle<String>::cast(export_name), entry);
+ CreateIndirectExport(isolate, module, Handle<String>::cast(export_name),
+ entry);
}
DCHECK_EQ(module->status(), kPreInstantiating);
return true;
}
-bool Module::RunInitializationCode(Handle<Module> module) {
+bool Module::RunInitializationCode(Isolate* isolate, Handle<Module> module) {
DCHECK_EQ(module->status(), kInstantiating);
- Isolate* isolate = module->GetIsolate();
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -561,7 +551,7 @@ bool Module::RunInitializationCode(Handle<Module> module) {
return true;
}
-bool Module::MaybeTransitionComponent(Handle<Module> module,
+bool Module::MaybeTransitionComponent(Isolate* isolate, Handle<Module> module,
ZoneForwardList<Handle<Module>>* stack,
Status new_status) {
DCHECK(new_status == kInstantiated || new_status == kEvaluated);
@@ -579,22 +569,20 @@ bool Module::MaybeTransitionComponent(Handle<Module> module,
DCHECK_EQ(ancestor->status(),
new_status == kInstantiated ? kInstantiating : kEvaluating);
if (new_status == kInstantiated) {
- if (!RunInitializationCode(ancestor)) return false;
+ if (!RunInitializationCode(isolate, ancestor)) return false;
}
- ancestor->SetStatus(new_status);
+ ancestor->SetStatus(isolate, new_status);
} while (*ancestor != *module);
}
return true;
}
-bool Module::FinishInstantiate(Handle<Module> module,
+bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index, Zone* zone) {
DCHECK_NE(module->status(), kEvaluating);
if (module->status() >= kInstantiating) return true;
DCHECK_EQ(module->status(), kPreInstantiating);
-
- Isolate* isolate = module->GetIsolate();
STACK_CHECK(isolate, false);
// Instantiate SharedFunctionInfo and mark module as instantiating for
@@ -605,7 +593,7 @@ bool Module::FinishInstantiate(Handle<Module> module,
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
module->set_code(*function);
- module->SetStatus(kInstantiating);
+ module->SetStatus(isolate, kInstantiating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
stack->push_front(module);
@@ -616,7 +604,7 @@ bool Module::FinishInstantiate(Handle<Module> module,
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- if (!FinishInstantiate(requested_module, stack, dfs_index, zone)) {
+ if (!FinishInstantiate(isolate, requested_module, stack, dfs_index, zone)) {
return false;
}
@@ -648,8 +636,8 @@ bool Module::FinishInstantiate(Handle<Module> module,
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(zone);
Handle<Cell> cell;
- if (!ResolveImport(module, name, entry->module_request(), loc, true,
- &resolve_set)
+ if (!ResolveImport(isolate, module, name, entry->module_request(), loc,
+ true, &resolve_set)
.ToHandle(&cell)) {
return false;
}
@@ -665,20 +653,20 @@ bool Module::FinishInstantiate(Handle<Module> module,
if (name->IsUndefined(isolate)) continue; // Star export.
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(zone);
- if (ResolveExport(module, Handle<String>(), Handle<String>::cast(name), loc,
- true, &resolve_set)
+ if (ResolveExport(isolate, module, Handle<String>(),
+ Handle<String>::cast(name), loc, true, &resolve_set)
.is_null()) {
return false;
}
}
- return MaybeTransitionComponent(module, stack, kInstantiated);
+ return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
}
-MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
+MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
#ifdef DEBUG
if (FLAG_trace_module_status) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Evaluating module ";
module->script()->GetNameOrSourceURL()->Print(os);
#ifndef OBJECT_PRINT
@@ -686,8 +674,6 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
#endif // OBJECT_PRINT
}
#endif // DEBUG
-
- Isolate* isolate = module->GetIsolate();
if (module->status() == kErrored) {
isolate->Throw(module->GetException());
return MaybeHandle<Object>();
@@ -699,10 +685,10 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
ZoneForwardList<Handle<Module>> stack(&zone);
unsigned dfs_index = 0;
Handle<Object> result;
- if (!Evaluate(module, &stack, &dfs_index).ToHandle(&result)) {
+ if (!Evaluate(isolate, module, &stack, &dfs_index).ToHandle(&result)) {
for (auto& descendant : stack) {
DCHECK_EQ(descendant->status(), kEvaluating);
- descendant->RecordError();
+ descendant->RecordError(isolate);
}
DCHECK_EQ(module->GetException(), isolate->pending_exception());
return MaybeHandle<Object>();
@@ -712,10 +698,9 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
return result;
}
-MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
+MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index) {
- Isolate* isolate = module->GetIsolate();
if (module->status() == kErrored) {
isolate->Throw(module->GetException());
return MaybeHandle<Object>();
@@ -730,7 +715,7 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
isolate);
module->set_code(
generator->function()->shared()->scope_info()->ModuleDescriptorInfo());
- module->SetStatus(kEvaluating);
+ module->SetStatus(isolate, kEvaluating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
stack->push_front(module);
@@ -741,8 +726,8 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- RETURN_ON_EXCEPTION(isolate, Evaluate(requested_module, stack, dfs_index),
- Object);
+ RETURN_ON_EXCEPTION(
+ isolate, Evaluate(isolate, requested_module, stack, dfs_index), Object);
DCHECK_GE(requested_module->status(), kEvaluating);
DCHECK_NE(requested_module->status(), kErrored);
@@ -769,9 +754,9 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
Object);
DCHECK(static_cast<JSIteratorResult*>(JSObject::cast(*result))
->done()
- ->BooleanValue());
+ ->BooleanValue(isolate));
- CHECK(MaybeTransitionComponent(module, stack, kEvaluated));
+ CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
return handle(
static_cast<JSIteratorResult*>(JSObject::cast(*result))->value(),
isolate);
@@ -779,7 +764,7 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
namespace {
-void FetchStarExports(Handle<Module> module, Zone* zone,
+void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
DCHECK_GE(module->status(), Module::kInstantiating);
@@ -787,20 +772,19 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
bool cycle = !visited->insert(module).second;
if (cycle) return;
-
- Isolate* isolate = module->GetIsolate();
Handle<ObjectHashTable> exports(module->exports(), isolate);
UnorderedStringMap more_exports(zone);
// TODO(neis): Only allocate more_exports if there are star exports.
// Maybe split special_exports into indirect_exports and star_exports.
+ ReadOnlyRoots roots(isolate);
Handle<FixedArray> special_exports(module->info()->special_exports(),
isolate);
for (int i = 0, n = special_exports->length(); i < n; ++i) {
Handle<ModuleInfoEntry> entry(
ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name()->IsUndefined(isolate)) {
+ if (!entry->export_name()->IsUndefined(roots)) {
continue; // Indirect export.
}
@@ -809,7 +793,7 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
isolate);
// Recurse.
- FetchStarExports(requested_module, zone, visited);
+ FetchStarExports(isolate, requested_module, zone, visited);
// Collect all of [requested_module]'s exports that must be added to
// [module]'s exports (i.e. to [exports]). We record these in
@@ -819,24 +803,24 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
isolate);
for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
Object* key;
- if (!requested_exports->ToKey(isolate, i, &key)) continue;
+ if (!requested_exports->ToKey(roots, i, &key)) continue;
Handle<String> name(String::cast(key), isolate);
- if (name->Equals(isolate->heap()->default_string())) continue;
- if (!exports->Lookup(name)->IsTheHole(isolate)) continue;
+ if (name->Equals(roots.default_string())) continue;
+ if (!exports->Lookup(name)->IsTheHole(roots)) continue;
Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
auto insert_result = more_exports.insert(std::make_pair(name, cell));
if (!insert_result.second) {
auto it = insert_result.first;
- if (*it->second == *cell || it->second->IsUndefined(isolate)) {
+ if (*it->second == *cell || it->second->IsUndefined(roots)) {
// We already recorded this mapping before, or the name is already
// known to be ambiguous. In either case, there's nothing to do.
} else {
DCHECK(it->second->IsCell());
// Different star exports provide different cells for this name, hence
// mark the name as ambiguous.
- it->second = isolate->factory()->undefined_value();
+ it->second = roots.undefined_value_handle();
}
}
}
@@ -845,7 +829,7 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
// Copy [more_exports] into [exports].
for (const auto& elem : more_exports) {
if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
- DCHECK(!elem.first->Equals(isolate->heap()->default_string()));
+ DCHECK(!elem.first->Equals(ReadOnlyRoots(isolate).default_string()));
DCHECK(elem.second->IsCell());
exports = ObjectHashTable::Put(exports, elem.first, elem.second);
}
@@ -854,19 +838,19 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
} // anonymous namespace
-Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module,
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
+ Handle<Module> module,
int module_request) {
- Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
- return Module::GetModuleNamespace(requested_module);
+ return Module::GetModuleNamespace(isolate, requested_module);
}
-Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
- Isolate* isolate = module->GetIsolate();
-
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
+ Handle<Module> module) {
Handle<HeapObject> object(module->module_namespace(), isolate);
- if (!object->IsUndefined(isolate)) {
+ ReadOnlyRoots roots(isolate);
+ if (!object->IsUndefined(roots)) {
// Namespace object already exists.
return Handle<JSModuleNamespace>::cast(object);
}
@@ -874,24 +858,23 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
// Collect the export names.
Zone zone(isolate->allocator(), ZONE_NAME);
UnorderedModuleSet visited(&zone);
- FetchStarExports(module, &zone, &visited);
+ FetchStarExports(isolate, module, &zone, &visited);
Handle<ObjectHashTable> exports(module->exports(), isolate);
ZoneVector<Handle<String>> names(&zone);
names.reserve(exports->NumberOfElements());
for (int i = 0, n = exports->Capacity(); i < n; ++i) {
Object* key;
- if (!exports->ToKey(isolate, i, &key)) continue;
+ if (!exports->ToKey(roots, i, &key)) continue;
names.push_back(handle(String::cast(key), isolate));
}
DCHECK_EQ(static_cast<int>(names.size()), exports->NumberOfElements());
// Sort them alphabetically.
- struct {
- bool operator()(Handle<String> a, Handle<String> b) {
- return String::Compare(a, b) == ComparisonResult::kLessThan;
- }
- } StringLess;
- std::sort(names.begin(), names.end(), StringLess);
+ std::sort(names.begin(), names.end(),
+ [&isolate](Handle<String> a, Handle<String> b) {
+ return String::Compare(isolate, a, b) ==
+ ComparisonResult::kLessThan;
+ });
// Create the namespace object (initially empty).
Handle<JSModuleNamespace> ns = isolate->factory()->NewJSModuleNamespace();
@@ -920,9 +903,8 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
return ns;
}
-MaybeHandle<Object> JSModuleNamespace::GetExport(Handle<String> name) {
- Isolate* isolate = name->GetIsolate();
-
+MaybeHandle<Object> JSModuleNamespace::GetExport(Isolate* isolate,
+ Handle<String> name) {
Handle<Object> object(module()->exports()->Lookup(name), isolate);
if (object->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
@@ -943,7 +925,7 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
Handle<String> name = Handle<String>::cast(it->GetName());
DCHECK_EQ(it->state(), LookupIterator::ACCESSOR);
- Isolate* isolate = name->GetIsolate();
+ Isolate* isolate = it->isolate();
Handle<Object> lookup(object->module()->exports()->Lookup(name), isolate);
if (lookup->IsTheHole(isolate)) {
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 3cdae658cd..23c1356817 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -25,8 +25,11 @@ class String;
class Zone;
// The runtime representation of an ECMAScript module.
-class Module : public Struct {
+class Module : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
@@ -95,26 +98,29 @@ class Module : public Struct {
// otherwise. (In the case where the callback throws an exception, that
// exception is propagated.)
static V8_WARN_UNUSED_RESULT bool Instantiate(
- Handle<Module> module, v8::Local<v8::Context> context,
+ Isolate* isolate, Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
// Implementation of spec operation ModuleEvaluation.
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
- Handle<Module> module);
+ Isolate* isolate, Handle<Module> module);
Cell* GetCell(int cell_index);
- static Handle<Object> LoadVariable(Handle<Module> module, int cell_index);
+ static Handle<Object> LoadVariable(Isolate* isolate, Handle<Module> module,
+ int cell_index);
static void StoreVariable(Handle<Module> module, int cell_index,
Handle<Object> value);
// Get the namespace object for [module_request] of [module]. If it doesn't
// exist yet, it is created.
- static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module,
+ static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
+ Handle<Module> module,
int module_request);
// Get the namespace object for [module]. If it doesn't exist yet, it is
// created.
- static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module);
+ static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
+ Handle<Module> module);
static const int kCodeOffset = HeapObject::kHeaderSize;
static const int kExportsOffset = kCodeOffset + kPointerSize;
@@ -143,9 +149,10 @@ class Module : public Struct {
// Helpers for Instantiate and Evaluate.
- static void CreateExport(Handle<Module> module, int cell_index,
- Handle<FixedArray> names);
- static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
+ static void CreateExport(Isolate* isolate, Handle<Module> module,
+ int cell_index, Handle<FixedArray> names);
+ static void CreateIndirectExport(Isolate* isolate, Handle<Module> module,
+ Handle<String> name,
Handle<ModuleInfoEntry> entry);
// The [must_resolve] argument indicates whether or not an exception should be
@@ -158,47 +165,48 @@ class Module : public Struct {
// exception (so check manually!).
class ResolveSet;
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
- Handle<Module> module, Handle<String> module_specifier,
+ Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
- Handle<Module> module, Handle<String> name, int module_request,
- MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
+ Isolate* isolate, Handle<Module> module, Handle<String> name,
+ int module_request, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> module_specifier,
+ Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
- Handle<Module> module, v8::Local<v8::Context> context,
+ Isolate* isolate, Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
- Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
- unsigned* dfs_index, Zone* zone);
+ Isolate* isolate, Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index, Zone* zone);
static V8_WARN_UNUSED_RESULT bool RunInitializationCode(
- Handle<Module> module);
+ Isolate* isolate, Handle<Module> module);
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
- Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
- unsigned* dfs_index);
+ Isolate* isolate, Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index);
static V8_WARN_UNUSED_RESULT bool MaybeTransitionComponent(
- Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
- Status new_status);
+ Isolate* isolate, Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack, Status new_status);
// Set module's status back to kUninstantiated and reset other internal state.
// This is used when instantiation fails.
- static void Reset(Handle<Module> module);
- static void ResetGraph(Handle<Module> module);
+ static void Reset(Isolate* isolate, Handle<Module> module);
+ static void ResetGraph(Isolate* isolate, Handle<Module> module);
// To set status to kErrored, RecordError should be used.
- void SetStatus(Status status);
- void RecordError();
+ void SetStatus(Isolate* isolate, Status status);
+ void RecordError(Isolate* isolate);
#ifdef DEBUG
// For --trace-module-status.
- void PrintStatusTransition(Status new_status);
+ void PrintStatusTransition(Isolate* isolate, Status new_status);
#endif // DEBUG
DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
@@ -219,7 +227,8 @@ class JSModuleNamespace : public JSObject {
// Retrieve the value exported by [module] under the given [name]. If there is
// no such export, return Just(undefined). If the export is uninitialized,
// schedule an exception and return Nothing.
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> GetExport(Isolate* isolate,
+ Handle<String> name);
// Return the (constant) property attributes for the referenced property,
// which is assumed to correspond to an export. If the export is
@@ -250,29 +259,12 @@ class ModuleInfo : public FixedArray {
static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
ModuleDescriptor* descr);
- inline FixedArray* module_requests() const {
- return FixedArray::cast(get(kModuleRequestsIndex));
- }
-
- inline FixedArray* special_exports() const {
- return FixedArray::cast(get(kSpecialExportsIndex));
- }
-
- inline FixedArray* regular_exports() const {
- return FixedArray::cast(get(kRegularExportsIndex));
- }
-
- inline FixedArray* regular_imports() const {
- return FixedArray::cast(get(kRegularImportsIndex));
- }
-
- inline FixedArray* namespace_imports() const {
- return FixedArray::cast(get(kNamespaceImportsIndex));
- }
-
- inline FixedArray* module_request_positions() const {
- return FixedArray::cast(get(kModuleRequestPositionsIndex));
- }
+ inline FixedArray* module_requests() const;
+ inline FixedArray* special_exports() const;
+ inline FixedArray* regular_exports() const;
+ inline FixedArray* regular_imports() const;
+ inline FixedArray* namespace_imports() const;
+ inline FixedArray* module_request_positions() const;
// Accessors for [regular_exports].
int RegularExportCount() const;
@@ -281,14 +273,7 @@ class ModuleInfo : public FixedArray {
FixedArray* RegularExportExportNames(int i) const;
#ifdef DEBUG
- inline bool Equals(ModuleInfo* other) const {
- return regular_exports() == other->regular_exports() &&
- regular_imports() == other->regular_imports() &&
- special_exports() == other->special_exports() &&
- namespace_imports() == other->namespace_imports() &&
- module_requests() == other->module_requests() &&
- module_request_positions() == other->module_request_positions();
- }
+ inline bool Equals(ModuleInfo* other) const;
#endif
private:
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index b4ebeb632b..e768a40ec2 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -25,8 +25,6 @@ BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
BOOL_ACCESSORS(Symbol, flags, is_interesting_symbol, kInterestingSymbolBit)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-
bool Symbol::is_private_field() const {
bool value = BooleanBit::get(flags(), kPrivateFieldBit);
DCHECK_IMPLIES(value, is_private());
@@ -71,13 +69,13 @@ bool Name::Equals(Name* other) {
return String::cast(this)->SlowEquals(String::cast(other));
}
-bool Name::Equals(Handle<Name> one, Handle<Name> two) {
+bool Name::Equals(Isolate* isolate, Handle<Name> one, Handle<Name> two) {
if (one.is_identical_to(two)) return true;
if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
one->IsSymbol() || two->IsSymbol()) {
return false;
}
- return String::SlowEquals(Handle<String>::cast(one),
+ return String::SlowEquals(isolate, Handle<String>::cast(one),
Handle<String>::cast(two));
}
@@ -92,7 +90,10 @@ uint32_t Name::Hash() {
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it. Has to be a string.
- return String::cast(this)->ComputeAndSetHash();
+ // Also the string must be writable, because read-only strings will have their
+ // hash values precomputed.
+ return String::cast(this)->ComputeAndSetHash(
+ Heap::FromWritableHeapObject(this)->isolate());
}
bool Name::IsInterestingSymbol() const {
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 091eb4c641..06e08deb82 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -29,7 +29,8 @@ class Name : public HeapObject {
// Equality operations.
inline bool Equals(Name* other);
- inline static bool Equals(Handle<Name> one, Handle<Name> two);
+ inline static bool Equals(Isolate* isolate, Handle<Name> one,
+ Handle<Name> two);
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
@@ -55,9 +56,9 @@ class Name : public HeapObject {
// Return a string version of this name that is converted according to the
// rules described in ES6 section 9.2.11.
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
- Handle<Name> name);
+ Isolate* isolate, Handle<Name> name);
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
- Handle<Name> name, Handle<String> prefix);
+ Isolate* isolate, Handle<Name> name, Handle<String> prefix);
DECL_CAST(Name)
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index ec47506c5b..4bbf9e535b 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -24,14 +24,18 @@
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
+#define DECL_UINT16_ACCESSORS(name) \
+ inline uint16_t name() const; \
+ inline void set_##name(int value);
+
#define DECL_ACCESSORS(name, type) \
inline type* name() const; \
inline void set_##name(type* value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-#define DECL_CAST(type) \
- INLINE(static type* cast(Object* object)); \
- INLINE(static const type* cast(const Object* object));
+#define DECL_CAST(type) \
+ V8_INLINE static type* cast(Object* object); \
+ V8_INLINE static const type* cast(const Object* object);
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
@@ -53,17 +57,26 @@
WRITE_INT32_FIELD(this, offset, value); \
}
-#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
- set_condition) \
- type* holder::name() const { \
- type* value = type::cast(READ_FIELD(this, offset)); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define UINT16_ACCESSORS(holder, name, offset) \
+ uint16_t holder::name() const { return READ_UINT16_FIELD(this, offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint16_t>(-1)); \
+ WRITE_UINT16_FIELD(this, offset, value); \
+ }
+
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type* holder::name() const { \
+ type* value = type::cast(READ_FIELD(this, offset)); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
+ offset, value, mode); \
}
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
@@ -71,17 +84,18 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
- set_condition) \
- MaybeObject* holder::name() const { \
- MaybeObject* value = READ_WEAK_FIELD(this, offset); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_WEAK_FIELD(this, offset, value); \
- CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
+ set_condition) \
+ MaybeObject* holder::name() const { \
+ MaybeObject* value = READ_WEAK_FIELD(this, offset); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_WEAK_FIELD(this, offset, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
+ offset, value, mode); \
}
#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -192,35 +206,49 @@
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- heap->RecordWrite(object, HeapObject::RawField(object, offset), value);
-
-#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordMaybeWeakWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
- value);
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- } \
- heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
- }
-
-#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordMaybeWeakWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- } \
- heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
- value); \
- }
+#define WRITE_BARRIER(heap, object, offset, value) \
+ do { \
+ Heap* __heap__ = heap; \
+ __heap__->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
+ value); \
+ } while (false)
+
+#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
+ do { \
+ Heap* __heap__ = heap; \
+ __heap__->incremental_marking()->RecordMaybeWeakWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ __heap__->RecordWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ } while (false)
+
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ do { \
+ Heap* __heap__ = heap; \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ __heap__->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ } \
+ __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
+ value); \
+ } \
+ } while (false)
+
+#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
+ do { \
+ Heap* __heap__ = heap; \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ __heap__->incremental_marking()->RecordMaybeWeakWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ } \
+ __heap__->RecordWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ } \
+ } while (false)
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
@@ -325,7 +353,7 @@
static_cast<base::Atomic8>(value));
#ifdef VERIFY_HEAP
-#define DECL_VERIFIER(Name) void Name##Verify();
+#define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
#else
#define DECL_VERIFIER(Name)
#endif
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 8789420d24..25d5dc938a 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -19,11 +19,34 @@ int OrderedHashMap::GetMapRootIndex() {
return Heap::kOrderedHashMapMapRootIndex;
}
+int SmallOrderedHashMap::GetMapRootIndex() {
+ return Heap::kSmallOrderedHashMapMapRootIndex;
+}
+
+int SmallOrderedHashSet::GetMapRootIndex() {
+ return Heap::kSmallOrderedHashSetMapRootIndex;
+}
+
inline Object* OrderedHashMap::ValueAt(int entry) {
DCHECK_LT(entry, this->UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
+inline bool OrderedHashSet::Is(Handle<HeapObject> table) {
+ return table->IsOrderedHashSet();
+}
+
+inline bool OrderedHashMap::Is(Handle<HeapObject> table) {
+ return table->IsOrderedHashMap();
+}
+
+inline bool SmallOrderedHashSet::Is(Handle<HeapObject> table) {
+ return table->IsSmallOrderedHashSet();
+}
+
+inline bool SmallOrderedHashMap::Is(Handle<HeapObject> table) {
+ return table->IsSmallOrderedHashMap();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 0f80876fee..ad558e731e 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -6,6 +6,7 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
namespace v8 {
@@ -39,7 +40,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
- Handle<Derived> table) {
+ Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
int nof = table->NumberOfElements();
@@ -49,28 +50,28 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
// Don't need to grow if we can simply clear out deleted entries instead.
// Note that we can't compact in place, though, so we always allocate
// a new table.
- return Rehash(table, (nod < (capacity >> 1)) ? capacity << 1 : capacity);
+ return Rehash(isolate, table,
+ (nod < (capacity >> 1)) ? capacity << 1 : capacity);
}
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::Shrink(
- Handle<Derived> table) {
+ Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
int nof = table->NumberOfElements();
int capacity = table->Capacity();
if (nof >= (capacity >> 2)) return table;
- return Rehash(table, capacity / 2);
+ return Rehash(isolate, table, capacity / 2);
}
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
- Handle<Derived> table) {
+ Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
- Handle<Derived> new_table =
- Allocate(table->GetIsolate(), kMinCapacity,
- table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ Handle<Derived> new_table = Allocate(
+ isolate, kMinCapacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
table->SetNextTable(*new_table);
table->SetNumberOfDeletedElements(kClearedTableSentinel);
@@ -88,9 +89,10 @@ bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
return entry != kNotFound;
}
-Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
+Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
+ Handle<OrderedHashSet> table,
Handle<Object> key) {
- int hash = key->GetOrCreateHash(table->GetIsolate())->value();
+ int hash = key->GetOrCreateHash(isolate)->value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
while (entry != kNotFound) {
@@ -100,7 +102,7 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
entry = table->NextChainEntry(entry);
}
- table = OrderedHashSet::EnsureGrowable(table);
+ table = OrderedHashSet::EnsureGrowable(isolate, table);
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
int previous_entry = table->HashToEntry(hash);
@@ -117,14 +119,13 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
}
Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
- Handle<OrderedHashSet> table, GetKeysConversion convert) {
- Isolate* isolate = table->GetIsolate();
+ Isolate* isolate, Handle<OrderedHashSet> table, GetKeysConversion convert) {
int length = table->NumberOfElements();
int nof_buckets = table->NumberOfBuckets();
// Convert the dictionary to a linear list.
Handle<FixedArray> result = Handle<FixedArray>::cast(table);
// From this point on table is no longer a valid OrderedHashSet.
- result->set_map(isolate->heap()->fixed_array_map());
+ result->set_map(ReadOnlyRoots(isolate).fixed_array_map());
for (int i = 0; i < length; i++) {
int index = kHashTableStartIndex + nof_buckets + (i * kEntrySize);
Object* key = table->get(index);
@@ -138,27 +139,24 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
}
result->set(i, key);
}
- result->Shrink(length);
- return result;
+ return FixedArray::ShrinkOrEmpty(isolate, result, length);
}
-HeapObject* OrderedHashSet::GetEmpty(Isolate* isolate) {
- return isolate->heap()->empty_ordered_hash_set();
+HeapObject* OrderedHashSet::GetEmpty(ReadOnlyRoots ro_roots) {
+ return ro_roots.empty_ordered_hash_set();
}
-HeapObject* OrderedHashMap::GetEmpty(Isolate* isolate) {
- return isolate->heap()->empty_ordered_hash_map();
+HeapObject* OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
+ return ro_roots.empty_ordered_hash_map();
}
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- Handle<Derived> table, int new_capacity) {
- Isolate* isolate = table->GetIsolate();
+ Isolate* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
- Handle<Derived> new_table =
- Allocate(isolate, new_capacity,
- isolate->heap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ Handle<Derived> new_table = Allocate(
+ isolate, new_capacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
int new_buckets = new_table->NumberOfBuckets();
@@ -206,7 +204,7 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
int nod = table->NumberOfDeletedElements();
int index = table->EntryToIndex(entry);
- Object* hole = isolate->heap()->the_hole_value();
+ Object* hole = ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < entrysize; ++i) {
table->set(index + i, hole);
}
@@ -228,10 +226,11 @@ Object* OrderedHashMap::GetHash(Isolate* isolate, Object* key) {
return hash;
}
-Handle<OrderedHashMap> OrderedHashMap::Add(Handle<OrderedHashMap> table,
+Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
+ Handle<OrderedHashMap> table,
Handle<Object> key,
Handle<Object> value) {
- int hash = key->GetOrCreateHash(table->GetIsolate())->value();
+ int hash = key->GetOrCreateHash(isolate)->value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
{
@@ -245,7 +244,7 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Handle<OrderedHashMap> table,
}
}
- table = OrderedHashMap::EnsureGrowable(table);
+ table = OrderedHashMap::EnsureGrowable(isolate, table);
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
int previous_entry = table->HashToEntry(hash);
@@ -265,14 +264,15 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Handle<OrderedHashMap> table,
template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure);
-template Handle<OrderedHashSet> OrderedHashTable<
- OrderedHashSet, 1>::EnsureGrowable(Handle<OrderedHashSet> table);
+template Handle<OrderedHashSet>
+OrderedHashTable<OrderedHashSet, 1>::EnsureGrowable(
+ Isolate* isolate, Handle<OrderedHashSet> table);
template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Shrink(
- Handle<OrderedHashSet> table);
+ Isolate* isolate, Handle<OrderedHashSet> table);
template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Clear(
- Handle<OrderedHashSet> table);
+ Isolate* isolate, Handle<OrderedHashSet> table);
template bool OrderedHashTable<OrderedHashSet, 1>::HasKey(Isolate* isolate,
OrderedHashSet* table,
@@ -285,14 +285,15 @@ template bool OrderedHashTable<OrderedHashSet, 1>::Delete(Isolate* isolate,
template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure);
-template Handle<OrderedHashMap> OrderedHashTable<
- OrderedHashMap, 2>::EnsureGrowable(Handle<OrderedHashMap> table);
+template Handle<OrderedHashMap>
+OrderedHashTable<OrderedHashMap, 2>::EnsureGrowable(
+ Isolate* isolate, Handle<OrderedHashMap> table);
template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Shrink(
- Handle<OrderedHashMap> table);
+ Isolate* isolate, Handle<OrderedHashMap> table);
template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Clear(
- Handle<OrderedHashMap> table);
+ Isolate* isolate, Handle<OrderedHashMap> table);
template bool OrderedHashTable<OrderedHashMap, 2>::HasKey(Isolate* isolate,
OrderedHashMap* table,
@@ -333,14 +334,14 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
num_buckets + num_chains);
- if (isolate->heap()->InNewSpace(this)) {
- MemsetPointer(RawField(this, kHeaderSize + kDataTableStartOffset),
- isolate->heap()->the_hole_value(),
+ if (Heap::InNewSpace(this)) {
+ MemsetPointer(RawField(this, kDataTableStartOffset),
+ ReadOnlyRoots(isolate).the_hole_value(),
capacity * Derived::kEntrySize);
} else {
for (int i = 0; i < capacity; i++) {
for (int j = 0; j < Derived::kEntrySize; j++) {
- SetDataEntry(i, j, isolate->heap()->the_hole_value());
+ SetDataEntry(i, j, ReadOnlyRoots(isolate).the_hole_value());
}
}
}
@@ -356,22 +357,25 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
for (int i = 0; i < capacity; ++i) {
for (int j = 0; j < Derived::kEntrySize; j++) {
- DCHECK_EQ(isolate->heap()->the_hole_value(), GetDataEntry(i, j));
+ DCHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), GetDataEntry(i, j));
}
}
#endif // DEBUG
}
-Handle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
- Handle<SmallOrderedHashSet> table, Handle<Object> key) {
- Isolate* isolate = table->GetIsolate();
+MaybeHandle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table, Handle<Object> key) {
if (table->HasKey(isolate, key)) return table;
if (table->UsedCapacity() >= table->Capacity()) {
- table = SmallOrderedHashSet::Grow(table);
+ MaybeHandle<SmallOrderedHashSet> new_table =
+ SmallOrderedHashSet::Grow(isolate, table);
+ if (!new_table.ToHandle(&table)) {
+ return MaybeHandle<SmallOrderedHashSet>();
+ }
}
- int hash = key->GetOrCreateHash(table->GetIsolate())->value();
+ int hash = key->GetOrCreateHash(isolate)->value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -391,17 +395,20 @@ Handle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
return table;
}
-Handle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
- Handle<SmallOrderedHashMap> table, Handle<Object> key,
+MaybeHandle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table, Handle<Object> key,
Handle<Object> value) {
- Isolate* isolate = table->GetIsolate();
if (table->HasKey(isolate, key)) return table;
if (table->UsedCapacity() >= table->Capacity()) {
- table = SmallOrderedHashMap::Grow(table);
+ MaybeHandle<SmallOrderedHashMap> new_table =
+ SmallOrderedHashMap::Grow(isolate, table);
+ if (!new_table.ToHandle(&table)) {
+ return MaybeHandle<SmallOrderedHashMap>();
+ }
}
- int hash = key->GetOrCreateHash(table->GetIsolate())->value();
+ int hash = key->GetOrCreateHash(isolate)->value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -439,7 +446,7 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived* table,
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
- Object* hole = isolate->heap()->the_hole_value();
+ Object* hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
table->SetDataEntry(entry, j, hole);
}
@@ -451,14 +458,13 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived* table,
}
template <class Derived>
-Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Handle<Derived> table,
+Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
+ Handle<Derived> table,
int new_capacity) {
DCHECK_GE(kMaxCapacity, new_capacity);
- Isolate* isolate = table->GetIsolate();
Handle<Derived> new_table = SmallOrderedHashTable<Derived>::Allocate(
- isolate, new_capacity,
- isolate->heap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ isolate, new_capacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
int new_entry = 0;
@@ -490,7 +496,8 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Handle<Derived> table,
}
template <class Derived>
-Handle<Derived> SmallOrderedHashTable<Derived>::Grow(Handle<Derived> table) {
+MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
+ Isolate* isolate, Handle<Derived> table) {
int capacity = table->Capacity();
int new_capacity = capacity;
@@ -506,19 +513,23 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Grow(Handle<Derived> table) {
new_capacity = kMaxCapacity;
}
- // TODO(gsathya): Transition to OrderedHashTable for size > kMaxCapacity.
+ // We need to migrate to a bigger hash table.
+ if (new_capacity > kMaxCapacity) {
+ return MaybeHandle<Derived>();
+ }
}
- return Rehash(table, new_capacity);
+ return Rehash(isolate, table, new_capacity);
}
template bool SmallOrderedHashTable<SmallOrderedHashSet>::HasKey(
Isolate* isolate, Handle<Object> key);
template Handle<SmallOrderedHashSet>
SmallOrderedHashTable<SmallOrderedHashSet>::Rehash(
- Handle<SmallOrderedHashSet> table, int new_capacity);
-template Handle<SmallOrderedHashSet> SmallOrderedHashTable<
- SmallOrderedHashSet>::Grow(Handle<SmallOrderedHashSet> table);
+ Isolate* isolate, Handle<SmallOrderedHashSet> table, int new_capacity);
+template MaybeHandle<SmallOrderedHashSet>
+SmallOrderedHashTable<SmallOrderedHashSet>::Grow(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table);
template void SmallOrderedHashTable<SmallOrderedHashSet>::Initialize(
Isolate* isolate, int capacity);
@@ -526,9 +537,10 @@ template bool SmallOrderedHashTable<SmallOrderedHashMap>::HasKey(
Isolate* isolate, Handle<Object> key);
template Handle<SmallOrderedHashMap>
SmallOrderedHashTable<SmallOrderedHashMap>::Rehash(
- Handle<SmallOrderedHashMap> table, int new_capacity);
-template Handle<SmallOrderedHashMap> SmallOrderedHashTable<
- SmallOrderedHashMap>::Grow(Handle<SmallOrderedHashMap> table);
+ Isolate* isolate, Handle<SmallOrderedHashMap> table, int new_capacity);
+template MaybeHandle<SmallOrderedHashMap>
+SmallOrderedHashTable<SmallOrderedHashMap>::Grow(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table);
template void SmallOrderedHashTable<SmallOrderedHashMap>::Initialize(
Isolate* isolate, int capacity);
@@ -537,6 +549,134 @@ template bool SmallOrderedHashTable<SmallOrderedHashMap>::Delete(
template bool SmallOrderedHashTable<SmallOrderedHashSet>::Delete(
Isolate* isolate, SmallOrderedHashSet* table, Object* key);
+template <class SmallTable, class LargeTable>
+Handle<HeapObject> OrderedHashTableHandler<SmallTable, LargeTable>::Allocate(
+ Isolate* isolate, int capacity) {
+ if (capacity < SmallTable::kMaxCapacity) {
+ return SmallTable::Allocate(isolate, capacity);
+ }
+
+ return LargeTable::Allocate(isolate, capacity);
+}
+
+template Handle<HeapObject>
+OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::Allocate(
+ Isolate* isolate, int capacity);
+template Handle<HeapObject>
+OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::Allocate(
+ Isolate* isolate, int capacity);
+
+template <class SmallTable, class LargeTable>
+bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
+ Handle<HeapObject> table, Handle<Object> key) {
+ if (SmallTable::Is(table)) {
+ return SmallTable::Delete(Handle<SmallTable>::cast(table), key);
+ }
+
+ DCHECK(LargeTable::Is(table));
+  // Note: Once we migrate to a big hash table, we never migrate
+ // down to a smaller hash table.
+ return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
+}
+
+template <class SmallTable, class LargeTable>
+bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
+ if (SmallTable::Is(table)) {
+ return Handle<SmallTable>::cast(table)->HasKey(isolate, key);
+ }
+
+ DCHECK(LargeTable::Is(table));
+ return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
+}
+
+template bool
+OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
+template bool
+OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::HasKey(
+ Isolate* isolate, Handle<HeapObject> table, Handle<Object> key);
+
+Handle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table) {
+ Handle<OrderedHashMap> new_table =
+ OrderedHashMap::Allocate(isolate, OrderedHashTableMinSize);
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+
+  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
+ // unhandlify this code as we preallocate the new backing store with
+ // the proper capacity.
+ for (int entry = 0; entry < (nof + nod); ++entry) {
+ Handle<Object> key = handle(table->KeyAt(entry), isolate);
+ if (key->IsTheHole(isolate)) continue;
+ Handle<Object> value = handle(
+ table->GetDataEntry(entry, SmallOrderedHashMap::kValueIndex), isolate);
+ new_table = OrderedHashMap::Add(isolate, new_table, key, value);
+ }
+
+ return new_table;
+}
+
+Handle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table) {
+ Handle<OrderedHashSet> new_table =
+ OrderedHashSet::Allocate(isolate, OrderedHashTableMinSize);
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+
+  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
+ // unhandlify this code as we preallocate the new backing store with
+ // the proper capacity.
+ for (int entry = 0; entry < (nof + nod); ++entry) {
+ Handle<Object> key = handle(table->KeyAt(entry), isolate);
+ if (key->IsTheHole(isolate)) continue;
+ new_table = OrderedHashSet::Add(isolate, new_table, key);
+ }
+
+ return new_table;
+}
+
+Handle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
+ Handle<HeapObject> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ if (table->IsSmallOrderedHashMap()) {
+ Handle<SmallOrderedHashMap> small_map =
+ Handle<SmallOrderedHashMap>::cast(table);
+ MaybeHandle<SmallOrderedHashMap> new_map =
+ SmallOrderedHashMap::Add(isolate, small_map, key, value);
+ if (!new_map.is_null()) return new_map.ToHandleChecked();
+
+ // We couldn't add to the small table, let's migrate to the
+ // big table.
+ table = OrderedHashMapHandler::AdjustRepresentation(isolate, small_map);
+ }
+
+ DCHECK(table->IsOrderedHashMap());
+ return OrderedHashMap::Add(isolate, Handle<OrderedHashMap>::cast(table), key,
+ value);
+}
+
+Handle<HeapObject> OrderedHashSetHandler::Add(Isolate* isolate,
+ Handle<HeapObject> table,
+ Handle<Object> key) {
+ if (table->IsSmallOrderedHashSet()) {
+ Handle<SmallOrderedHashSet> small_set =
+ Handle<SmallOrderedHashSet>::cast(table);
+ MaybeHandle<SmallOrderedHashSet> new_set =
+ SmallOrderedHashSet::Add(isolate, small_set, key);
+ if (!new_set.is_null()) return new_set.ToHandleChecked();
+
+ // We couldn't add to the small table, let's migrate to the
+ // big table.
+ table = OrderedHashSetHandler::AdjustRepresentation(isolate, small_set);
+ }
+
+ DCHECK(table->IsOrderedHashSet());
+ return OrderedHashSet::Add(isolate, Handle<OrderedHashSet>::cast(table), key);
+}
+
template <class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
DisallowHeapAllocation no_allocation;
@@ -572,7 +712,7 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
template <class Derived, class TableType>
bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
DisallowHeapAllocation no_allocation;
- Isolate* isolate = this->GetIsolate();
+ ReadOnlyRoots ro_roots = GetReadOnlyRoots();
Transition();
@@ -580,7 +720,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
int index = Smi::ToInt(this->index());
int used_capacity = table->UsedCapacity();
- while (index < used_capacity && table->KeyAt(index)->IsTheHole(isolate)) {
+ while (index < used_capacity && table->KeyAt(index)->IsTheHole(ro_roots)) {
index++;
}
@@ -588,7 +728,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
if (index < used_capacity) return true;
- set_table(TableType::GetEmpty(isolate));
+ set_table(TableType::GetEmpty(ro_roots));
return false;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index a1129d105d..20f3fe2eda 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -93,15 +93,16 @@ class OrderedHashTable : public OrderedHashTableBase {
// Returns an OrderedHashTable (possibly |table|) with enough space
// to add at least one new element.
- static Handle<Derived> EnsureGrowable(Handle<Derived> table);
+ static Handle<Derived> EnsureGrowable(Isolate* isolate,
+ Handle<Derived> table);
// Returns an OrderedHashTable (possibly |table|) that's shrunken
// if possible.
- static Handle<Derived> Shrink(Handle<Derived> table);
+ static Handle<Derived> Shrink(Isolate* isolate, Handle<Derived> table);
// Returns a new empty OrderedHashTable and records the clearing so that
// existing iterators can be updated.
- static Handle<Derived> Clear(Handle<Derived> table);
+ static Handle<Derived> Clear(Isolate* isolate, Handle<Derived> table);
// Returns true if the OrderedHashTable contains the key
static bool HasKey(Isolate* isolate, Derived* table, Object* key);
@@ -194,7 +195,8 @@ class OrderedHashTable : public OrderedHashTableBase {
(1 + (kEntrySize * kLoadFactor));
protected:
- static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
+ static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
+ int new_capacity);
void SetNumberOfBuckets(int num) {
set(kNumberOfBucketsIndex, Smi::FromInt(num));
@@ -222,12 +224,15 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
public:
DECL_CAST(OrderedHashSet)
- static Handle<OrderedHashSet> Add(Handle<OrderedHashSet> table,
+ static Handle<OrderedHashSet> Add(Isolate* isolate,
+ Handle<OrderedHashSet> table,
Handle<Object> value);
- static Handle<FixedArray> ConvertToKeysArray(Handle<OrderedHashSet> table,
+ static Handle<FixedArray> ConvertToKeysArray(Isolate* isolate,
+ Handle<OrderedHashSet> table,
GetKeysConversion convert);
- static HeapObject* GetEmpty(Isolate* isolate);
+ static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
static inline int GetMapRootIndex();
+ static inline bool Is(Handle<HeapObject> table);
};
class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
@@ -236,14 +241,16 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
// Returns a value if the OrderedHashMap contains the key, otherwise
// returns undefined.
- static Handle<OrderedHashMap> Add(Handle<OrderedHashMap> table,
+ static Handle<OrderedHashMap> Add(Isolate* isolate,
+ Handle<OrderedHashMap> table,
Handle<Object> key, Handle<Object> value);
Object* ValueAt(int entry);
static Object* GetHash(Isolate* isolate, Object* key);
- static HeapObject* GetEmpty(Isolate* isolate);
+ static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
static inline int GetMapRootIndex();
+ static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
};
@@ -309,10 +316,12 @@ class SmallOrderedHashTable : public HeapObject {
static bool Delete(Isolate* isolate, Derived* table, Object* key);
// Returns an SmallOrderedHashTable (possibly |table|) with enough
- // space to add at least one new element.
- static Handle<Derived> Grow(Handle<Derived> table);
+ // space to add at least one new element. Returns empty handle if
+ // we've already reached MaxCapacity.
+ static MaybeHandle<Derived> Grow(Isolate* isolate, Handle<Derived> table);
- static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
+ static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
+ int new_capacity);
// Iterates only fields in the DataTable.
class BodyDescriptor;
@@ -329,10 +338,10 @@ class SmallOrderedHashTable : public HeapObject {
int data_table_size = DataTableSizeFor(capacity);
int hash_table_size = capacity / kLoadFactor;
int chain_table_size = capacity;
- int total_size = kHeaderSize + kDataTableStartOffset + data_table_size +
- hash_table_size + chain_table_size;
+ int total_size = kDataTableStartOffset + data_table_size + hash_table_size +
+ chain_table_size;
- return ((total_size + kPointerSize - 1) / kPointerSize) * kPointerSize;
+ return RoundUp(total_size, kPointerSize);
}
// Returns the number elements that can fit into the allocated table.
@@ -346,20 +355,20 @@ class SmallOrderedHashTable : public HeapObject {
// Returns the number elements that are present in the table.
int NumberOfElements() const {
- int nof_elements = getByte(0, kNumberOfElementsByteIndex);
+ int nof_elements = getByte(kNumberOfElementsOffset, 0);
DCHECK_LE(nof_elements, Capacity());
return nof_elements;
}
int NumberOfDeletedElements() const {
- int nof_deleted_elements = getByte(0, kNumberOfDeletedElementsByteIndex);
+ int nof_deleted_elements = getByte(kNumberOfDeletedElementsOffset, 0);
DCHECK_LE(nof_deleted_elements, Capacity());
return nof_deleted_elements;
}
- int NumberOfBuckets() const { return getByte(0, kNumberOfBucketsByteIndex); }
+ int NumberOfBuckets() const { return getByte(kNumberOfBucketsOffset, 0); }
DECL_VERIFIER(SmallOrderedHashTable)
@@ -379,6 +388,13 @@ class SmallOrderedHashTable : public HeapObject {
// should be stored as another field of this object.
static const int kLoadFactor = 2;
+ // Our growth strategy involves doubling the capacity until we reach
+ // kMaxCapacity, but since the kMaxCapacity is always less than 256,
+ // we will never fully utilize this table. We special case for 256,
+ // by changing the new capacity to be kMaxCapacity in
+ // SmallOrderedHashTable::Grow.
+ static const int kGrowthHack = 256;
+
protected:
void SetDataEntry(int entry, int relative_index, Object* value);
@@ -391,8 +407,7 @@ class SmallOrderedHashTable : public HeapObject {
}
Address GetHashTableStartAddress(int capacity) const {
- return FIELD_ADDR(
- this, kHeaderSize + kDataTableStartOffset + DataTableSizeFor(capacity));
+ return FIELD_ADDR(this, kDataTableStartOffset + DataTableSizeFor(capacity));
}
void SetFirstEntry(int bucket, byte value) {
@@ -433,13 +448,13 @@ class SmallOrderedHashTable : public HeapObject {
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
- return READ_FIELD(this, kHeaderSize + entry_offset);
+ return READ_FIELD(this, entry_offset);
}
Object* KeyAt(int entry) const {
DCHECK_LT(entry, Capacity());
Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
- return READ_FIELD(this, kHeaderSize + entry_offset);
+ return READ_FIELD(this, entry_offset);
}
int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
@@ -451,18 +466,16 @@ class SmallOrderedHashTable : public HeapObject {
return entry;
}
- void SetNumberOfBuckets(int num) {
- setByte(0, kNumberOfBucketsByteIndex, num);
- }
+ void SetNumberOfBuckets(int num) { setByte(kNumberOfBucketsOffset, 0, num); }
void SetNumberOfElements(int num) {
DCHECK_LE(static_cast<unsigned>(num), Capacity());
- setByte(0, kNumberOfElementsByteIndex, num);
+ setByte(kNumberOfElementsOffset, 0, num);
}
void SetNumberOfDeletedElements(int num) {
DCHECK_LE(static_cast<unsigned>(num), Capacity());
- setByte(0, kNumberOfDeletedElementsByteIndex, num);
+ setByte(kNumberOfDeletedElementsOffset, 0, num);
}
int FindEntry(Isolate* isolate, Object* key) {
@@ -481,33 +494,28 @@ class SmallOrderedHashTable : public HeapObject {
return kNotFound;
}
- static const int kNumberOfElementsByteIndex = 0;
- static const int kNumberOfDeletedElementsByteIndex = 1;
- static const int kNumberOfBucketsByteIndex = 2;
+ static const Offset kNumberOfElementsOffset = kHeaderSize;
+ static const Offset kNumberOfDeletedElementsOffset =
+ kNumberOfElementsOffset + kOneByteSize;
+ static const Offset kNumberOfBucketsOffset =
+ kNumberOfDeletedElementsOffset + kOneByteSize;
+ static const constexpr Offset kDataTableStartOffset =
+ RoundUp<kPointerSize>(kNumberOfBucketsOffset);
- static const Offset kDataTableStartOffset = kPointerSize;
static constexpr int DataTableSizeFor(int capacity) {
return capacity * Derived::kEntrySize * kPointerSize;
}
- // Our growth strategy involves doubling the capacity until we reach
- // kMaxCapacity, but since the kMaxCapacity is always less than 256,
- // we will never fully utilize this table. We special case for 256,
- // by changing the new capacity to be kMaxCapacity in
- // SmallOrderedHashTable::Grow.
- static const int kGrowthHack = 256;
-
// This is used for accessing the non |DataTable| part of the
// structure.
byte getByte(Offset offset, ByteIndex index) const {
DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
- return READ_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize));
+ return READ_BYTE_FIELD(this, offset + (index * kOneByteSize));
}
void setByte(Offset offset, ByteIndex index, byte value) {
DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
- WRITE_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize),
- value);
+ WRITE_BYTE_FIELD(this, offset + (index * kOneByteSize), value);
}
Offset GetDataEntryOffset(int entry, int relative_index) const {
@@ -523,6 +531,11 @@ class SmallOrderedHashTable : public HeapObject {
return used;
}
+
+ private:
+ friend class OrderedHashMapHandler;
+ friend class OrderedHashSetHandler;
+ friend class CodeStubAssembler;
};
class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
@@ -537,8 +550,11 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
// Adds |value| to |table|, if the capacity isn't enough, a new
// table is created. The original |table| is returned if there is
// capacity to store |value| otherwise the new table is returned.
- static Handle<SmallOrderedHashSet> Add(Handle<SmallOrderedHashSet> table,
- Handle<Object> key);
+ static MaybeHandle<SmallOrderedHashSet> Add(Isolate* isolate,
+ Handle<SmallOrderedHashSet> table,
+ Handle<Object> key);
+ static inline bool Is(Handle<HeapObject> table);
+ static inline int GetMapRootIndex();
};
class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
@@ -554,9 +570,49 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
// Adds |value| to |table|, if the capacity isn't enough, a new
// table is created. The original |table| is returned if there is
// capacity to store |value| otherwise the new table is returned.
- static Handle<SmallOrderedHashMap> Add(Handle<SmallOrderedHashMap> table,
- Handle<Object> key,
- Handle<Object> value);
+ static MaybeHandle<SmallOrderedHashMap> Add(Isolate* isolate,
+ Handle<SmallOrderedHashMap> table,
+ Handle<Object> key,
+ Handle<Object> value);
+ static inline bool Is(Handle<HeapObject> table);
+ static inline int GetMapRootIndex();
+};
+
+// TODO(gsathya): Rename this to OrderedHashTable, after we rename
+// OrderedHashTable to LargeOrderedHashTable. Also set up a
+// OrderedHashSetBase class as a base class for the two tables and use
+// that instead of a HeapObject here.
+template <class SmallTable, class LargeTable>
+class OrderedHashTableHandler {
+ public:
+ typedef int Entry;
+
+ static Handle<HeapObject> Allocate(Isolate* isolate, int capacity);
+ static bool Delete(Handle<HeapObject> table, Handle<Object> key);
+ static bool HasKey(Isolate* isolate, Handle<HeapObject> table,
+ Handle<Object> key);
+
+ // TODO(gsathya): Move this to OrderedHashTable
+ static const int OrderedHashTableMinSize =
+ SmallOrderedHashTable<SmallTable>::kGrowthHack << 1;
+};
+
+class OrderedHashMapHandler
+ : public OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap> {
+ public:
+ static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+ Handle<Object> key, Handle<Object> value);
+ static Handle<OrderedHashMap> AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table);
+};
+
+class OrderedHashSetHandler
+ : public OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet> {
+ public:
+ static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+ Handle<Object> key);
+ static Handle<OrderedHashSet> AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table);
};
class JSCollectionIterator : public JSObject {
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index f9fb6110f3..f33bc42681 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_PROMISE_INL_H_
#define V8_OBJECTS_PROMISE_INL_H_
+#include "src/objects/js-promise-inl.h"
#include "src/objects/promise.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
new file mode 100644
index 0000000000..df638c9e5b
--- /dev/null
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROTOTYPE_INFO_INL_H_
+#define V8_OBJECTS_PROTOTYPE_INFO_INL_H_
+
+#include "src/objects/prototype-info.h"
+
+#include "src/objects/maybe-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PrototypeInfo)
+
+Map* PrototypeInfo::ObjectCreateMap() {
+ return Map::cast(object_create_map()->ToWeakHeapObject());
+}
+
+// static
+void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
+ Handle<Map> map) {
+ info->set_object_create_map(HeapObjectReference::Weak(*map));
+}
+
+bool PrototypeInfo::HasObjectCreateMap() {
+ MaybeObject* cache = object_create_map();
+ return cache->IsWeakHeapObject();
+}
+
+ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
+ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
+WEAK_ACCESSORS(PrototypeInfo, object_create_map, kObjectCreateMapOffset)
+SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
+SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
+BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
+
+void PrototypeUsers::MarkSlotEmpty(WeakArrayList* array, int index) {
+ DCHECK_GT(index, 0);
+ DCHECK_LT(index, array->length());
+ // Chain the empty slots into a linked list (each empty slot contains the
+ // index of the next empty slot).
+ array->Set(index, MaybeObject::FromObject(empty_slot_index(array)));
+ set_empty_slot_index(array, index);
+}
+
+Smi* PrototypeUsers::empty_slot_index(WeakArrayList* array) {
+ return array->Get(kEmptySlotIndex)->ToSmi();
+}
+
+void PrototypeUsers::set_empty_slot_index(WeakArrayList* array, int index) {
+ array->Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROTOTYPE_INFO_INL_H_
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
new file mode 100644
index 0000000000..303617fda0
--- /dev/null
+++ b/deps/v8/src/objects/prototype-info.h
@@ -0,0 +1,112 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROTOTYPE_INFO_H_
+#define V8_OBJECTS_PROTOTYPE_INFO_H_
+
+#include "src/objects.h"
+#include "src/objects/fixed-array.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Container for metadata stored on each prototype map.
+class PrototypeInfo : public Struct {
+ public:
+ static const int UNREGISTERED = -1;
+
+ // [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
+ DECL_ACCESSORS(weak_cell, Object)
+
+ // [prototype_users]: WeakArrayList containing weak references to maps using
+ // this prototype, or Smi(0) if uninitialized.
+ DECL_ACCESSORS(prototype_users, Object)
+
+ // [object_create_map]: A field caching the map for Object.create(prototype).
+ static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
+ Handle<Map> map);
+ inline Map* ObjectCreateMap();
+ inline bool HasObjectCreateMap();
+
+ // [registry_slot]: Slot in prototype's user registry where this user
+ // is stored. Returns UNREGISTERED if this prototype has not been registered.
+ inline int registry_slot() const;
+ inline void set_registry_slot(int slot);
+
+ // [bit_field]
+ inline int bit_field() const;
+ inline void set_bit_field(int bit_field);
+
+ DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
+
+ DECL_CAST(PrototypeInfo)
+
+ // Dispatched behavior.
+ DECL_PRINTER(PrototypeInfo)
+ DECL_VERIFIER(PrototypeInfo)
+
+ static const int kWeakCellOffset = HeapObject::kHeaderSize;
+ static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
+ static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
+ static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
+ static const int kObjectCreateMapOffset = kValidityCellOffset + kPointerSize;
+ static const int kBitFieldOffset = kObjectCreateMapOffset + kPointerSize;
+ static const int kSize = kBitFieldOffset + kPointerSize;
+
+ // Bit field usage.
+ static const int kShouldBeFastBit = 0;
+
+ class BodyDescriptor;
+
+ private:
+ DECL_ACCESSORS(object_create_map, MaybeObject)
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
+};
+
+// A growing array with an additional API for marking slots "empty". When adding
+// new elements, we reuse the empty slots instead of growing the array.
+class PrototypeUsers : public WeakArrayList {
+ public:
+ static Handle<WeakArrayList> Add(Isolate* isolate,
+ Handle<WeakArrayList> array,
+ Handle<Map> value, int* assigned_index);
+
+ static inline void MarkSlotEmpty(WeakArrayList* array, int index);
+
+ // The callback is called when a weak pointer to HeapObject "object" is moved
+ // from index "from_index" to index "to_index" during compaction. The callback
+ // must not cause GC.
+ typedef void (*CompactionCallback)(HeapObject* object, int from_index,
+ int to_index);
+ static WeakArrayList* Compact(Handle<WeakArrayList> array, Heap* heap,
+ CompactionCallback callback);
+
+#ifdef VERIFY_HEAP
+ static void Verify(WeakArrayList* array);
+#endif // VERIFY_HEAP
+
+ static const int kEmptySlotIndex = 0;
+ static const int kFirstIndex = 1;
+
+ static const int kNoEmptySlotsMarker = 0;
+
+ private:
+ static inline Smi* empty_slot_index(WeakArrayList* array);
+ static inline void set_empty_slot_index(WeakArrayList* array, int index);
+
+ static void IsSlotEmpty(WeakArrayList* array, int index);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeUsers);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROTOTYPE_INFO_H_
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 24eb805f5a..838c299bd2 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -46,7 +46,7 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
// Reserves space for captures.
static Handle<RegExpMatchInfo> ReserveCaptures(
- Handle<RegExpMatchInfo> match_info, int capture_count);
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count);
DECL_CAST(RegExpMatchInfo)
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 479d7b3ca4..9ec87dcb92 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -10,7 +10,9 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
#include "src/bootstrapper.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
@@ -61,20 +63,14 @@ bool ScopeInfo::Equals(ScopeInfo* other) const {
Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
MaybeHandle<ScopeInfo> outer_scope) {
// Collect variables.
- int stack_local_count = 0;
int context_local_count = 0;
int module_vars_count = 0;
// Stack allocated block scope variables are allocated in the parent
// declaration scope, but are recorded in the block scope's scope info. First
// slot index indicates at which offset a particular scope starts in the
// parent declaration scope.
- int first_slot_index = 0;
for (Variable* var : *scope->locals()) {
switch (var->location()) {
- case VariableLocation::LOCAL:
- if (stack_local_count == 0) first_slot_index = var->index();
- stack_local_count++;
- break;
case VariableLocation::CONTEXT:
context_local_count++;
break;
@@ -143,8 +139,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
const int parameter_count = scope->num_parameters();
const bool has_outer_scope_info = !outer_scope.is_null();
- const int length = kVariablePartIndex + parameter_count +
- (1 + stack_local_count) + 2 * context_local_count +
+ const int length = kVariablePartIndex + 2 * context_local_count +
(has_receiver ? 1 : 0) +
(has_function_name ? kFunctionNameEntries : 0) +
(has_inferred_function_name ? 1 : 0) +
@@ -189,50 +184,29 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
- scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
int index = kVariablePartIndex;
- // Add parameters.
- DCHECK_EQ(index, scope_info->ParameterNamesIndex());
- if (scope->is_declaration_scope()) {
- for (int i = 0; i < parameter_count; ++i) {
- scope_info->set(index++,
- *scope->AsDeclarationScope()->parameter(i)->name());
- }
- }
- // Add stack locals' names, context locals' names and info, module variables'
- // names and info. We are assuming that the stack locals' slots are allocated
- // in increasing order, so we can simply add them to the ScopeInfo object.
+ // Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
- DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
- scope_info->set(index++, Smi::FromInt(first_slot_index));
- DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
-
- int stack_local_base = index;
- int context_local_base = stack_local_base + stack_local_count;
+ int context_local_base = index;
int context_local_info_base = context_local_base + context_local_count;
int module_var_entry = scope_info->ModuleVariablesIndex();
for (Variable* var : *scope->locals()) {
switch (var->location()) {
- case VariableLocation::LOCAL: {
- int local_index = var->index() - first_slot_index;
- DCHECK_LE(0, local_index);
- DCHECK_LT(local_index, stack_local_count);
- scope_info->set(stack_local_base + local_index, *var->name());
- break;
- }
case VariableLocation::CONTEXT: {
// Due to duplicate parameters, context locals aren't guaranteed to come
// in order.
int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
DCHECK_LE(0, local_index);
DCHECK_LT(local_index, context_local_count);
- uint32_t info = VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned());
+ uint32_t info =
+ VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info->set(context_local_base + local_index, *var->name());
scope_info->set(context_local_info_base + local_index,
Smi::FromInt(info));
@@ -246,7 +220,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
uint32_t properties =
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned());
+ MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
@@ -257,7 +232,26 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
}
}
- index += stack_local_count + 2 * context_local_count;
+ if (scope->is_declaration_scope()) {
+ // Mark contexts slots with the parameter number they represent. We walk the
+ // list of parameters. That can include duplicate entries if a parameter
+ // name is repeated. By walking upwards, we'll automatically mark the
+ // context slot with the highest parameter number that uses this variable.
+ // That will be the parameter number that is represented by the context
+ // slot. All lower parameters will only be available on the stack through
+ // the arguments object.
+ for (int i = 0; i < parameter_count; i++) {
+ Variable* parameter = scope->AsDeclarationScope()->parameter(i);
+ if (parameter->location() != VariableLocation::CONTEXT) continue;
+ int index = parameter->index() - Context::MIN_CONTEXT_SLOTS;
+ int info_index = context_local_info_base + index;
+ int info = Smi::ToInt(scope_info->get(info_index));
+ info = ParameterNumberField::update(info, i);
+ scope_info->set(info_index, Smi::FromInt(info));
+ }
+ }
+
+ index += 2 * context_local_count;
// If the receiver is allocated, add its index.
DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
@@ -326,7 +320,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope) {
const bool has_outer_scope_info = !outer_scope.is_null();
- const int length = kVariablePartIndex + 1 + (has_outer_scope_info ? 1 : 0);
+ const int length = kVariablePartIndex + (has_outer_scope_info ? 1 : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -345,14 +339,9 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
scope_info->SetFlags(flags);
scope_info->SetParameterCount(0);
- scope_info->SetStackLocalCount(0);
scope_info->SetContextLocalCount(0);
int index = kVariablePartIndex;
- DCHECK_EQ(index, scope_info->ParameterNamesIndex());
- DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
- scope_info->set(index++, Smi::kZero);
- DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
@@ -384,14 +373,12 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
DCHECK(type == SCRIPT_SCOPE || type == FUNCTION_SCOPE);
const int parameter_count = 0;
- const int stack_local_count = 0;
const bool is_empty_function = type == FUNCTION_SCOPE;
const int context_local_count = is_empty_function ? 0 : 1;
const bool has_receiver = !is_empty_function;
const bool has_inferred_function_name = is_empty_function;
const bool has_position_info = true;
- const int length = kVariablePartIndex + parameter_count +
- (1 + stack_local_count) + 2 * context_local_count +
+ const int length = kVariablePartIndex + 2 * context_local_count +
(has_receiver ? 1 : 0) +
(is_empty_function ? kFunctionNameEntries : 0) +
(has_inferred_function_name ? 1 : 0) +
@@ -415,25 +402,22 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
IsDebugEvaluateScopeField::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
- scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
int index = kVariablePartIndex;
- const int first_slot_index = 0;
- DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
- scope_info->set(index++, Smi::FromInt(first_slot_index));
- DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
// Here we add info for context-allocated "this".
DCHECK_EQ(index, scope_info->ContextLocalNamesIndex());
if (context_local_count) {
- scope_info->set(index++, isolate->heap()->this_string());
+ scope_info->set(index++, ReadOnlyRoots(isolate).this_string());
}
DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
if (context_local_count) {
- const uint32_t value = VariableModeField::encode(CONST) |
- InitFlagField::encode(kCreatedInitialized) |
- MaybeAssignedFlagField::encode(kNotAssigned);
+ const uint32_t value =
+ VariableModeField::encode(VariableMode::kConst) |
+ InitFlagField::encode(kCreatedInitialized) |
+ MaybeAssignedFlagField::encode(kNotAssigned) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info->set(index++, Smi::FromInt(value));
}
@@ -470,7 +454,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
}
ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
- return isolate->heap()->empty_scope_info();
+ return ReadOnlyRoots(isolate).empty_scope_info();
}
ScopeType ScopeInfo::scope_type() const {
@@ -495,19 +479,6 @@ bool ScopeInfo::is_declaration_scope() const {
return DeclarationScopeField::decode(Flags());
}
-int ScopeInfo::LocalCount() const {
- return StackLocalCount() + ContextLocalCount();
-}
-
-int ScopeInfo::StackSlotCount() const {
- if (length() > 0) {
- bool function_name_stack_slot =
- FunctionVariableField::decode(Flags()) == STACK;
- return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
- }
- return 0;
-}
-
int ScopeInfo::ContextLength() const {
if (length() > 0) {
int context_locals = ContextLocalCount();
@@ -620,17 +591,17 @@ String* ScopeInfo::FunctionDebugName() const {
name = InferredFunctionName();
if (name->IsString()) return String::cast(name);
}
- return GetHeap()->empty_string();
+ return GetReadOnlyRoots().empty_string();
}
int ScopeInfo::StartPosition() const {
DCHECK(HasPositionInfo());
- return Smi::cast(get(PositionInfoIndex()))->value();
+ return Smi::ToInt(get(PositionInfoIndex()));
}
int ScopeInfo::EndPosition() const {
DCHECK(HasPositionInfo());
- return Smi::cast(get(PositionInfoIndex() + 1))->value();
+ return Smi::ToInt(get(PositionInfoIndex() + 1));
}
void ScopeInfo::SetPositionInfo(int start, int end) {
@@ -650,36 +621,6 @@ ModuleInfo* ScopeInfo::ModuleDescriptorInfo() const {
return ModuleInfo::cast(get(ModuleInfoIndex()));
}
-String* ScopeInfo::ParameterName(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ParameterCount());
- int info_index = ParameterNamesIndex() + var;
- return String::cast(get(info_index));
-}
-
-String* ScopeInfo::LocalName(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, LocalCount());
- DCHECK(StackLocalNamesIndex() + StackLocalCount() ==
- ContextLocalNamesIndex());
- int info_index = StackLocalNamesIndex() + var;
- return String::cast(get(info_index));
-}
-
-String* ScopeInfo::StackLocalName(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, StackLocalCount());
- int info_index = StackLocalNamesIndex() + var;
- return String::cast(get(info_index));
-}
-
-int ScopeInfo::StackLocalIndex(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, StackLocalCount());
- int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
- return first_slot_index + var;
-}
-
String* ScopeInfo::ContextLocalName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -703,6 +644,21 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) const {
return InitFlagField::decode(value);
}
+bool ScopeInfo::ContextLocalIsParameter(int var) const {
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
+ int value = Smi::ToInt(get(info_index));
+ return ParameterNumberField::decode(value) != ParameterNumberField::kMax;
+}
+
+uint32_t ScopeInfo::ContextLocalParameterNumber(int var) const {
+ DCHECK(ContextLocalIsParameter(var));
+ int info_index = ContextLocalInfosIndex() + var;
+ int value = Smi::ToInt(get(info_index));
+ return ParameterNumberField::decode(value);
+}
+
MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -718,26 +674,13 @@ bool ScopeInfo::VariableIsSynthetic(String* name) {
// with user declarations, the current temporaries like .generator_object and
// .result start with a dot, so we can use that as a flag. It's a hack!
return name->length() == 0 || name->Get(0) == '.' ||
- name->Equals(name->GetHeap()->this_string());
-}
-
-int ScopeInfo::StackSlotIndex(String* name) const {
- DCHECK(name->IsInternalizedString());
- if (length() == 0) return -1;
- int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
- int start = StackLocalNamesIndex();
- int end = start + StackLocalCount();
- for (int i = start; i < end; ++i) {
- if (name == get(i)) {
- return i - start + first_slot_index;
- }
- }
- return -1;
+ name->Equals(name->GetReadOnlyRoots().this_string());
}
int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
+ DCHECK(name->IsInternalizedString());
DCHECK_EQ(scope_type(), MODULE_SCOPE);
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
@@ -770,8 +713,19 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
if (scope_info->length() == 0) return -1;
- ContextSlotCache* context_slot_cache =
- scope_info->GetIsolate()->context_slot_cache();
+ // Get the Isolate via the heap.
+ //
+ // Ideally we'd pass Isolate* through to this function, however this is mostly
+ // called from the parser, which is otherwise isolate independent. We can't
+ // assume that all scope infos are never RO space (like we can with JSReceiver
+ // or Context), but we can assume that *non-empty* scope infos are.
+ //
+ // So, we take the least-ugly approach of manually getting the isolate to be
+ // able to remove GetIsolate from ScopeInfo in the general case, while
+ // allowing it in this one particular case.
+ Isolate* isolate = Heap::FromWritableHeapObject(*scope_info)->isolate();
+
+ ContextSlotCache* context_slot_cache = isolate->context_slot_cache();
int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
maybe_assigned_flag);
if (result != ContextSlotCache::kNotFound) {
@@ -796,30 +750,12 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
}
}
// Cache as not found. Mode, init flag and maybe assigned flag don't matter.
- context_slot_cache->Update(scope_info, name, TEMPORARY, kNeedsInitialization,
- kNotAssigned, -1);
+ context_slot_cache->Update(scope_info, name, VariableMode::kTemporary,
+ kNeedsInitialization, kNotAssigned, -1);
return -1;
}
-int ScopeInfo::ParameterIndex(String* name) const {
- DCHECK(name->IsInternalizedString());
- if (length() == 0) return -1;
- // We must read parameters from the end since for
- // multiply declared parameters the value of the
- // last declaration of that parameter is used
- // inside a function (and thus we need to look
- // at the last index). Was bug# 1110337.
- int start = ParameterNamesIndex();
- int end = start + ParameterCount();
- for (int i = end - 1; i >= start; --i) {
- if (name == get(i)) {
- return i - start;
- }
- }
- return -1;
-}
-
int ScopeInfo::ReceiverContextSlotIndex() const {
if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT) {
return Smi::ToInt(get(ReceiverInfoIndex()));
@@ -842,23 +778,11 @@ FunctionKind ScopeInfo::function_kind() const {
return FunctionKindField::decode(Flags());
}
-int ScopeInfo::ParameterNamesIndex() const {
+int ScopeInfo::ContextLocalNamesIndex() const {
DCHECK_LT(0, length());
return kVariablePartIndex;
}
-int ScopeInfo::StackLocalFirstSlotIndex() const {
- return ParameterNamesIndex() + ParameterCount();
-}
-
-int ScopeInfo::StackLocalNamesIndex() const {
- return StackLocalFirstSlotIndex() + 1;
-}
-
-int ScopeInfo::ContextLocalNamesIndex() const {
- return StackLocalNamesIndex() + StackLocalCount();
-}
-
int ScopeInfo::ContextLocalInfosIndex() const {
return ContextLocalNamesIndex() + ContextLocalCount();
}
@@ -1030,9 +954,8 @@ String* ModuleInfo::RegularExportLocalName(int i) const {
}
int ModuleInfo::RegularExportCellIndex(int i) const {
- return Smi::cast(regular_exports()->get(i * kRegularExportLength +
- kRegularExportCellIndexOffset))
- ->value();
+ return Smi::ToInt(regular_exports()->get(i * kRegularExportLength +
+ kRegularExportCellIndexOffset));
}
FixedArray* ModuleInfo::RegularExportExportNames(int i) const {
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 6efab5ca62..ac0664f7fb 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -49,16 +49,6 @@ class ScopeInfo : public FixedArray {
// Does this scope make a sloppy eval call?
bool CallsSloppyEval() const;
- // Return the total number of locals allocated on the stack and in the
- // context. This includes the parameters that are allocated in the context.
- int LocalCount() const;
-
- // Return the number of stack slots for code. This number consists of two
- // parts:
- // 1. One stack slot per stack allocated local.
- // 2. One stack slot for the function name if it is stack allocated.
- int StackSlotCount() const;
-
// Return the number of context slots for code if a context is allocated. This
// number consists of three parts:
// 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
@@ -118,18 +108,6 @@ class ScopeInfo : public FixedArray {
ModuleInfo* ModuleDescriptorInfo() const;
- // Return the name of the given parameter.
- String* ParameterName(int var) const;
-
- // Return the name of the given local.
- String* LocalName(int var) const;
-
- // Return the name of the given stack local.
- String* StackLocalName(int var) const;
-
- // Return the name of the given stack local.
- int StackLocalIndex(int var) const;
-
// Return the name of the given context local.
String* ContextLocalName(int var) const;
@@ -139,6 +117,9 @@ class ScopeInfo : public FixedArray {
// Return the initialization flag of the given context local.
InitializationFlag ContextLocalInitFlag(int var) const;
+ bool ContextLocalIsParameter(int var) const;
+ uint32_t ContextLocalParameterNumber(int var) const;
+
// Return the initialization flag of the given context local.
MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var) const;
@@ -146,12 +127,6 @@ class ScopeInfo : public FixedArray {
// exposed to the user in a debugger.
static bool VariableIsSynthetic(String* name);
- // Lookup support for serialized scope info. Returns the
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be an internalized
- // string.
- int StackSlotIndex(String* name) const;
-
// Lookup support for serialized scope info. Returns the local context slot
// index for a given slot name if the slot is present; otherwise
// returns a value < 0. The name must be an internalized string.
@@ -168,11 +143,6 @@ class ScopeInfo : public FixedArray {
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be an internalized string.
- int ParameterIndex(String* name) const;
-
// Lookup support for serialized scope info. Returns the function context
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
@@ -217,13 +187,11 @@ class ScopeInfo : public FixedArray {
// numeric and occupies one array slot.
// 1. A set of properties of the scope.
// 2. The number of parameters. For non-function scopes this is 0.
-// 3. The number of non-parameter variables allocated on the stack.
-// 4. The number of non-parameter and parameter variables allocated in the
+// 3. The number of non-parameter and parameter variables allocated in the
// context.
#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
V(Flags) \
V(ParameterCount) \
- V(StackLocalCount) \
V(ContextLocalCount)
#define FIELD_ACCESSORS(name) \
@@ -241,53 +209,36 @@ class ScopeInfo : public FixedArray {
private:
// The layout of the variable part of a ScopeInfo is as follows:
- // 1. ParameterNames:
- // This part stores the names of the parameters for function scopes. One
- // slot is used per parameter, so in total this part occupies
- // ParameterCount() slots in the array. For other scopes than function
- // scopes ParameterCount() is 0.
- // 2. StackLocalFirstSlot:
- // Index of a first stack slot for stack local. Stack locals belonging to
- // this scope are located on a stack at slots starting from this index.
- // 3. StackLocalNames:
- // Contains the names of local variables that are allocated on the stack,
- // in increasing order of the stack slot index. First local variable has a
- // stack slot index defined in StackLocalFirstSlot (point 2 above).
- // One slot is used per stack local, so in total this part occupies
- // StackLocalCount() slots in the array.
- // 4. ContextLocalNames:
+ // 1. ContextLocalNames:
// Contains the names of local variables and parameters that are allocated
// in the context. They are stored in increasing order of the context slot
// index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
// context local, so in total this part occupies ContextLocalCount() slots
// in the array.
- // 5. ContextLocalInfos:
+ // 2. ContextLocalInfos:
// Contains the variable modes and initialization flags corresponding to
// the context locals in ContextLocalNames. One slot is used per
// context local, so in total this part occupies ContextLocalCount()
// slots in the array.
- // 6. ReceiverInfo:
+ // 3. ReceiverInfo:
// If the scope binds a "this" value, one slot is reserved to hold the
// context or stack slot index for the variable.
- // 7. FunctionNameInfo:
+ // 4. FunctionNameInfo:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
// b. The context or stack slot index for the variable.
- // 8. InferredFunctionName:
+ // 5. InferredFunctionName:
// Contains the function's inferred name.
- // 9. SourcePosition:
+ // 6. SourcePosition:
// Contains two slots with a) the startPosition and b) the endPosition if
// the scope belongs to a function or script.
- // 10. OuterScopeInfoIndex:
+ // 7. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 11. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // 8. ModuleInfo, ModuleVariableCount, and ModuleVariables:
// For a module scope, this part contains the ModuleInfo, the number of
// MODULE-allocated variables, and the metadata of those variables. For
// non-module scopes it is empty.
- int ParameterNamesIndex() const;
- int StackLocalFirstSlotIndex() const;
- int StackLocalNamesIndex() const;
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int ReceiverInfoIndex() const;
@@ -359,6 +310,8 @@ class ScopeInfo : public FixedArray {
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
+ class ParameterNumberField
+ : public BitField<uint32_t, MaybeAssignedFlagField::kNext, 16> {};
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 6f6151d590..eaea8f78e8 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -24,7 +24,6 @@ SMI_ACCESSORS(Script, id, kIdOffset)
SMI_ACCESSORS(Script, line_offset, kLineOffsetOffset)
SMI_ACCESSORS(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
SMI_ACCESSORS(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 8412c72b11..46adcb2a8a 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -15,8 +15,11 @@ namespace v8 {
namespace internal {
// Script describes a script which has been added to the VM.
-class Script : public Struct {
+class Script : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Script types.
enum Type {
TYPE_NATIVE = 0,
@@ -54,9 +57,6 @@ class Script : public Struct {
// [context_data]: context data for the context this script was compiled in.
DECL_ACCESSORS(context_data, Object)
- // [wrapper]: the wrapper cache. This is either undefined or a WeakCell.
- DECL_ACCESSORS(wrapper, HeapObject)
-
// [type]: the script type.
DECL_INT_ACCESSORS(type)
@@ -170,9 +170,6 @@ class Script : public Struct {
static int GetLineNumber(Handle<Script> script, int code_offset);
int GetLineNumber(int code_pos) const;
- // Get the JS object wrapping the given script; create it if none exists.
- static Handle<JSObject> GetWrapper(Handle<Script> script);
-
// Look through the list of existing shared function infos to find one
// that matches the function literal. Return empty handle if not found.
MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
@@ -198,8 +195,7 @@ class Script : public Struct {
static const int kLineOffsetOffset = kNameOffset + kPointerSize;
static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
- static const int kWrapperOffset = kContextOffset + kPointerSize;
- static const int kTypeOffset = kWrapperOffset + kPointerSize;
+ static const int kTypeOffset = kContextOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
static const int kEvalFromSharedOrWrappedArgumentsOffset =
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index e3cfd69fc5..892da7c5c5 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
#include "src/heap/heap-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates.h"
@@ -19,14 +20,63 @@ namespace internal {
CAST_ACCESSOR(PreParsedScopeData)
ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint8_t>, kScopeDataOffset)
-ACCESSORS(PreParsedScopeData, child_data, FixedArray, kChildDataOffset)
+INT_ACCESSORS(PreParsedScopeData, length, kLengthOffset)
+
+Object* PreParsedScopeData::child_data(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kChildDataStartOffset + index * kPointerSize;
+ return RELAXED_READ_FIELD(this, offset);
+}
+
+void PreParsedScopeData::set_child_data(int index, Object* value,
+ WriteBarrierMode mode) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kChildDataStartOffset + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
+ value, mode);
+}
+
+Object** PreParsedScopeData::child_data_start() const {
+ return HeapObject::RawField(this, kChildDataStartOffset);
+}
+
+void PreParsedScopeData::clear_padding() {
+ // For archs where kIntSize < kPointerSize, there will be padding between the
+ // length field and the start of the child data.
+ if (kUnalignedChildDataStartOffset < kChildDataStartOffset) {
+ memset(reinterpret_cast<void*>(address() + kUnalignedChildDataStartOffset),
+ 0, kChildDataStartOffset - kUnalignedChildDataStartOffset);
+ }
+}
+
+CAST_ACCESSOR(UncompiledData)
+INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
+INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
+INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
+
+void UncompiledData::clear_padding() {
+ // For archs where kIntSize < kPointerSize, there will be padding at the end
+ // of the data.
+ if (kUnalignedSize < kSize) {
+ memset(reinterpret_cast<void*>(address() + kUnalignedSize), 0,
+ kSize - kUnalignedSize);
+ }
+}
+
+CAST_ACCESSOR(UncompiledDataWithoutPreParsedScope)
+
+CAST_ACCESSOR(UncompiledDataWithPreParsedScope)
+ACCESSORS(UncompiledDataWithPreParsedScope, pre_parsed_scope_data,
+ PreParsedScopeData, kPreParsedScopeDataOffset)
CAST_ACCESSOR(InterpreterData)
ACCESSORS(InterpreterData, bytecode_array, BytecodeArray, kBytecodeArrayOffset)
ACCESSORS(InterpreterData, interpreter_trampoline, Code,
kInterpreterTrampolineOffset)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
@@ -34,30 +84,19 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
-ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, function_identifier, Object,
- kFunctionIdentifierOffset)
-
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
- is_named_expression,
- SharedFunctionInfo::IsNamedExpressionBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
- is_toplevel, SharedFunctionInfo::IsTopLevelBit)
+ACCESSORS(SharedFunctionInfo, function_identifier_or_debug_info, Object,
+ kFunctionIdentifierOrDebugInfoOffset)
-INT_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
#if V8_SFI_HAS_UNIQUE_ID
INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
-INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-INT_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
- kFormalParameterCountOffset)
-INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
-INT_ACCESSORS(SharedFunctionInfo, raw_end_position, kEndPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
- kStartPositionAndTypeOffset)
-INT_ACCESSORS(SharedFunctionInfo, function_token_position,
- kFunctionTokenPositionOffset)
+UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
+ kFormalParameterCountOffset)
+UINT16_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+UINT16_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
+ kFunctionTokenOffsetOffset)
INT_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
bool SharedFunctionInfo::HasSharedName() const {
@@ -69,13 +108,13 @@ bool SharedFunctionInfo::HasSharedName() const {
}
String* SharedFunctionInfo::Name() const {
- if (!HasSharedName()) return GetHeap()->empty_string();
+ if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
Object* value = name_or_scope_info();
if (value->IsScopeInfo()) {
if (ScopeInfo::cast(value)->HasFunctionName()) {
return String::cast(ScopeInfo::cast(value)->FunctionName());
}
- return GetHeap()->empty_string();
+ return GetReadOnlyRoots().empty_string();
}
return String::cast(value);
}
@@ -100,6 +139,15 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
}
}
+int SharedFunctionInfo::function_token_position() const {
+ int offset = raw_function_token_offset();
+ if (offset == kFunctionTokenOutOfRange) {
+ return kNoSourcePosition;
+ } else {
+ return StartPosition() - offset;
+ }
+}
+
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_wrapped,
SharedFunctionInfo::IsWrappedBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
@@ -117,6 +165,20 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
requires_instance_fields_initializer,
SharedFunctionInfo::RequiresInstanceFieldsInitializer)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
+ SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_anonymous_expression,
+ SharedFunctionInfo::IsAnonymousExpressionBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, deserialized,
+ SharedFunctionInfo::IsDeserializedBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
+ SharedFunctionInfo::HasReportedBinaryCoverageBit)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_named_expression,
+ SharedFunctionInfo::IsNamedExpressionBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
+ SharedFunctionInfo::IsTopLevelBit)
+
bool SharedFunctionInfo::optimization_disabled() const {
return disable_optimization_reason() != BailoutReason::kNoReason;
}
@@ -213,58 +275,70 @@ void SharedFunctionInfo::UpdateFunctionMapIndex() {
set_function_map_index(map_index);
}
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
- name_should_print_as_anonymous,
- SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, is_anonymous_expression,
- SharedFunctionInfo::IsAnonymousExpressionBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized,
- SharedFunctionInfo::IsDeserializedBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, side_effect_state,
- SharedFunctionInfo::SideEffectStateBits)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
- SharedFunctionInfo::DebugIsBlackboxedBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
- computed_debug_is_blackboxed,
- SharedFunctionInfo::ComputedDebugIsBlackboxedBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
- has_reported_binary_coverage,
- SharedFunctionInfo::HasReportedBinaryCoverageBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, debugging_id,
- SharedFunctionInfo::DebuggingIdBits)
-
void SharedFunctionInfo::DontAdaptArguments() {
// TODO(leszeks): Revise this DCHECK now that the code field is gone.
DCHECK(!HasWasmExportedFunctionData());
set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
- raw_start_position, SharedFunctionInfo::StartPositionBits)
-
int SharedFunctionInfo::StartPosition() const {
- ScopeInfo* info = scope_info();
- if (!info->HasPositionInfo()) {
- // TODO(cbruni): use preparsed_scope_data
- return raw_start_position();
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ return info->StartPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data()->start_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
}
- return info->StartPosition();
+ return kNoSourcePosition;
}
int SharedFunctionInfo::EndPosition() const {
- ScopeInfo* info = scope_info();
- if (!info->HasPositionInfo()) {
- // TODO(cbruni): use preparsed_scope_data
- return raw_end_position();
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ return info->EndPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data()->end_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
+ }
+ return kNoSourcePosition;
+}
+
+void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ info->SetPositionInfo(start_position, end_position);
+ }
+ } else if (HasUncompiledData()) {
+ if (HasUncompiledDataWithPreParsedScope()) {
+ // Clear out preparsed scope data, since the position setter invalidates
+ // any scope data.
+ ClearPreParsedScopeData();
+ }
+ uncompiled_data()->set_start_position(start_position);
+ uncompiled_data()->set_end_position(end_position);
+ } else {
+ UNREACHABLE();
}
- return info->EndPosition();
}
Code* SharedFunctionInfo::GetCode() const {
// ======
// NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
- // GetSharedFunctionInfoCode method in code-stub-assembler.cc, and the
- // architecture-specific GetSharedFunctionInfoCode methods in builtins-*.cc.
+ // GetSharedFunctionInfoCode method in code-stub-assembler.cc.
// ======
Isolate* isolate = GetIsolate();
@@ -281,9 +355,9 @@ Code* SharedFunctionInfo::GetCode() const {
// Having a fixed array means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
- } else if (data->IsPreParsedScopeData()) {
- // Having pre-parsed scope data means we need to compile.
- DCHECK(HasPreParsedScopeData());
+ } else if (data->IsUncompiledData()) {
+ // Having uncompiled data (with or without scope) means we need to compile.
+ DCHECK(HasUncompiledData());
return isolate->builtins()->builtin(Builtins::kCompileLazy);
} else if (data->IsFunctionTemplateInfo()) {
// Having a function template info means we are an API function.
@@ -314,11 +388,6 @@ ScopeInfo* SharedFunctionInfo::scope_info() const {
void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
WriteBarrierMode mode) {
- // TODO(cbruni): this code is no longer necessary once we store the positon
- // only on the ScopeInfo.
- if (scope_info->HasPositionInfo()) {
- scope_info->SetPositionInfo(raw_start_position(), raw_end_position());
- }
// Move the existing name onto the ScopeInfo.
Object* name = name_or_scope_info();
if (name->IsScopeInfo()) {
@@ -366,8 +435,8 @@ ScopeInfo* SharedFunctionInfo::GetOuterScopeInfo() const {
void SharedFunctionInfo::set_outer_scope_info(HeapObject* value,
WriteBarrierMode mode) {
DCHECK(!is_compiled());
- DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole(GetIsolate()));
- DCHECK(value->IsScopeInfo() || value->IsTheHole(GetIsolate()));
+ DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole());
+ DCHECK(value->IsScopeInfo() || value->IsTheHole());
return set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
@@ -390,17 +459,16 @@ void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata* value,
bool SharedFunctionInfo::is_compiled() const {
Object* data = function_data();
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
- !data->IsPreParsedScopeData();
+ !data->IsUncompiledData();
}
-int SharedFunctionInfo::GetLength() const {
+uint16_t SharedFunctionInfo::GetLength() const {
DCHECK(is_compiled());
DCHECK(HasLength());
return length();
}
bool SharedFunctionInfo::HasLength() const {
- DCHECK_IMPLIES(length() < 0, length() == kInvalidLength);
return length() != kInvalidLength;
}
@@ -408,12 +476,6 @@ bool SharedFunctionInfo::has_simple_parameters() {
return scope_info()->HasSimpleParameters();
}
-bool SharedFunctionInfo::HasDebugInfo() const {
- bool has_debug_info = !debug_info()->IsSmi();
- DCHECK_EQ(debug_info()->IsStruct(), has_debug_info);
- return has_debug_info;
-}
-
bool SharedFunctionInfo::IsApiFunction() const {
return function_data()->IsFunctionTemplateInfo();
}
@@ -430,6 +492,19 @@ bool SharedFunctionInfo::HasBytecodeArray() const {
BytecodeArray* SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
+ if (HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray()) {
+ return GetDebugInfo()->OriginalBytecodeArray();
+ } else if (function_data()->IsBytecodeArray()) {
+ return BytecodeArray::cast(function_data());
+ } else {
+ DCHECK(function_data()->IsInterpreterData());
+ return InterpreterData::cast(function_data())->bytecode_array();
+ }
+}
+
+BytecodeArray* SharedFunctionInfo::GetDebugBytecodeArray() const {
+ DCHECK(HasBytecodeArray());
+ DCHECK(HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray());
if (function_data()->IsBytecodeArray()) {
return BytecodeArray::cast(function_data());
} else {
@@ -438,8 +513,19 @@ BytecodeArray* SharedFunctionInfo::GetBytecodeArray() const {
}
}
-void SharedFunctionInfo::set_bytecode_array(class BytecodeArray* bytecode) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
+void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray* bytecode) {
+ DCHECK(HasBytecodeArray());
+ if (function_data()->IsBytecodeArray()) {
+ set_function_data(bytecode);
+ } else {
+ DCHECK(function_data()->IsInterpreterData());
+ interpreter_data()->set_bytecode_array(bytecode);
+ }
+}
+
+void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ HasUncompiledData());
set_function_data(bytecode);
}
@@ -474,7 +560,7 @@ FixedArray* SharedFunctionInfo::asm_wasm_data() const {
void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
- HasAsmWasmData());
+ HasUncompiledData() || HasAsmWasmData());
set_function_data(data);
}
@@ -495,25 +581,72 @@ void SharedFunctionInfo::set_builtin_id(int builtin_id) {
set_function_data(Smi::FromInt(builtin_id), SKIP_WRITE_BARRIER);
}
-bool SharedFunctionInfo::HasPreParsedScopeData() const {
- return function_data()->IsPreParsedScopeData();
+bool SharedFunctionInfo::HasUncompiledData() const {
+ return function_data()->IsUncompiledData();
}
-PreParsedScopeData* SharedFunctionInfo::preparsed_scope_data() const {
- DCHECK(HasPreParsedScopeData());
- return PreParsedScopeData::cast(function_data());
+UncompiledData* SharedFunctionInfo::uncompiled_data() const {
+ DCHECK(HasUncompiledData());
+ return UncompiledData::cast(function_data());
}
-void SharedFunctionInfo::set_preparsed_scope_data(
- PreParsedScopeData* preparsed_scope_data) {
+void SharedFunctionInfo::set_uncompiled_data(UncompiledData* uncompiled_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- set_function_data(preparsed_scope_data);
+ DCHECK(uncompiled_data->IsUncompiledData());
+ set_function_data(uncompiled_data);
+}
+
+bool SharedFunctionInfo::HasUncompiledDataWithPreParsedScope() const {
+ return function_data()->IsUncompiledDataWithPreParsedScope();
+}
+
+UncompiledDataWithPreParsedScope*
+SharedFunctionInfo::uncompiled_data_with_pre_parsed_scope() const {
+ DCHECK(HasUncompiledDataWithPreParsedScope());
+ return UncompiledDataWithPreParsedScope::cast(function_data());
+}
+
+void SharedFunctionInfo::set_uncompiled_data_with_pre_parsed_scope(
+ UncompiledDataWithPreParsedScope* uncompiled_data_with_pre_parsed_scope) {
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
+ DCHECK(uncompiled_data_with_pre_parsed_scope
+ ->IsUncompiledDataWithPreParsedScope());
+ set_function_data(uncompiled_data_with_pre_parsed_scope);
+}
+
+bool SharedFunctionInfo::HasUncompiledDataWithoutPreParsedScope() const {
+ return function_data()->IsUncompiledDataWithoutPreParsedScope();
}
void SharedFunctionInfo::ClearPreParsedScopeData() {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
- HasPreParsedScopeData());
- set_builtin_id(Builtins::kCompileLazy);
+ DCHECK(HasUncompiledDataWithPreParsedScope());
+ UncompiledDataWithPreParsedScope* data =
+ uncompiled_data_with_pre_parsed_scope();
+
+ // Trim off the pre-parsed scope data from the uncompiled data by swapping the
+ // map, leaving only an uncompiled data without pre-parsed scope.
+ DisallowHeapAllocation no_gc;
+ Heap* heap = Heap::FromWritableHeapObject(data);
+
+ // Swap the map.
+ heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreParsedScope::kSize,
+ no_gc);
+ STATIC_ASSERT(UncompiledDataWithoutPreParsedScope::kSize <
+ UncompiledDataWithPreParsedScope::kSize);
+ STATIC_ASSERT(UncompiledDataWithoutPreParsedScope::kSize ==
+ UncompiledData::kSize);
+ data->synchronized_set_map(
+ GetReadOnlyRoots().uncompiled_data_without_pre_parsed_scope_map());
+
+ // Fill the remaining space with filler.
+ heap->CreateFillerObjectAt(
+ data->address() + UncompiledDataWithoutPreParsedScope::kSize,
+ UncompiledDataWithPreParsedScope::kSize -
+ UncompiledDataWithoutPreParsedScope::kSize,
+ ClearRecordedSlots::kNo);
+
+ // Ensure that the clear was successful.
+ DCHECK(HasUncompiledDataWithoutPreParsedScope());
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
@@ -526,6 +659,48 @@ WasmExportedFunctionData* SharedFunctionInfo::wasm_exported_function_data()
return WasmExportedFunctionData::cast(function_data());
}
+int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
+ // Fast path for the common case when the SFI is uncompiled and so the
+ // function literal id is already in the uncompiled data.
+ if (HasUncompiledData()) {
+ int id = uncompiled_data()->function_literal_id();
+ // Make sure the id is what we should have found with the slow path.
+ DCHECK_EQ(id, FindIndexInScript(isolate));
+ return id;
+ }
+
+ // Otherwise, search for the function in the SFI's script's function list,
+ // and return its index in that list.
+ return FindIndexInScript(isolate);
+}
+
+bool SharedFunctionInfo::HasDebugInfo() const {
+ return function_identifier_or_debug_info()->IsDebugInfo();
+}
+
+DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
+ DCHECK(HasDebugInfo());
+ return DebugInfo::cast(function_identifier_or_debug_info());
+}
+
+Object* SharedFunctionInfo::function_identifier() const {
+ Object* result;
+ if (HasDebugInfo()) {
+ result = GetDebugInfo()->function_identifier();
+ } else {
+ result = function_identifier_or_debug_info();
+ }
+ DCHECK(result->IsSmi() || result->IsString() || result->IsUndefined());
+ return result;
+}
+
+void SharedFunctionInfo::SetDebugInfo(DebugInfo* debug_info) {
+ DCHECK(!HasDebugInfo());
+ DCHECK_EQ(debug_info->function_identifier(),
+ function_identifier_or_debug_info());
+ set_function_identifier_or_debug_info(debug_info);
+}
+
bool SharedFunctionInfo::HasBuiltinFunctionId() {
return function_identifier()->IsSmi();
}
@@ -536,7 +711,8 @@ BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
}
void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
- set_function_identifier(Smi::FromInt(id));
+ DCHECK(!HasDebugInfo());
+ set_function_identifier_or_debug_info(Smi::FromInt(id));
}
bool SharedFunctionInfo::HasInferredName() {
@@ -547,19 +723,23 @@ String* SharedFunctionInfo::inferred_name() {
if (HasInferredName()) {
return String::cast(function_identifier());
}
- DCHECK(function_identifier()->IsUndefined(GetIsolate()) ||
- HasBuiltinFunctionId());
- return GetHeap()->empty_string();
+ DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
+ return GetReadOnlyRoots().empty_string();
}
void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
- DCHECK(function_identifier()->IsUndefined(GetIsolate()) || HasInferredName());
- set_function_identifier(inferred_name);
+ DCHECK(function_identifier_or_debug_info()->IsUndefined() ||
+ HasInferredName() || HasDebugInfo());
+ if (HasDebugInfo()) {
+ GetDebugInfo()->set_function_identifier(inferred_name);
+ } else {
+ set_function_identifier_or_debug_info(inferred_name);
+ }
}
bool SharedFunctionInfo::IsUserJavaScript() {
Object* script_obj = script();
- if (script_obj->IsUndefined(GetIsolate())) return false;
+ if (script_obj->IsUndefined()) return false;
Script* script = Script::cast(script_obj);
return script->IsUserJavaScript();
}
@@ -568,34 +748,52 @@ bool SharedFunctionInfo::IsSubjectToDebugging() {
return IsUserJavaScript() && !HasAsmWasmData();
}
-bool SharedFunctionInfo::CanFlushCompiled() const {
- bool can_decompile =
- (HasBytecodeArray() || HasAsmWasmData() || HasPreParsedScopeData());
+bool SharedFunctionInfo::CanDiscardCompiled() const {
+ bool can_decompile = (HasBytecodeArray() || HasAsmWasmData() ||
+ HasUncompiledDataWithPreParsedScope());
return can_decompile;
}
-void SharedFunctionInfo::FlushCompiled() {
- DisallowHeapAllocation no_gc;
+// static
+void SharedFunctionInfo::DiscardCompiled(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
+ DCHECK(shared_info->CanDiscardCompiled());
- DCHECK(CanFlushCompiled());
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->FunctionLiteralId(isolate);
- Oddball* the_hole = GetIsolate()->heap()->the_hole_value();
+ if (shared_info->is_compiled()) {
+ DisallowHeapAllocation no_gc;
- if (is_compiled()) {
- HeapObject* outer_scope_info = the_hole;
- if (!is_toplevel()) {
- if (scope_info()->HasOuterScopeInfo()) {
- outer_scope_info = scope_info()->OuterScopeInfo();
- }
+ HeapObject* outer_scope_info;
+ if (shared_info->scope_info()->HasOuterScopeInfo()) {
+ outer_scope_info = shared_info->scope_info()->OuterScopeInfo();
+ } else {
+ outer_scope_info = ReadOnlyRoots(isolate).the_hole_value();
}
// Raw setter to avoid validity checks, since we're performing the unusual
// task of decompiling.
- set_raw_outer_scope_info_or_feedback_metadata(outer_scope_info);
+ shared_info->set_raw_outer_scope_info_or_feedback_metadata(
+ outer_scope_info);
} else {
- DCHECK(outer_scope_info()->IsScopeInfo() || is_toplevel());
+ DCHECK(shared_info->outer_scope_info()->IsScopeInfo() ||
+ shared_info->outer_scope_info()->IsTheHole());
}
- set_builtin_id(Builtins::kCompileLazy);
+ if (shared_info->HasUncompiledDataWithPreParsedScope()) {
+ // If this is uncompiled data with a pre-parsed scope data, we can just
+ // clear out the scope data and keep the uncompiled data.
+ shared_info->ClearPreParsedScopeData();
+ } else {
+ // Create a new UncompiledData, without pre-parsed scope, and update the
+ // function data to point to it. Use the raw function data setter to avoid
+ // validity checks, since we're performing the unusual task of decompiling.
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
+ start_position, end_position, function_literal_id);
+ shared_info->set_function_data(*data);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 24db56158d..c8684ea2f0 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -20,23 +20,128 @@ class CoverageInfo;
class DebugInfo;
class WasmExportedFunctionData;
-class PreParsedScopeData : public Struct {
+// Data collected by the pre-parser storing information about scopes and inner
+// functions.
+class PreParsedScopeData : public HeapObject {
public:
DECL_ACCESSORS(scope_data, PodArray<uint8_t>)
- DECL_ACCESSORS(child_data, FixedArray)
+ DECL_INT_ACCESSORS(length)
- static const int kScopeDataOffset = Struct::kHeaderSize;
- static const int kChildDataOffset = kScopeDataOffset + kPointerSize;
- static const int kSize = kChildDataOffset + kPointerSize;
+ inline Object* child_data(int index) const;
+ inline void set_child_data(int index, Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ inline Object** child_data_start() const;
+
+ // Clear uninitialized padding space.
+ inline void clear_padding();
DECL_CAST(PreParsedScopeData)
DECL_PRINTER(PreParsedScopeData)
DECL_VERIFIER(PreParsedScopeData)
+#define PRE_PARSED_SCOPE_DATA_FIELDS(V) \
+ V(kScopeDataOffset, kPointerSize) \
+ V(kLengthOffset, kIntSize) \
+ V(kUnalignedChildDataStartOffset, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ PRE_PARSED_SCOPE_DATA_FIELDS)
+#undef PRE_PARSED_SCOPE_DATA_FIELDS
+
+ static const int kChildDataStartOffset =
+ POINTER_SIZE_ALIGN(kUnalignedChildDataStartOffset);
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ static constexpr int SizeFor(int length) {
+ return kChildDataStartOffset + length * kPointerSize;
+ }
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PreParsedScopeData);
};
+// Abstract class representing extra data for an uncompiled function, which is
+// not stored in the SharedFunctionInfo.
+class UncompiledData : public HeapObject {
+ public:
+ DECL_INT32_ACCESSORS(start_position)
+ DECL_INT32_ACCESSORS(end_position)
+ DECL_INT32_ACCESSORS(function_literal_id)
+
+ DECL_CAST(UncompiledData)
+
+#define UNCOMPILED_DATA_FIELDS(V) \
+ V(kStartPositionOffset, kInt32Size) \
+ V(kEndPositionOffset, kInt32Size) \
+ V(kFunctionLiteralIdOffset, kInt32Size) \
+ /* Total size. */ \
+ V(kUnalignedSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS)
+#undef UNCOMPILED_DATA_FIELDS
+
+ static const int kSize = POINTER_SIZE_ALIGN(kUnalignedSize);
+
+ // Clear uninitialized padding space.
+ inline void clear_padding();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledData);
+};
+
+// Class representing data for an uncompiled function that does not have any
+// data from the pre-parser, either because it's a leaf function or because the
+// pre-parser bailed out.
+class UncompiledDataWithoutPreParsedScope : public UncompiledData {
+ public:
+ DECL_CAST(UncompiledDataWithoutPreParsedScope)
+ DECL_PRINTER(UncompiledDataWithoutPreParsedScope)
+ DECL_VERIFIER(UncompiledDataWithoutPreParsedScope)
+
+ static const int kSize = UncompiledData::kSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithoutPreParsedScope);
+};
+
+// Class representing data for an uncompiled function that has pre-parsed scope
+// data.
+class UncompiledDataWithPreParsedScope : public UncompiledData {
+ public:
+ DECL_ACCESSORS(pre_parsed_scope_data, PreParsedScopeData)
+
+ DECL_CAST(UncompiledDataWithPreParsedScope)
+ DECL_PRINTER(UncompiledDataWithPreParsedScope)
+ DECL_VERIFIER(UncompiledDataWithPreParsedScope)
+
+#define UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS(V) \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kPreParsedScopeDataOffset, kPointerSize) \
+ V(kEndOfPointerFieldsOffset, 0) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(UncompiledData::kSize,
+ UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS)
+#undef UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS
+
+ // Make sure the size is aligned
+ STATIC_ASSERT(kSize == POINTER_SIZE_ALIGN(kSize));
+
+ typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
+ kEndOfPointerFieldsOffset, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithPreParsedScope);
+};
+
class InterpreterData : public Struct {
public:
DECL_ACCESSORS(bytecode_array, BytecodeArray)
@@ -57,8 +162,11 @@ class InterpreterData : public Struct {
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
-class SharedFunctionInfo : public HeapObject {
+class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
static constexpr Object* const kNoSharedNameSentinel = Smi::kZero;
// [name]: Returns shared name if it exists or an empty string otherwise.
@@ -83,7 +191,7 @@ class SharedFunctionInfo : public HeapObject {
// function info is added to the list on the script.
V8_EXPORT_PRIVATE static void SetScript(
Handle<SharedFunctionInfo> shared, Handle<Object> script_object,
- bool reset_preparsed_scope_data = true);
+ int function_literal_id, bool reset_preparsed_scope_data = true);
// Layout description of the optimized code map.
static const int kEntriesStart = 0;
@@ -93,7 +201,7 @@ class SharedFunctionInfo : public HeapObject {
static const int kInitialLength = kEntriesStart + kEntryLength;
static const int kNotFound = -1;
- static const int kInvalidLength = -1;
+ static const uint16_t kInvalidLength = static_cast<uint16_t>(-1);
// Helpers for assembly code that does a backwards walk of the optimized code
// map.
@@ -112,6 +220,10 @@ class SharedFunctionInfo : public HeapObject {
// Start position of this function in the script source.
inline int StartPosition() const;
+ // Set the start and end position of this function in the script source.
+ // Updates the scope info if available.
+ inline void SetPosition(int start_position, int end_position);
+
// [outer scope info | feedback metadata] Shared storage for outer scope info
// (on uncompiled functions) and feedback metadata (on compiled functions).
DECL_ACCESSORS(raw_outer_scope_info_or_feedback_metadata, HeapObject)
@@ -129,16 +241,17 @@ class SharedFunctionInfo : public HeapObject {
inline bool is_compiled() const;
// [length]: The function length - usually the number of declared parameters.
- // Use up to 2^30 parameters. The value is only reliable when the function has
- // been compiled.
- inline int GetLength() const;
+ // Use up to 2^16-2 parameters (16 bits of values, where one is reserved for
+ // kDontAdaptArgumentsSentinel). The value is only reliable when the function
+ // has been compiled.
+ inline uint16_t GetLength() const;
inline bool HasLength() const;
inline void set_length(int value);
// [internal formal parameter count]: The declared number of parameters.
// For subclass constructors, also includes new.target.
// The size of function's frame is internal_formal_parameter_count + 1.
- DECL_INT_ACCESSORS(internal_formal_parameter_count)
+ DECL_UINT16_ACCESSORS(internal_formal_parameter_count)
// Set the formal parameter count so the function code will be
// called without using argument adaptor frames.
@@ -146,12 +259,7 @@ class SharedFunctionInfo : public HeapObject {
// [expected_nof_properties]: Expected number of properties for the
// function. The value is only reliable when the function has been compiled.
- DECL_INT_ACCESSORS(expected_nof_properties)
-
- // [function_literal_id] - uniquely identifies the FunctionLiteral this
- // SharedFunctionInfo represents within its script, or -1 if this
- // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
- DECL_INT_ACCESSORS(function_literal_id)
+ DECL_UINT16_ACCESSORS(expected_nof_properties)
#if V8_SFI_HAS_UNIQUE_ID
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
@@ -167,7 +275,10 @@ class SharedFunctionInfo : public HeapObject {
// interpreter trampoline [HasInterpreterData()]
// - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
// - a Smi containing the builtin id [HasBuiltinId()]
- // - a PreParsedScopeData for the parser [HasPreParsedScopeData()]
+  //  - an UncompiledDataWithoutPreParsedScope for lazy compilation
+ // [HasUncompiledDataWithoutPreParsedScope()]
+  //  - an UncompiledDataWithPreParsedScope for lazy compilation
+ // [HasUncompiledDataWithPreParsedScope()]
// - a WasmExportedFunctionData for Wasm [HasWasmExportedFunctionData()]
DECL_ACCESSORS(function_data, Object)
@@ -176,14 +287,17 @@ class SharedFunctionInfo : public HeapObject {
inline void set_api_func_data(FunctionTemplateInfo* data);
inline bool HasBytecodeArray() const;
inline BytecodeArray* GetBytecodeArray() const;
- inline void set_bytecode_array(class BytecodeArray* bytecode);
+ inline void set_bytecode_array(BytecodeArray* bytecode);
inline Code* InterpreterTrampoline() const;
inline bool HasInterpreterData() const;
inline InterpreterData* interpreter_data() const;
inline void set_interpreter_data(InterpreterData* interpreter_data);
+ inline BytecodeArray* GetDebugBytecodeArray() const;
+ inline void SetDebugBytecodeArray(BytecodeArray* bytecode);
inline bool HasAsmWasmData() const;
inline FixedArray* asm_wasm_data() const;
inline void set_asm_wasm_data(FixedArray* data);
+
// A brief note to clear up possible confusion:
// builtin_id corresponds to the auto-generated
// Builtins::Name id, while builtin_function_id corresponds to
@@ -192,24 +306,34 @@ class SharedFunctionInfo : public HeapObject {
inline bool HasBuiltinId() const;
inline int builtin_id() const;
inline void set_builtin_id(int builtin_id);
- inline bool HasPreParsedScopeData() const;
- inline PreParsedScopeData* preparsed_scope_data() const;
- inline void set_preparsed_scope_data(PreParsedScopeData* data);
- inline void ClearPreParsedScopeData();
+ inline bool HasUncompiledData() const;
+ inline UncompiledData* uncompiled_data() const;
+ inline void set_uncompiled_data(UncompiledData* data);
+ inline bool HasUncompiledDataWithPreParsedScope() const;
+ inline UncompiledDataWithPreParsedScope*
+ uncompiled_data_with_pre_parsed_scope() const;
+ inline void set_uncompiled_data_with_pre_parsed_scope(
+ UncompiledDataWithPreParsedScope* data);
+ inline bool HasUncompiledDataWithoutPreParsedScope() const;
inline bool HasWasmExportedFunctionData() const;
inline WasmExportedFunctionData* wasm_exported_function_data() const;
inline void set_wasm_exported_function_data(WasmExportedFunctionData* data);
+ // Clear out pre-parsed scope data from UncompiledDataWithPreParsedScope,
+ // turning it into UncompiledDataWithoutPreParsedScope.
+ inline void ClearPreParsedScopeData();
+
// [function identifier]: This field holds an additional identifier for the
// function.
// - a Smi identifying a builtin function [HasBuiltinFunctionId()].
// - a String identifying the function's inferred name [HasInferredName()].
+ // - a DebugInfo which holds the actual function_identifier [HasDebugInfo()].
// The inferred_name is inferred from variable or property
// assignment of this function. It is used to facilitate debugging and
// profiling of JavaScript code written in OO style, where almost
// all functions are anonymous but are assigned to object
// properties.
- DECL_ACCESSORS(function_identifier, Object)
+ DECL_ACCESSORS(function_identifier_or_debug_info, Object)
inline bool HasBuiltinFunctionId();
inline BuiltinFunctionId builtin_function_id();
@@ -218,12 +342,13 @@ class SharedFunctionInfo : public HeapObject {
inline String* inferred_name();
inline void set_inferred_name(String* inferred_name);
- // [script]: Script from which the function originates.
- DECL_ACCESSORS(script, Object)
+ // Get the function literal id associated with this function, for parsing.
+ inline int FunctionLiteralId(Isolate* isolate) const;
// The function is subject to debugging if a debug info is attached.
inline bool HasDebugInfo() const;
- DebugInfo* GetDebugInfo() const;
+ inline DebugInfo* GetDebugInfo() const;
+ inline void SetDebugInfo(DebugInfo* debug_info);
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
@@ -235,88 +360,37 @@ class SharedFunctionInfo : public HeapObject {
bool HasCoverageInfo() const;
CoverageInfo* GetCoverageInfo() const;
- // [debug info]: Debug information.
- DECL_ACCESSORS(debug_info, Object)
-
- // Bit field containing various information collected for debugging.
- // This field is either stored on the kDebugInfo slot or inside the
- // debug info struct.
- int debugger_hints() const;
- void set_debugger_hints(int value);
-
- // Indicates that the function was created by the Function function.
- // Though it's anonymous, toString should treat it as if it had the name
- // "anonymous". We don't set the name itself so that the system does not
- // see a binding for it.
- DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
-
- // Indicates that the function is either an anonymous expression
- // or an arrow function (the name field can be set through the API,
- // which does not change this flag).
- DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
-
- // Indicates that the the shared function info is deserialized from cache.
- DECL_BOOLEAN_ACCESSORS(deserialized)
-
- // Indicates that the function should be skipped during stepping.
- DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
-
- // Indicates that |debug_is_blackboxed| has been computed and set.
- DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
-
- // Indicates that the function has been reported for binary code coverage.
- DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
-
- // Id assigned to the function for debugging.
- // This could also be implemented as a weak hash table.
- inline int debugging_id() const;
- inline void set_debugging_id(int value);
-
// The function's name if it is non-empty, otherwise the inferred name.
String* DebugName();
- enum SideEffectState {
- kNotComputed = 0,
- kHasSideEffects = 1,
- kRequiresRuntimeChecks = 2,
- kHasNoSideEffect = 3,
- };
- static SideEffectState GetSideEffectState(Handle<SharedFunctionInfo> info);
-
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
- // Position of the 'function' token in the script source.
- DECL_INT_ACCESSORS(function_token_position)
-
- // [raw_start_position_and_type]: Field used to store both the source code
- // position, whether or not the function is a function expression,
- // and whether or not the function is a toplevel function. The two
- // least significants bit indicates whether the function is an
- // expression and the rest contains the source code position.
- // TODO(cbruni): start_position should be removed from SFI.
- DECL_INT_ACCESSORS(raw_start_position_and_type)
+ // [script]: Script from which the function originates.
+ DECL_ACCESSORS(script, Object)
- // Position of this function in the script source.
- // TODO(cbruni): start_position should be removed from SFI.
- DECL_INT_ACCESSORS(raw_start_position)
+ // The offset of the 'function' token in the script source relative to the
+ // start position. Can return kFunctionTokenOutOfRange if offset doesn't
+ // fit in 16 bits.
+ DECL_UINT16_ACCESSORS(raw_function_token_offset)
- // End position of this function in the script source.
- // TODO(cbruni): end_position should be removed from SFI.
- DECL_INT_ACCESSORS(raw_end_position)
+ // The position of the 'function' token in the script source. Can return
+ // kNoSourcePosition if raw_function_token_offset() returns
+ // kFunctionTokenOutOfRange.
+ inline int function_token_position() const;
// Returns true if the function has shared name.
inline bool HasSharedName() const;
+ // [flags] Bit field containing various flags about the function.
+ DECL_INT_ACCESSORS(flags)
+
// Is this function a named function expression in the source code.
DECL_BOOLEAN_ACCESSORS(is_named_expression)
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
// Indicates if this function can be lazy compiled.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
@@ -342,6 +416,23 @@ class SharedFunctionInfo : public HeapObject {
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
+ // Indicates that the function was created by the Function function.
+ // Though it's anonymous, toString should treat it as if it had the name
+ // "anonymous". We don't set the name itself so that the system does not
+ // see a binding for it.
+ DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+ // Indicates that the function is either an anonymous expression
+ // or an arrow function (the name field can be set through the API,
+ // which does not change this flag).
+ DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
+
+  // Indicates that the shared function info is deserialized from cache.
+ DECL_BOOLEAN_ACCESSORS(deserialized)
+
+ // Indicates that the function has been reported for binary code coverage.
+ DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
+
inline FunctionKind kind() const;
// Defines the index in a native context of closure's map instantiated using
@@ -390,11 +481,12 @@ class SharedFunctionInfo : public HeapObject {
// True if one can flush compiled code from this function, in such a way that
// it can later be re-compiled.
- inline bool CanFlushCompiled() const;
+ inline bool CanDiscardCompiled() const;
// Flush compiled data from this function, setting it back to CompileLazy and
// clearing any feedback metadata.
- inline void FlushCompiled();
+ static inline void DiscardCompiled(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info);
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -414,6 +506,11 @@ class SharedFunctionInfo : public HeapObject {
// Sets the expected number of properties based on estimate from parser.
void SetExpectedNofPropertiesFromEstimate(FunctionLiteral* literal);
+ // Sets the FunctionTokenOffset field based on the given token position and
+ // start position.
+ void SetFunctionTokenPosition(int function_token_position,
+ int start_position);
+
inline bool construct_as_builtin() const;
// Determines and sets the ConstructAsBuiltinBit in |flags|, based on the
@@ -433,13 +530,14 @@ class SharedFunctionInfo : public HeapObject {
// Iterate over all shared function infos in a given script.
class ScriptIterator {
public:
- explicit ScriptIterator(Handle<Script> script);
+ ScriptIterator(Isolate* isolate, Script* script);
ScriptIterator(Isolate* isolate,
Handle<WeakFixedArray> shared_function_infos);
SharedFunctionInfo* Next();
+ int CurrentIndex() const { return index_ - 1; }
// Reset the iterator to run on |script|.
- void Reset(Handle<Script> script);
+ void Reset(Script* script);
private:
Isolate* isolate_;
@@ -465,7 +563,11 @@ class SharedFunctionInfo : public HeapObject {
DECL_CAST(SharedFunctionInfo)
// Constants.
- static const int kDontAdaptArgumentsSentinel = -1;
+ static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+
+ static const int kMaximumFunctionTokenOffset = kMaxUInt16 - 1;
+ static const uint16_t kFunctionTokenOutOfRange = static_cast<uint16_t>(-1);
+ STATIC_ASSERT(kMaximumFunctionTokenOffset + 1 == kFunctionTokenOutOfRange);
#if V8_SFI_HAS_UNIQUE_ID
static const int kUniqueIdFieldSize = kInt32Size;
@@ -482,18 +584,14 @@ class SharedFunctionInfo : public HeapObject {
V(kNameOrScopeInfoOffset, kPointerSize) \
V(kOuterScopeInfoOrFeedbackMetadataOffset, kPointerSize) \
V(kScriptOffset, kPointerSize) \
- V(kDebugInfoOffset, kPointerSize) \
- V(kFunctionIdentifierOffset, kPointerSize) \
+ V(kFunctionIdentifierOrDebugInfoOffset, kPointerSize) \
V(kEndOfPointerFieldsOffset, 0) \
/* Raw data fields. */ \
- V(kFunctionLiteralIdOffset, kInt32Size) \
V(kUniqueIdOffset, kUniqueIdFieldSize) \
- V(kLengthOffset, kInt32Size) \
- V(kFormalParameterCountOffset, kInt32Size) \
- V(kExpectedNofPropertiesOffset, kInt32Size) \
- V(kStartPositionAndTypeOffset, kInt32Size) \
- V(kEndPositionOffset, kInt32Size) \
- V(kFunctionTokenPositionOffset, kInt32Size) \
+ V(kLengthOffset, kUInt16Size) \
+ V(kFormalParameterCountOffset, kUInt16Size) \
+ V(kExpectedNofPropertiesOffset, kUInt16Size) \
+ V(kFunctionTokenOffsetOffset, kUInt16Size) \
V(kFlagsOffset, kInt32Size) \
/* Total size. */ \
V(kSize, 0)
@@ -510,15 +608,6 @@ class SharedFunctionInfo : public HeapObject {
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
-// Bit fields in |raw_start_position_and_type|.
-#define START_POSITION_AND_TYPE_BIT_FIELDS(V, _) \
- V(IsNamedExpressionBit, bool, 1, _) \
- V(IsTopLevelBit, bool, 1, _) \
- V(StartPositionBits, int, 30, _)
-
- DEFINE_BIT_FIELDS(START_POSITION_AND_TYPE_BIT_FIELDS)
-#undef START_POSITION_AND_TYPE_BIT_FIELDS
-
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
@@ -535,8 +624,13 @@ class SharedFunctionInfo : public HeapObject {
V(FunctionMapIndexBits, int, 5, _) \
V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
V(RequiresInstanceFieldsInitializer, bool, 1, _) \
- V(ConstructAsBuiltinBit, bool, 1, _)
-
+ V(ConstructAsBuiltinBit, bool, 1, _) \
+ V(IsAnonymousExpressionBit, bool, 1, _) \
+ V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
+ V(IsDeserializedBit, bool, 1, _) \
+ V(HasReportedBinaryCoverageBit, bool, 1, _) \
+ V(IsNamedExpressionBit, bool, 1, _) \
+ V(IsTopLevelBit, bool, 1, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
@@ -546,22 +640,6 @@ class SharedFunctionInfo : public HeapObject {
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
-// Bit positions in |debugger_hints|.
-#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
- V(IsAnonymousExpressionBit, bool, 1, _) \
- V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
- V(IsDeserializedBit, bool, 1, _) \
- V(SideEffectStateBits, int, 2, _) \
- V(DebugIsBlackboxedBit, bool, 1, _) \
- V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
- V(HasReportedBinaryCoverageBit, bool, 1, _) \
- V(DebuggingIdBits, int, 20, _)
-
- DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
-#undef DEBUGGER_HINTS_BIT_FIELDS
-
- static const int kNoDebuggingId = 0;
-
// Indicates that this function uses a super property (or an eval that may
// use a super property).
// This is needed to set up the [[HomeObject]] on the function instance.
@@ -576,8 +654,7 @@ class SharedFunctionInfo : public HeapObject {
// function.
DECL_ACCESSORS(outer_scope_info, HeapObject)
- inline int side_effect_state() const;
- inline void set_side_effect_state(int value);
+ inline Object* function_identifier() const;
inline void set_kind(FunctionKind kind);
@@ -587,7 +664,11 @@ class SharedFunctionInfo : public HeapObject {
friend class V8HeapExplorer;
FRIEND_TEST(PreParserTest, LazyFunctionLength);
- inline int length() const;
+ inline uint16_t length() const;
+
+ // Find the index of this function in the parent script. Slow path of
+ // FunctionLiteralId.
+ int FindIndexInScript(Isolate* isolate) const;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index a5d02aec80..235bdcd8e7 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -195,7 +195,7 @@ Char FlatStringReader::Get(int index) {
template <typename Char>
class SequentialStringKey : public StringTableKey {
public:
- explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
+ explicit SequentialStringKey(Vector<const Char> string, uint64_t seed)
: StringTableKey(StringHasher::HashSequentialString<Char>(
string.start(), string.length(), seed)),
string_(string) {}
@@ -205,7 +205,7 @@ class SequentialStringKey : public StringTableKey {
class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
- OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ OneByteStringKey(Vector<const uint8_t> str, uint64_t seed)
: SequentialStringKey<uint8_t>(str, seed) {}
bool IsMatch(Object* string) override {
@@ -225,9 +225,10 @@ class SeqOneByteSubStringKey : public StringTableKey {
#pragma warning(push)
#pragma warning(disable : 4789)
#endif
- SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
+ SeqOneByteSubStringKey(Isolate* isolate, Handle<SeqOneByteString> string,
+ int from, int length)
: StringTableKey(StringHasher::HashSequentialString(
- string->GetChars() + from, length, string->GetHeap()->HashSeed())),
+ string->GetChars() + from, length, isolate->heap()->HashSeed())),
string_(string),
from_(from),
length_(length) {
@@ -250,7 +251,7 @@ class SeqOneByteSubStringKey : public StringTableKey {
class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ explicit TwoByteStringKey(Vector<const uc16> str, uint64_t seed)
: SequentialStringKey<uc16>(str, seed) {}
bool IsMatch(Object* string) override {
@@ -263,7 +264,7 @@ class TwoByteStringKey : public SequentialStringKey<uc16> {
// Utf8StringKey carries a vector of chars as key.
class Utf8StringKey : public StringTableKey {
public:
- explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
+ explicit Utf8StringKey(Vector<const char> string, uint64_t seed)
: StringTableKey(StringHasher::ComputeUtf8Hash(string, seed, &chars_)),
string_(string) {}
@@ -289,25 +290,26 @@ bool String::Equals(String* other) {
return SlowEquals(other);
}
-bool String::Equals(Handle<String> one, Handle<String> two) {
+bool String::Equals(Isolate* isolate, Handle<String> one, Handle<String> two) {
if (one.is_identical_to(two)) return true;
if (one->IsInternalizedString() && two->IsInternalizedString()) {
return false;
}
- return SlowEquals(one, two);
+ return SlowEquals(isolate, one, two);
}
-Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
+Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
+ PretenureFlag pretenure) {
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
if (cons->IsFlat()) {
- string = handle(cons->first());
+ string = handle(cons->first(), isolate);
} else {
- return SlowFlatten(cons, pretenure);
+ return SlowFlatten(isolate, cons, pretenure);
}
}
if (string->IsThinString()) {
- string = handle(Handle<ThinString>::cast(string)->actual());
+ string = handle(Handle<ThinString>::cast(string)->actual(), isolate);
DCHECK(!string->IsConsString());
}
return string;
@@ -491,10 +493,11 @@ String* SlicedString::parent() {
return String::cast(READ_FIELD(this, kParentOffset));
}
-void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
+void SlicedString::set_parent(Isolate* isolate, String* parent,
+ WriteBarrierMode mode) {
DCHECK(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
+ CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kParentOffset, parent, mode);
}
SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
@@ -505,9 +508,10 @@ String* ConsString::first() {
Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }
-void ConsString::set_first(String* value, WriteBarrierMode mode) {
+void ConsString::set_first(Isolate* isolate, String* value,
+ WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kFirstOffset, value, mode);
}
String* ConsString::second() {
@@ -518,9 +522,10 @@ Object* ConsString::unchecked_second() {
return RELAXED_READ_FIELD(this, kSecondOffset);
}
-void ConsString::set_second(String* value, WriteBarrierMode mode) {
+void ConsString::set_second(Isolate* isolate, String* value,
+ WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kSecondOffset, value, mode);
}
ACCESSORS(ThinString, actual, String, kActualOffset);
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 2c61f12cd3..8003bf1aac 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -67,13 +67,15 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
// Shink the StringTable if it's very empty (kMaxEmptyFactor) to avoid the
// performance overhead of re-allocating the StringTable over and over again.
- static Handle<StringTable> CautiousShrink(Handle<StringTable> table);
+ static Handle<StringTable> CautiousShrink(Isolate* isolate,
+ Handle<StringTable> table);
// Looks up a string that is equal to the given string and returns
// string handle if it is found, or an empty handle otherwise.
V8_WARN_UNUSED_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
Isolate* isolate, uint16_t c1, uint16_t c2);
- static Object* LookupStringIfExists_NoAllocate(String* string);
+ static Object* LookupStringIfExists_NoAllocate(Isolate* isolate,
+ String* string);
static void EnsureCapacityForDeserialization(Isolate* isolate, int expected);
@@ -103,9 +105,9 @@ class StringSetShape : public BaseShape<String*> {
class StringSet : public HashTable<StringSet, StringSetShape> {
public:
static Handle<StringSet> New(Isolate* isolate);
- static Handle<StringSet> Add(Handle<StringSet> blacklist,
+ static Handle<StringSet> Add(Isolate* isolate, Handle<StringSet> blacklist,
Handle<String> name);
- bool Has(Handle<String> name);
+ bool Has(Isolate* isolate, Handle<String> name);
DECL_CAST(StringSet)
};
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index bdf84911aa..ba036edee9 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -155,7 +155,7 @@ class String : public Name {
};
template <typename Char>
- INLINE(Vector<const Char> GetCharVector());
+ V8_INLINE Vector<const Char> GetCharVector();
// Get and set the length of the string.
inline int length() const;
@@ -187,10 +187,10 @@ class String : public Name {
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
// to this method are not efficient unless the string is flat.
- INLINE(uint16_t Get(int index));
+ V8_INLINE uint16_t Get(int index);
// ES6 section 7.1.3.1 ToNumber Applied to the String Type
- static Handle<Object> ToNumber(Handle<String> subject);
+ static Handle<Object> ToNumber(Isolate* isolate, Handle<String> subject);
// Flattens the string. Checks first inline to see if it is
// necessary. Does nothing if the string is not a cons string.
@@ -205,7 +205,7 @@ class String : public Name {
// Degenerate cons strings are handled specially by the garbage
// collector (see IsShortcutCandidate).
- static inline Handle<String> Flatten(Handle<String> string,
+ static inline Handle<String> Flatten(Isolate* isolate, Handle<String> string,
PretenureFlag pretenure = NOT_TENURED);
// Tries to return the content of a flat string as a structure holding either
@@ -230,7 +230,8 @@ class String : public Name {
// for strings containing supplementary characters, lexicographic ordering on
// sequences of UTF-16 code unit values differs from that on sequences of code
// point values.
- V8_WARN_UNUSED_RESULT static ComparisonResult Compare(Handle<String> x,
+ V8_WARN_UNUSED_RESULT static ComparisonResult Compare(Isolate* isolate,
+ Handle<String> x,
Handle<String> y);
// Perform ES6 21.1.3.8, including checking arguments.
@@ -278,7 +279,8 @@ class String : public Name {
// String equality operations.
inline bool Equals(String* other);
- inline static bool Equals(Handle<String> one, Handle<String> two);
+ inline static bool Equals(Isolate* isolate, Handle<String> one,
+ Handle<String> two);
bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
// Dispatches to Is{One,Two}ByteEqualTo.
@@ -316,7 +318,8 @@ class String : public Name {
// Trimming.
enum TrimMode { kTrim, kTrimStart, kTrimEnd };
- static Handle<String> Trim(Handle<String> string, TrimMode mode);
+ static Handle<String> Trim(Isolate* isolate, Handle<String> string,
+ TrimMode mode);
DECL_CAST(String)
@@ -437,7 +440,8 @@ class String : public Name {
static inline ConsString* VisitFlat(Visitor* visitor, String* string,
int offset = 0);
- static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
+ static Handle<FixedArray> CalculateLineEnds(Isolate* isolate,
+ Handle<String> string,
bool include_ending_line);
private:
@@ -445,20 +449,21 @@ class String : public Name {
friend class StringTableInsertionKey;
friend class InternalizedStringKey;
- static Handle<String> SlowFlatten(Handle<ConsString> cons,
+ static Handle<String> SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
PretenureFlag tenure);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
- static bool SlowEquals(Handle<String> one, Handle<String> two);
+ static bool SlowEquals(Isolate* isolate, Handle<String> one,
+ Handle<String> two);
// Slow case of AsArrayIndex.
V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
// Compute and set the hash code.
- uint32_t ComputeAndSetHash();
+ uint32_t ComputeAndSetHash(Isolate* isolate);
DISALLOW_IMPLICIT_CONSTRUCTORS(String);
};
@@ -588,7 +593,7 @@ class ConsString : public String {
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object* unchecked_first();
- inline void set_first(String* first,
+ inline void set_first(Isolate* isolate, String* first,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Second string of the cons cell.
@@ -596,7 +601,7 @@ class ConsString : public String {
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object* unchecked_second();
- inline void set_second(String* second,
+ inline void set_second(Isolate* isolate, String* second,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
@@ -670,7 +675,7 @@ class ThinString : public String {
class SlicedString : public String {
public:
inline String* parent();
- inline void set_parent(String* parent,
+ inline void set_parent(Isolate* isolate, String* parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int offset() const;
inline void set_offset(int offset);
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 24389c96a2..1218ba5a7d 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -14,9 +14,7 @@ namespace internal {
// static
Handle<JSArray> TemplateObjectDescription::CreateTemplateObject(
- Handle<TemplateObjectDescription> description) {
- Isolate* const isolate = description->GetIsolate();
-
+ Isolate* isolate, Handle<TemplateObjectDescription> description) {
// Create the raw object from the {raw_strings}.
Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 6c1a99831a..ad8ff95950 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -24,7 +24,7 @@ class TemplateObjectDescription final : public Tuple2 {
DECL_ACCESSORS(cooked_strings, FixedArray)
static Handle<JSArray> CreateTemplateObject(
- Handle<TemplateObjectDescription> description);
+ Isolate* isolate, Handle<TemplateObjectDescription> description);
DECL_CAST(TemplateObjectDescription)
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index f9f0056a11..6a229d847b 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -13,8 +13,11 @@
namespace v8 {
namespace internal {
-class TemplateInfo : public Struct {
+class TemplateInfo : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(serial_number, Object)
DECL_INT_ACCESSORS(number_of_properties)
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index dc932a1c04..09b8a0edea 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -23,7 +23,6 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
- dependencies_.reset(new CompilationDependencies(isolate, zone));
SetFlag(kCalledWithCodeStartRegister);
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
@@ -39,7 +38,7 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
- if (isolate->NeedsDetailedOptimizedCodeLineInfo()) {
+ if (isolate->NeedsSourcePositionsForProfiling()) {
MarkAsSourcePositionsEnabled();
}
@@ -78,7 +77,6 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
osr_offset_(BailoutId::None()),
zone_(zone),
deferred_handles_(nullptr),
- dependencies_(nullptr),
bailout_reason_(BailoutReason::kNoReason),
optimization_id_(-1),
debug_name_(debug_name) {}
@@ -87,9 +85,6 @@ OptimizedCompilationInfo::~OptimizedCompilationInfo() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
- if (dependencies()) {
- dependencies()->Rollback();
- }
}
void OptimizedCompilationInfo::set_deferred_handles(
@@ -104,12 +99,12 @@ void OptimizedCompilationInfo::set_deferred_handles(
deferred_handles_.reset(deferred_handles);
}
-void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope() {
+void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!shared_info_.is_null()) {
- shared_info_ = Handle<SharedFunctionInfo>(*shared_info_);
+ shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate);
}
if (!closure_.is_null()) {
- closure_ = Handle<JSFunction>(*closure_);
+ closure_ = Handle<JSFunction>(*closure_, isolate);
}
}
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index 174f97fde0..a8cb8d220b 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -8,7 +8,7 @@
#include <memory>
#include "src/bailout-reason.h"
-#include "src/compilation-dependencies.h"
+#include "src/code-reference.h"
#include "src/feedback-vector.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -58,18 +58,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
kTraceTurboScheduled = 1 << 16,
};
- // TODO(mtrofin): investigate if this might be generalized outside wasm, with
- // the goal of better separating the compiler from where compilation lands. At
- // that point, the Handle<Code> member of OptimizedCompilationInfo would also
- // be removed.
- struct WasmCodeDesc {
- CodeDesc code_desc;
- size_t safepoint_table_offset = 0;
- size_t handler_table_offset = 0;
- uint32_t frame_slot_count = 0;
- Handle<ByteArray> source_positions_table;
- };
-
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
@@ -85,7 +73,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool has_shared_info() const { return !shared_info().is_null(); }
Handle<JSFunction> closure() const { return closure_; }
- Handle<Code> code() const { return code_; }
+ Handle<Code> code() const { return code_.as_js_code(); }
+
+ wasm::WasmCode* wasm_code() const {
+ return const_cast<wasm::WasmCode*>(code_.as_wasm_code());
+ }
AbstractCode::Kind abstract_code_kind() const { return code_kind_; }
Code::Kind code_kind() const {
DCHECK(code_kind_ < static_cast<AbstractCode::Kind>(Code::NUMBER_OF_KINDS));
@@ -184,7 +176,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Code getters and setters.
- void SetCode(Handle<Code> code) { code_ = code; }
+ template <typename T>
+ void SetCode(T code) {
+ code_ = CodeReference(code);
+ }
bool has_context() const;
Context* context() const;
@@ -218,7 +213,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return deferred_handles_;
}
- void ReopenHandlesInNewHandleScope();
+ void ReopenHandlesInNewHandleScope(Isolate* isolate);
void AbortOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
@@ -234,8 +229,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutReason bailout_reason() const { return bailout_reason_; }
- CompilationDependencies* dependencies() { return dependencies_.get(); }
-
int optimization_id() const {
DCHECK(IsOptimizing());
return optimization_id_;
@@ -270,7 +263,13 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
StackFrame::Type GetOutputStackFrameType() const;
- WasmCodeDesc* wasm_code_desc() { return &wasm_code_desc_; }
+ const char* trace_turbo_filename() const {
+ return trace_turbo_filename_.get();
+ }
+
+ void set_trace_turbo_filename(std::unique_ptr<char[]> filename) {
+ trace_turbo_filename_ = std::move(filename);
+ }
private:
OptimizedCompilationInfo(Vector<const char> debug_name,
@@ -295,8 +294,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Handle<JSFunction> closure_;
// The compiled code.
- Handle<Code> code_;
- WasmCodeDesc wasm_code_desc_;
+ CodeReference code_;
// Entry point when compiling for OSR, {BailoutId::None} otherwise.
BailoutId osr_offset_;
@@ -307,9 +305,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
std::shared_ptr<DeferredHandles> deferred_handles_;
- // Dependencies for this compilation, e.g. stable maps.
- std::unique_ptr<CompilationDependencies> dependencies_;
-
BailoutReason bailout_reason_;
InlinedFunctionList inlined_functions_;
@@ -320,6 +315,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
JavaScriptFrame* osr_frame_ = nullptr;
Vector<const char> debug_name_;
+ std::unique_ptr<char[]> trace_turbo_filename_;
DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo);
};
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 66b57020ad..04486354cb 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -12,6 +12,11 @@
#endif
#endif
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define LOG_TAG "v8"
+#include <android/log.h> // NOLINT
+#endif
+
namespace v8 {
namespace internal {
@@ -37,7 +42,6 @@ std::streamsize OFStreamBase::xsputn(const char* s, std::streamsize n) {
std::fwrite(s, 1, static_cast<size_t>(n), f_));
}
-
OFStream::OFStream(FILE* f) : std::ostream(nullptr), buf_(f) {
DCHECK_NOT_NULL(f);
rdbuf(&buf_);
@@ -46,6 +50,32 @@ OFStream::OFStream(FILE* f) : std::ostream(nullptr), buf_(f) {
OFStream::~OFStream() {}
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+AndroidLogStream::~AndroidLogStream() {
+ // If there is anything left in the line buffer, print it now, even though it
+ // was not terminated by a newline.
+ if (!line_buffer_.empty()) {
+ __android_log_write(ANDROID_LOG_INFO, LOG_TAG, line_buffer_.c_str());
+ }
+}
+
+std::streamsize AndroidLogStream::xsputn(const char* s, std::streamsize n) {
+ const char* const e = s + n;
+ while (s < e) {
+ const char* newline = reinterpret_cast<const char*>(memchr(s, '\n', e - s));
+ size_t line_chars = (newline ? newline : e) - s;
+ line_buffer_.append(s, line_chars);
+ // Without terminating newline, keep the characters in the buffer for the
+ // next invocation.
+ if (!newline) break;
+ // Otherwise, write out the first line, then continue.
+ __android_log_write(ANDROID_LOG_INFO, LOG_TAG, line_buffer_.c_str());
+ line_buffer_.clear();
+ s = newline + 1;
+ }
+ return n;
+}
+#endif
namespace {
@@ -131,3 +161,6 @@ std::ostream& operator<<(std::ostream& os, const AsHexBytes& hex) {
} // namespace internal
} // namespace v8
+
+#undef snprintf
+#undef LOG_TAG
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index f2f961e379..c6b64a1cd9 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
-
class OFStreamBase : public std::streambuf {
public:
explicit OFStreamBase(FILE* f);
@@ -27,12 +26,11 @@ class OFStreamBase : public std::streambuf {
protected:
FILE* const f_;
- virtual int sync();
- virtual int_type overflow(int_type c);
- virtual std::streamsize xsputn(const char* s, std::streamsize n);
+ int sync() override;
+ int_type overflow(int_type c) override;
+ std::streamsize xsputn(const char* s, std::streamsize n) override;
};
-
// An output stream writing to a file.
class V8_EXPORT_PRIVATE OFStream : public std::ostream {
public:
@@ -43,6 +41,31 @@ class V8_EXPORT_PRIVATE OFStream : public std::ostream {
OFStreamBase buf_;
};
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+class V8_EXPORT_PRIVATE AndroidLogStream : public std::streambuf {
+ public:
+ virtual ~AndroidLogStream();
+
+ protected:
+ std::streamsize xsputn(const char* s, std::streamsize n) override;
+
+ private:
+ std::string line_buffer_;
+};
+
+class StdoutStream : public std::ostream {
+ public:
+ StdoutStream() : std::ostream(&stream_) {}
+
+ private:
+ AndroidLogStream stream_;
+};
+#else
+class StdoutStream : public OFStream {
+ public:
+ StdoutStream() : OFStream(stdout) {}
+};
+#endif
// Wrappers to disambiguate uint16_t and uc16.
struct AsUC16 {
@@ -92,6 +115,23 @@ struct AsHexBytes {
ByteOrder byte_order;
};
+template <typename T>
+struct PrintIteratorRange {
+ T start;
+ T end;
+ PrintIteratorRange(T start, T end) : start(start), end(end) {}
+};
+
+// Print any collection which can be iterated via std::begin and std::end.
+// {Iterator} is the common type of {std::begin} and {std::end} called on a
+// {const T&}. This function is only instantiable if that type exists.
+template <typename T, typename Iterator = typename std::common_type<
+ decltype(std::begin(std::declval<const T&>())),
+ decltype(std::end(std::declval<const T&>()))>::type>
+PrintIteratorRange<Iterator> PrintCollection(const T& collection) {
+ return {std::begin(collection), std::end(collection)};
+}
+
// Writes the given character to the output escaping everything outside of
// printable/space ASCII range. Additionally escapes '\' making escaping
// reversible.
@@ -113,6 +153,17 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsHex& v);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const AsHexBytes& v);
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const PrintIteratorRange<T>& range) {
+ const char* comma = "";
+ os << "[";
+ for (T it = range.start; it != range.end; ++it, comma = ", ") {
+ os << comma << *it;
+ }
+ os << "]";
+ return os;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index bdb0aeadd6..30e96d1688 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -55,7 +55,7 @@ void Reparenter::VisitClassLiteral(ClassLiteral* class_literal) {
#if DEBUG
// The same goes for the rest of the class, but we do some
// sanity checking in debug mode.
- ZoneList<ClassLiteralProperty*>* props = class_literal->properties();
+ ZonePtrList<ClassLiteralProperty>* props = class_literal->properties();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
// No need to visit the values, since all values are functions with
@@ -74,7 +74,7 @@ void Reparenter::VisitVariableProxy(VariableProxy* proxy) {
}
} else {
// Ensure that temporaries we find are already in the correct scope.
- DCHECK(proxy->var()->mode() != TEMPORARY ||
+ DCHECK(proxy->var()->mode() != VariableMode::kTemporary ||
proxy->var()->scope() == scope_->GetClosureScope());
}
}
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index b55c5ddd5d..4920877610 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -52,6 +52,12 @@ void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
}
}
+void FuncNameInferrer::Leave() {
+ DCHECK(IsOpen());
+ names_stack_.Rewind(entries_stack_.RemoveLast());
+ if (entries_stack_.is_empty()) funcs_to_infer_.Clear();
+}
+
const AstConsString* FuncNameInferrer::MakeNameFromStack() {
AstConsString* result = ast_value_factory_->NewConsString();
for (int pos = 0; pos < names_stack_.length(); pos++) {
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index 643909f701..21c4da3be9 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -96,11 +96,7 @@ class FuncNameInferrer : public ZoneObject {
void Enter() { entries_stack_.Add(names_stack_.length(), zone()); }
- void Leave() {
- DCHECK(IsOpen());
- names_stack_.Rewind(entries_stack_.RemoveLast());
- if (entries_stack_.is_empty()) funcs_to_infer_.Clear();
- }
+ void Leave();
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 451d2e8131..ee7e4b1569 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
+ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
: zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
@@ -25,6 +25,7 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
stack_limit_(0),
hash_seed_(0),
function_flags_(0),
+ script_id_(-1),
start_position_(0),
end_position_(0),
parameters_end_pos_(kNoSourcePosition),
@@ -36,13 +37,25 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
function_name_(nullptr),
runtime_call_stats_(nullptr),
source_range_map_(nullptr),
- literal_(nullptr) {}
+ literal_(nullptr) {
+ set_hash_seed(isolate->heap()->HashSeed());
+ set_stack_limit(isolate->stack_guard()->real_climit());
+ set_unicode_cache(isolate->unicode_cache());
+ set_runtime_call_stats(isolate->counters()->runtime_call_stats());
+ set_logger(isolate->logger());
+ set_ast_string_constants(isolate->ast_string_constants());
+ if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
+ if (isolate->is_collecting_type_profile()) set_collect_type_profile();
+}
-ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
- : ParseInfo(shared->GetIsolate()->allocator()) {
- Isolate* isolate = shared->GetIsolate();
- InitFromIsolate(isolate);
+ParseInfo::ParseInfo(Isolate* isolate)
+ : ParseInfo(isolate, isolate->allocator()) {
+ script_id_ = isolate->heap()->NextScriptId();
+ LOG(isolate, ScriptEvent(Logger::ScriptEventType::kReserveId, script_id_));
+}
+ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
+ : ParseInfo(isolate, isolate->allocator()) {
// Do not support re-parsing top-level function of a wrapped script.
// TODO(yangguo): consider whether we need a top-level function in a
// wrapped script at all.
@@ -55,19 +68,15 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_function_flags(shared->flags());
set_start_position(shared->StartPosition());
set_end_position(shared->EndPosition());
- function_literal_id_ = shared->function_literal_id();
+ function_literal_id_ = shared->FunctionLiteralId(isolate);
set_language_mode(shared->language_mode());
set_asm_wasm_broken(shared->is_asm_wasm_broken());
- Handle<Script> script(Script::cast(shared->script()));
+ Handle<Script> script(Script::cast(shared->script()), isolate);
set_script(script);
- set_native(script->type() == Script::TYPE_NATIVE);
- set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
- set_module(script->origin_options().IsModule());
- DCHECK(!(is_eval() && is_module()));
if (shared->HasOuterScopeInfo()) {
- set_outer_scope_info(handle(shared->GetOuterScopeInfo()));
+ set_outer_scope_info(handle(shared->GetOuterScopeInfo(), isolate));
}
// CollectTypeProfile uses its own feedback slots. If we have existing
@@ -78,70 +87,17 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
(shared->HasFeedbackMetadata()
? shared->feedback_metadata()->HasTypeProfileSlot()
: script->IsUserJavaScript()));
- if (block_coverage_enabled() && script->IsUserJavaScript()) {
- AllocateSourceRangeMap();
- }
}
-ParseInfo::ParseInfo(Handle<Script> script)
- : ParseInfo(script->GetIsolate()->allocator()) {
- InitFromIsolate(script->GetIsolate());
-
- set_allow_lazy_parsing();
- set_toplevel();
- set_script(script);
- set_wrapped_as_function(script->is_wrapped());
-
- set_native(script->type() == Script::TYPE_NATIVE);
- set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
- set_module(script->origin_options().IsModule());
- DCHECK(!(is_eval() && is_module()));
-
- set_collect_type_profile(script->GetIsolate()->is_collecting_type_profile() &&
+ParseInfo::ParseInfo(Isolate* isolate, Handle<Script> script)
+ : ParseInfo(isolate, isolate->allocator()) {
+ SetScriptForToplevelCompile(isolate, script);
+ set_collect_type_profile(isolate->is_collecting_type_profile() &&
script->IsUserJavaScript());
- if (block_coverage_enabled() && script->IsUserJavaScript()) {
- AllocateSourceRangeMap();
- }
}
ParseInfo::~ParseInfo() {}
-// static
-ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
- ParseInfo* p = new ParseInfo(isolate->allocator());
-
- p->InitFromIsolate(isolate);
- p->set_toplevel(shared->is_toplevel());
- p->set_allow_lazy_parsing(FLAG_lazy_inner_functions);
- p->set_is_named_expression(shared->is_named_expression());
- p->set_function_flags(shared->flags());
- p->set_start_position(shared->StartPosition());
- p->set_end_position(shared->EndPosition());
- p->function_literal_id_ = shared->function_literal_id();
- p->set_language_mode(shared->language_mode());
-
- // BUG(5946): This function exists as a workaround until we can
- // get rid of %SetCode in our native functions. The ParseInfo
- // is explicitly set up for the case that:
- // a) you have a native built-in,
- // b) it's being run for the 2nd-Nth time in an isolate,
- // c) we've already compiled bytecode and therefore don't need
- // to parse.
- // We tolerate a ParseInfo without a Script in this case.
- p->set_native(true);
- p->set_eval(false);
- p->set_module(false);
- DCHECK_NE(shared->kind(), FunctionKind::kModule);
-
- Handle<HeapObject> scope_info(shared->GetOuterScopeInfo());
- if (!scope_info->IsTheHole(isolate) &&
- Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
- p->set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
- }
- return p;
-}
-
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
bool ParseInfo::is_declaration() const {
@@ -157,18 +113,6 @@ bool ParseInfo::requires_instance_fields_initializer() const {
function_flags_);
}
-void ParseInfo::InitFromIsolate(Isolate* isolate) {
- DCHECK_NOT_NULL(isolate);
- set_hash_seed(isolate->heap()->HashSeed());
- set_stack_limit(isolate->stack_guard()->real_climit());
- set_unicode_cache(isolate->unicode_cache());
- set_runtime_call_stats(isolate->counters()->runtime_call_stats());
- set_logger(isolate->logger());
- set_ast_string_constants(isolate->ast_string_constants());
- if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
- if (isolate->is_collecting_type_profile()) set_collect_type_profile();
-}
-
void ParseInfo::EmitBackgroundParseStatisticsOnBackgroundThread() {
// If runtime call stats was enabled by tracing, emit a trace event at the
// end of background parsing on the background thread.
@@ -202,6 +146,38 @@ void ParseInfo::ShareZone(ParseInfo* other) {
zone_ = other->zone_;
}
+Handle<Script> ParseInfo::CreateScript(Isolate* isolate, Handle<String> source,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives) {
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script;
+ if (script_id_ == -1) {
+ script = isolate->factory()->NewScript(source);
+ } else {
+ script = isolate->factory()->NewScriptWithId(source, script_id_);
+ }
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
+ switch (natives) {
+ case NATIVES_CODE:
+ script->set_type(Script::TYPE_NATIVE);
+ break;
+ case EXTENSION_CODE:
+ script->set_type(Script::TYPE_EXTENSION);
+ break;
+ case INSPECTOR_CODE:
+ script->set_type(Script::TYPE_INSPECTOR);
+ break;
+ case NOT_NATIVES_CODE:
+ break;
+ }
+ script->set_origin_options(origin_options);
+
+ SetScriptForToplevelCompile(isolate, script);
+ return script;
+}
+
AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
if (!ast_value_factory_.get()) {
ast_value_factory_.reset(
@@ -228,5 +204,30 @@ void ParseInfo::set_character_stream(
character_stream_.swap(character_stream);
}
+void ParseInfo::SetScriptForToplevelCompile(Isolate* isolate,
+ Handle<Script> script) {
+ set_script(script);
+ set_allow_lazy_parsing();
+ set_toplevel();
+ set_collect_type_profile(isolate->is_collecting_type_profile() &&
+ script->IsUserJavaScript());
+ set_wrapped_as_function(script->is_wrapped());
+}
+
+void ParseInfo::set_script(Handle<Script> script) {
+ script_ = script;
+ DCHECK(script_id_ == -1 || script_id_ == script->id());
+ script_id_ = script->id();
+
+ set_native(script->type() == Script::TYPE_NATIVE);
+ set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ set_module(script->origin_options().IsModule());
+ DCHECK(!(is_eval() && is_module()));
+
+ if (block_coverage_enabled() && script->IsUserJavaScript()) {
+ AllocateSourceRangeMap();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 08f15c865c..4abf3a1fb0 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -37,15 +37,16 @@ class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
- explicit ParseInfo(AccountingAllocator* zone_allocator);
- ParseInfo(Handle<Script> script);
- ParseInfo(Handle<SharedFunctionInfo> shared);
+ ParseInfo(Isolate*);
+ ParseInfo(Isolate*, AccountingAllocator* zone_allocator);
+ ParseInfo(Isolate* isolate, Handle<Script> script);
+ ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared);
~ParseInfo();
- void InitFromIsolate(Isolate* isolate);
-
- static ParseInfo* AllocateWithoutScript(Handle<SharedFunctionInfo> shared);
+ Handle<Script> CreateScript(Isolate* isolate, Handle<String> source,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives = NOT_NATIVES_CODE);
// Either returns the ast-value-factory associcated with this ParseInfo, or
// creates and returns a new factory if none exists.
@@ -139,8 +140,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
- uint32_t hash_seed() const { return hash_seed_; }
- void set_hash_seed(uint32_t hash_seed) { hash_seed_ = hash_seed; }
+ uint64_t hash_seed() const { return hash_seed_; }
+ void set_hash_seed(uint64_t hash_seed) { hash_seed_ = hash_seed; }
int function_flags() const { return function_flags_; }
void set_function_flags(int function_flags) {
@@ -207,11 +208,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
MaybeHandle<ScopeInfo> maybe_outer_scope_info() const {
return maybe_outer_scope_info_;
}
- void clear_script() { script_ = Handle<Script>::null(); }
void set_outer_scope_info(Handle<ScopeInfo> outer_scope_info) {
maybe_outer_scope_info_ = outer_scope_info;
}
- void set_script(Handle<Script> script) { script_ = script; }
+
+ int script_id() const { return script_id_; }
//--------------------------------------------------------------------------
LanguageMode language_mode() const {
@@ -222,20 +223,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_strict_mode(is_strict(language_mode));
}
- void ReopenHandlesInNewHandleScope() {
- if (!script_.is_null()) {
- script_ = Handle<Script>(*script_);
- }
- Handle<ScopeInfo> outer_scope_info;
- if (maybe_outer_scope_info_.ToHandle(&outer_scope_info)) {
- maybe_outer_scope_info_ = Handle<ScopeInfo>(*outer_scope_info);
- }
- }
-
void EmitBackgroundParseStatisticsOnBackgroundThread();
void UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate);
private:
+ void SetScriptForToplevelCompile(Isolate* isolate, Handle<Script> script);
+ void set_script(Handle<Script> script);
+
// Various configuration flags for parsing.
enum Flag {
// ---------- Input flags ---------------------------
@@ -264,10 +258,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
DeclarationScope* script_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
- uint32_t hash_seed_;
+ uint64_t hash_seed_;
// TODO(leszeks): Move any remaining flags used here either to the flags_
// field or to other fields.
int function_flags_;
+ int script_id_;
int start_position_;
int end_position_;
int parameters_end_pos_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 3f79d3733a..6f6cff8e20 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -282,7 +282,6 @@ class ParserBase {
allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
- allow_harmony_optional_catch_binding_(false),
allow_harmony_private_fields_(false),
allow_eval_cache_(true) {}
@@ -296,7 +295,6 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
- ALLOW_ACCESSORS(harmony_optional_catch_binding);
ALLOW_ACCESSORS(eval_cache);
#undef ALLOW_ACCESSORS
@@ -407,6 +405,7 @@ class ParserBase {
void AddSuspend() { suspend_count_++; }
int suspend_count() const { return suspend_count_; }
+ bool CanSuspend() const { return suspend_count_ > 0; }
FunctionKind kind() const { return scope()->function_kind(); }
@@ -554,7 +553,7 @@ class ParserBase {
Scope* scope;
BlockT init_block;
BlockT inner_block;
- ZoneList<const AstRawString*> bound_names;
+ ZonePtrList<const AstRawString> bound_names;
};
struct ForInfo {
@@ -564,7 +563,7 @@ class ParserBase {
mode(ForEachStatement::ENUMERATE),
position(kNoSourcePosition),
parsing_result() {}
- ZoneList<const AstRawString*> bound_names;
+ ZonePtrList<const AstRawString> bound_names;
ForEachStatement::VisitMode mode;
int position;
DeclarationParsingResult parsing_result;
@@ -684,7 +683,7 @@ class ParserBase {
int script_id() { return script_id_; }
void set_script_id(int id) { script_id_ = id; }
- INLINE(Token::Value peek()) {
+ V8_INLINE Token::Value peek() {
if (stack_overflow()) return Token::ILLEGAL;
return scanner()->peek();
}
@@ -696,12 +695,12 @@ class ParserBase {
: scanner_->location().end_pos;
}
- INLINE(Token::Value PeekAhead()) {
+ V8_INLINE Token::Value PeekAhead() {
if (stack_overflow()) return Token::ILLEGAL;
return scanner()->PeekAhead();
}
- INLINE(Token::Value Next()) {
+ V8_INLINE Token::Value Next() {
if (stack_overflow()) return Token::ILLEGAL;
{
if (GetCurrentStackPosition() < stack_limit_) {
@@ -1192,17 +1191,17 @@ class ParserBase {
BlockT ParseVariableDeclarations(VariableDeclarationContext var_context,
DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
bool* ok);
- StatementT ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok);
+ StatementT ParseAsyncFunctionDeclaration(
+ ZonePtrList<const AstRawString>* names, bool default_export, bool* ok);
StatementT ParseFunctionDeclaration(bool* ok);
- StatementT ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
+ StatementT ParseHoistableDeclaration(ZonePtrList<const AstRawString>* names,
bool default_export, bool* ok);
StatementT ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
bool default_export, bool* ok);
- StatementT ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+ StatementT ParseClassDeclaration(ZonePtrList<const AstRawString>* names,
bool default_export, bool* ok);
StatementT ParseNativeDeclaration(bool* ok);
@@ -1233,22 +1232,22 @@ class ParserBase {
Token::Value end_token, bool may_abort,
bool* ok);
StatementT ParseStatementListItem(bool* ok);
- StatementT ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok) {
+ StatementT ParseStatement(ZonePtrList<const AstRawString>* labels, bool* ok) {
return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
}
- StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
AllowLabelledFunctionStatement allow_function,
bool* ok);
- BlockT ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+ BlockT ParseBlock(ZonePtrList<const AstRawString>* labels, bool* ok);
// Parse a SubStatement in strict mode, or with an extra block scope in
// sloppy mode to handle
// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- StatementT ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseScopedStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
StatementT ParseVariableStatement(VariableDeclarationContext var_context,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
bool* ok);
// Magical syntax support.
@@ -1259,43 +1258,45 @@ class ParserBase {
StatementT ParseDebuggerStatement(bool* ok);
StatementT ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels,
AllowLabelledFunctionStatement allow_function, bool* ok);
- StatementT ParseIfStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ StatementT ParseIfStatement(ZonePtrList<const AstRawString>* labels,
+ bool* ok);
StatementT ParseContinueStatement(bool* ok);
- StatementT ParseBreakStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseBreakStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
StatementT ParseReturnStatement(bool* ok);
- StatementT ParseWithStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseWithStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
- StatementT ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseDoWhileStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
- StatementT ParseWhileStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseWhileStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
StatementT ParseThrowStatement(bool* ok);
- StatementT ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+ StatementT ParseSwitchStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
StatementT ParseTryStatement(bool* ok);
- StatementT ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ StatementT ParseForStatement(ZonePtrList<const AstRawString>* labels,
+ bool* ok);
StatementT ParseForEachStatementWithDeclarations(
- int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+ int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
Scope* inner_block_scope, bool* ok);
StatementT ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
- ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok);
+ ForInfo* for_info, ZonePtrList<const AstRawString>* labels, bool* ok);
// Parse a C-style for loop: 'for (<init>; <cond>; <next>) { ... }'
// "for (<init>;" is assumed to have been parser already.
ForStatementT ParseStandardForLoop(int stmt_pos,
- ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels,
ExpressionT* cond, StatementT* next,
StatementT* body, bool* ok);
// Same as the above, but handles those cases where <init> is a
// lexical variable declaration.
StatementT ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
- ZoneList<const AstRawString*>* labels, bool* ok);
- StatementT ParseForAwaitStatement(ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels, bool* ok);
+ StatementT ParseForAwaitStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
bool IsNextLetKeyword();
@@ -1561,7 +1562,6 @@ class ParserBase {
bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
- bool allow_harmony_optional_catch_binding_;
bool allow_harmony_private_fields_;
bool allow_eval_cache_;
@@ -2944,7 +2944,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
}
expression = ParseArrowFunctionLiteral(accept_IN, parameters,
rewritable_length, CHECK_OK);
- DiscardExpressionClassifier();
+ Accumulate(ExpressionClassifier::AsyncArrowFormalParametersProduction);
classifier()->RecordPatternError(arrow_loc,
MessageTemplate::kUnexpectedToken,
Token::String(Token::ARROW));
@@ -3599,13 +3599,14 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseImportExpressions(
bool* ok) {
DCHECK(allow_harmony_dynamic_import());
+
+ classifier()->RecordPatternError(scanner()->peek_location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::IMPORT));
+
Consume(Token::IMPORT);
int pos = position();
if (allow_harmony_import_meta() && peek() == Token::PERIOD) {
- classifier()->RecordPatternError(
- Scanner::Location(pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidDestructuringTarget);
- ArrowFormalParametersUnexpectedToken();
ExpectMetaProperty(Token::META, "import.meta", pos, CHECK_OK);
if (!parsing_module_) {
impl()->ReportMessageAt(scanner()->location(),
@@ -3851,7 +3852,7 @@ template <typename Impl>
typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
VariableDeclarationContext var_context,
DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
// VariableDeclarations ::
// ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -3871,18 +3872,18 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
switch (peek()) {
case Token::VAR:
- parsing_result->descriptor.mode = VAR;
+ parsing_result->descriptor.mode = VariableMode::kVar;
Consume(Token::VAR);
break;
case Token::CONST:
Consume(Token::CONST);
DCHECK_NE(var_context, kStatement);
- parsing_result->descriptor.mode = CONST;
+ parsing_result->descriptor.mode = VariableMode::kConst;
break;
case Token::LET:
Consume(Token::LET);
DCHECK_NE(var_context, kStatement);
- parsing_result->descriptor.mode = LET;
+ parsing_result->descriptor.mode = VariableMode::kLet;
break;
default:
UNREACHABLE(); // by current callers
@@ -3947,7 +3948,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
} else {
if (var_context != kForStatement || !PeekInOrOf()) {
// ES6 'const' and binding patterns require initializers.
- if (parsing_result->descriptor.mode == CONST ||
+ if (parsing_result->descriptor.mode == VariableMode::kConst ||
!impl()->IsIdentifier(pattern)) {
impl()->ReportMessageAt(
Scanner::Location(decl_pos, scanner()->location().end_pos),
@@ -3957,7 +3958,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
return impl()->NullStatement();
}
// 'let x' initializes 'x' to undefined.
- if (parsing_result->descriptor.mode == LET) {
+ if (parsing_result->descriptor.mode == VariableMode::kLet) {
value = factory()->NewUndefinedLiteral(position());
}
}
@@ -4009,7 +4010,7 @@ ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseHoistableDeclaration(
- ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
int pos = position();
ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
@@ -4022,7 +4023,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseHoistableDeclaration(
- int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
+ int pos, ParseFunctionFlags flags, ZonePtrList<const AstRawString>* names,
bool default_export, bool* ok) {
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
@@ -4073,8 +4074,9 @@ ParserBase<Impl>::ParseHoistableDeclaration(
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
- (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
- : VAR;
+ (!scope()->is_declaration_scope() || scope()->is_module_scope())
+ ? VariableMode::kLet
+ : VariableMode::kVar;
// Async functions don't undergo sloppy mode block scoped hoisting, and don't
// allow duplicates in a block. Both are represented by the
// sloppy_block_function_map. Don't add them to the map for async functions.
@@ -4090,7 +4092,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
- ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
// ClassDeclaration ::
// 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
// 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
@@ -4160,7 +4162,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseAsyncFunctionDeclaration(
- ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
// AsyncFunctionDeclaration ::
// async [no LineTerminator here] function BindingIdentifier[Await]
// ( FormalParameters[Await] ) { AsyncFunctionBody }
@@ -4480,9 +4482,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
const char* event_name =
is_lazy_top_level_function ? "preparse-no-resolution" : "parse";
const char* name = "arrow function";
- logger_->FunctionEvent(event_name, nullptr, script_id(), ms,
- scope->start_position(), scope->end_position(), name,
- strlen(name));
+ logger_->FunctionEvent(event_name, script_id(), ms, scope->start_position(),
+ scope->end_position(), name, strlen(name));
}
return function_literal;
@@ -4983,7 +4984,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
- ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels,
AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
// Block
@@ -5083,7 +5084,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
template <typename Impl>
typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// Block ::
// '{' StatementList '}'
@@ -5115,7 +5116,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
return ParseStatement(labels, ok);
} else {
@@ -5135,7 +5136,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
VariableDeclarationContext var_context,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
// VariableStatement ::
// VariableDeclarations ';'
@@ -5176,7 +5177,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDebuggerStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* labels,
AllowLabelledFunctionStatement allow_function, bool* ok) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
@@ -5247,7 +5248,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
@@ -5317,7 +5318,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// BreakStatement ::
// 'break' Identifier? ';'
@@ -5398,7 +5399,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
@@ -5428,7 +5429,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
@@ -5463,7 +5464,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
@@ -5512,7 +5513,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
// CaseClause ::
@@ -5611,12 +5612,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
SourceRangeScope catch_range_scope(scanner(), &catch_range);
if (Check(Token::CATCH)) {
bool has_binding;
- if (allow_harmony_optional_catch_binding()) {
- has_binding = Check(Token::LPAREN);
- } else {
- has_binding = true;
- Expect(Token::LPAREN, CHECK_OK);
- }
+ has_binding = Check(Token::LPAREN);
if (has_binding) {
catch_info.scope = NewScope(CATCH_SCOPE);
@@ -5683,7 +5679,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// Either a standard for loop
// for (<init>; <cond>; <next>) { ... }
// or a for-each loop
@@ -5744,7 +5740,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
if (peek() == Token::VAR) {
ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
CHECK_OK);
- DCHECK_EQ(for_info.parsing_result.descriptor.mode, VAR);
+ DCHECK_EQ(for_info.parsing_result.descriptor.mode, VariableMode::kVar);
for_info.position = scanner()->location().beg_pos;
if (CheckInOrOf(&for_info.mode)) {
@@ -5795,7 +5791,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithDeclarations(
- int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+ int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
Scope* inner_block_scope, bool* ok) {
// Just one declaration followed by in/of.
if (for_info->parsing_result.declarations.size() != 1) {
@@ -5892,7 +5888,7 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
- ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok) {
+ ForInfo* for_info, ZonePtrList<const AstRawString>* labels, bool* ok) {
// Initializer is reference followed by in/of.
if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
expression = CheckAndRewriteReferenceExpression(
@@ -5929,7 +5925,7 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// The condition and the next statement of the for loop must be parsed
// in a new scope.
Scope* inner_scope = NewScope(BLOCK_SCOPE);
@@ -5984,7 +5980,7 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
template <typename Impl>
typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
- int stmt_pos, ZoneList<const AstRawString*>* labels, ExpressionT* cond,
+ int stmt_pos, ZonePtrList<const AstRawString>* labels, ExpressionT* cond,
StatementT* next, StatementT* body, bool* ok) {
ForStatementT loop = factory()->NewForStatement(labels, stmt_pos);
typename Types::Target target(this, loop);
@@ -6023,7 +6019,7 @@ void ParserBase<Impl>::MarkLoopVariableAsAssigned(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels, bool* ok) {
// for await '(' ForDeclaration of AssignmentExpression ')'
DCHECK(is_async_function());
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 009e76cb26..dacce7d38f 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include <memory>
-#include "src/api.h"
#include "src/ast/ast-function-literal-id-reindexer.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
@@ -15,9 +14,10 @@
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/conversions-inl.h"
#include "src/log.h"
#include "src/messages.h"
-#include "src/objects-inl.h"
+#include "src/objects/scope-info.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/expression-scope-reparenter.h"
#include "src/parsing/parse-info.h"
@@ -92,12 +92,12 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
- ZoneList<Statement*>* body = nullptr;
+ ZonePtrList<Statement>* body = nullptr;
{
FunctionState function_state(&function_state_, &scope_, function_scope);
- body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
+ body = new (zone()) ZonePtrList<Statement>(call_super ? 2 : 1, zone());
if (call_super) {
// Create a SuperCallReference and handle in BytecodeGenerator.
auto constructor_args_name = ast_value_factory()->empty_string();
@@ -105,11 +105,11 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool is_rest = true;
bool is_optional = false;
Variable* constructor_args = function_scope->DeclareParameter(
- constructor_args_name, TEMPORARY, is_optional, is_rest, &is_duplicate,
- ast_value_factory(), pos);
+ constructor_args_name, VariableMode::kTemporary, is_optional, is_rest,
+ &is_duplicate, ast_value_factory(), pos);
- ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(1, zone());
Spread* spread_args = factory()->NewSpread(
factory()->NewVariableProxy(constructor_args), pos, pos);
@@ -283,7 +283,8 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
Expression* Parser::NewThrowError(Runtime::FunctionId id,
MessageTemplate::Template message,
const AstRawString* arg, int pos) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(2, zone());
args->Add(factory()->NewSmiLiteral(message, pos), zone());
args->Add(factory()->NewStringLiteral(arg, pos), zone());
CallRuntime* call_constructor = factory()->NewCallRuntime(id, args, pos);
@@ -321,7 +322,7 @@ Expression* Parser::NewTargetExpression(int pos) {
Expression* Parser::ImportMetaExpression(int pos) {
return factory()->NewCallRuntime(
Runtime::kInlineGetImportMetaObject,
- new (zone()) ZoneList<Expression*>(0, zone()), pos);
+ new (zone()) ZonePtrList<Expression>(0, zone()), pos);
}
Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
@@ -350,7 +351,7 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
}
Expression* Parser::NewV8Intrinsic(const AstRawString* name,
- ZoneList<Expression*>* args, int pos,
+ ZonePtrList<Expression>* args, int pos,
bool* ok) {
if (extension_ != nullptr) {
// The extension structures are only accessible while parsing the
@@ -449,7 +450,6 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_bigint(FLAG_harmony_bigint);
set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
- set_allow_harmony_optional_catch_binding(FLAG_harmony_optional_catch_binding);
set_allow_harmony_private_fields(FLAG_harmony_private_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
@@ -458,7 +458,8 @@ Parser::Parser(ParseInfo* info)
}
void Parser::DeserializeScopeChain(
- ParseInfo* info, MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
+ Isolate* isolate, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
// TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
// context, which will have the "this" binding for script scopes.
DeclarationScope* script_scope = NewScriptScope();
@@ -466,10 +467,9 @@ void Parser::DeserializeScopeChain(
Scope* scope = script_scope;
Handle<ScopeInfo> outer_scope_info;
if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
- DCHECK(ThreadId::Current().Equals(
- outer_scope_info->GetIsolate()->thread_id()));
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
scope = Scope::DeserializeScopeChain(
- zone(), *outer_scope_info, script_scope, ast_value_factory(),
+ isolate, zone(), *outer_scope_info, script_scope, ast_value_factory(),
Scope::DeserializationMode::kScopesOnly);
}
original_scope_ = scope;
@@ -505,10 +505,10 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
- DeserializeScopeChain(info, info->maybe_outer_scope_info());
+ DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
scanner_.Initialize(info->character_stream(), info->is_module());
- FunctionLiteral* result = DoParseProgram(info);
+ FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
HandleSourceURLComments(isolate, info->script());
@@ -524,17 +524,18 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
start = 0;
end = String::cast(script->source())->length();
}
- LOG(script->GetIsolate(),
- FunctionEvent(event_name, script, -1, ms, start, end, "", 0));
+ LOG(isolate,
+ FunctionEvent(event_name, script->id(), ms, start, end, "", 0));
}
return result;
}
-
-FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
+FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
// Note that this function can be called from the main thread or from a
// background thread. We should not access anything Isolate / heap dependent
- // via ParseInfo, and also not pass it forward.
+ // via ParseInfo, and also not pass it forward. If not on the main thread
+ // isolate will be nullptr.
+ DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK_NULL(scope_);
DCHECK_NULL(target_stack_);
@@ -558,7 +559,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
scope->set_start_position(0);
FunctionState function_state(&function_state_, &scope_, scope);
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
+ ZonePtrList<Statement>* body =
+ new (zone()) ZonePtrList<Statement>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
if (parsing_module_) {
@@ -568,9 +570,9 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
bool is_duplicate = false;
bool is_rest = false;
bool is_optional = false;
- auto var =
- scope->DeclareParameter(name, VAR, is_optional, is_rest,
- &is_duplicate, ast_value_factory(), beg_pos);
+ auto var = scope->DeclareParameter(name, VariableMode::kVar, is_optional,
+ is_rest, &is_duplicate,
+ ast_value_factory(), beg_pos);
DCHECK(!is_duplicate);
var->AllocateTo(VariableLocation::PARAMETER, 0);
@@ -585,7 +587,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
ok = ok && module()->Validate(this->scope()->AsModuleScope(),
pending_error_handler(), zone());
} else if (info->is_wrapped_as_function()) {
- ParseWrapped(info, body, scope, zone(), &ok);
+ ParseWrapped(isolate, info, body, scope, zone(), &ok);
} else {
// Don't count the mode in the use counters--give the program a chance
// to enable script-wide strict mode below.
@@ -639,23 +641,26 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
return result;
}
-ZoneList<const AstRawString*>* Parser::PrepareWrappedArguments(ParseInfo* info,
- Zone* zone) {
+ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
+ Isolate* isolate, ParseInfo* info, Zone* zone) {
DCHECK(parsing_on_main_thread_);
- Handle<FixedArray> arguments(info->script()->wrapped_arguments());
+ DCHECK_NOT_NULL(isolate);
+ Handle<FixedArray> arguments(info->script()->wrapped_arguments(), isolate);
int arguments_length = arguments->length();
- ZoneList<const AstRawString*>* arguments_for_wrapped_function =
- new (zone) ZoneList<const AstRawString*>(arguments_length, zone);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
+ new (zone) ZonePtrList<const AstRawString>(arguments_length, zone);
for (int i = 0; i < arguments_length; i++) {
const AstRawString* argument_string = ast_value_factory()->GetString(
- Handle<String>(String::cast(arguments->get(i))));
+ Handle<String>(String::cast(arguments->get(i)), isolate));
arguments_for_wrapped_function->Add(argument_string, zone);
}
return arguments_for_wrapped_function;
}
-void Parser::ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
+void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
+ ZonePtrList<Statement>* body,
DeclarationScope* outer_scope, Zone* zone, bool* ok) {
+ DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK(info->is_wrapped_as_function());
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
@@ -666,8 +671,8 @@ void Parser::ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
const AstRawString* function_name = nullptr;
Scanner::Location location(0, 0);
- ZoneList<const AstRawString*>* arguments_for_wrapped_function =
- PrepareWrappedArguments(info, zone);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
+ PrepareWrappedArguments(isolate, info, zone);
FunctionLiteral* function_literal = ParseFunctionLiteral(
function_name, location, kSkipFunctionNameCheck, kNormalFunction,
@@ -690,18 +695,19 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- DeserializeScopeChain(info, info->maybe_outer_scope_info());
+ DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
DCHECK_EQ(factory()->zone(), info->zone());
// Initialize parser state.
- Handle<String> name(shared_info->Name());
+ Handle<String> name(shared_info->Name(), isolate);
info->set_function_name(ast_value_factory()->GetString(name));
scanner_.Initialize(info->character_stream(), info->is_module());
- FunctionLiteral* result = DoParseFunction(info, info->function_name());
+ FunctionLiteral* result =
+ DoParseFunction(isolate, info, info->function_name());
MaybeResetCharacterStream(info, result);
if (result != nullptr) {
- Handle<String> inferred_name(shared_info->inferred_name());
+ Handle<String> inferred_name(shared_info->inferred_name(), isolate);
result->set_inferred_name(inferred_name);
}
@@ -710,10 +716,9 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// We need to make sure that the debug-name is available.
ast_value_factory()->Internalize(isolate);
DeclarationScope* function_scope = result->scope();
- Script* script = *info->script();
std::unique_ptr<char[]> function_name = result->GetDebugName();
- LOG(script->GetIsolate(),
- FunctionEvent("parse-function", script, -1, ms,
+ LOG(isolate,
+ FunctionEvent("parse-function", info->script()->id(), ms,
function_scope->start_position(),
function_scope->end_position(), function_name.get(),
strlen(function_name.get())));
@@ -735,8 +740,9 @@ static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
return FunctionLiteral::kAnonymousExpression;
}
-FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
+FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
const AstRawString* raw_name) {
+ DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK_NOT_NULL(raw_name);
DCHECK_NULL(scope_);
DCHECK_NULL(target_stack_);
@@ -858,9 +864,10 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
info->start_position(), info->end_position());
} else {
- ZoneList<const AstRawString*>* arguments_for_wrapped_function =
- info->is_wrapped_as_function() ? PrepareWrappedArguments(info, zone())
- : nullptr;
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
+ info->is_wrapped_as_function()
+ ? PrepareWrappedArguments(isolate, info, zone())
+ : nullptr;
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
kNoSourcePosition, function_type, info->language_mode(),
@@ -909,8 +916,7 @@ Statement* Parser::ParseModuleItem(bool* ok) {
return ParseStatementListItem(ok);
}
-
-void Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
+void Parser::ParseModuleItemList(ZonePtrList<Statement>* body, bool* ok) {
// ecma262/#prod-Module
// Module :
// ModuleBody?
@@ -937,10 +943,9 @@ const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
return GetSymbol();
}
-
-void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
+void Parser::ParseExportClause(ZonePtrList<const AstRawString>* export_names,
ZoneList<Scanner::Location>* export_locations,
- ZoneList<const AstRawString*>* local_names,
+ ZonePtrList<const AstRawString>* local_names,
Scanner::Location* reserved_loc, bool* ok) {
// ExportClause :
// '{' '}'
@@ -988,9 +993,8 @@ void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
Expect(Token::RBRACE, CHECK_OK_VOID);
}
-
-ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
- int pos, bool* ok) {
+ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos,
+ bool* ok) {
// NamedImports :
// '{' '}'
// '{' ImportsList '}'
@@ -1006,7 +1010,7 @@ ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
Expect(Token::LBRACE, CHECK_OK);
- auto result = new (zone()) ZoneList<const NamedImport*>(1, zone());
+ auto result = new (zone()) ZonePtrList<const NamedImport>(1, zone());
while (peek() != Token::RBRACE) {
const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
const AstRawString* local_name = import_name;
@@ -1028,8 +1032,8 @@ ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
return nullptr;
}
- DeclareVariable(local_name, CONST, kNeedsInitialization, position(),
- CHECK_OK);
+ DeclareVariable(local_name, VariableMode::kConst, kNeedsInitialization,
+ position(), CHECK_OK);
NamedImport* import =
new (zone()) NamedImport(import_name, local_name, location);
@@ -1080,14 +1084,14 @@ void Parser::ParseImportDeclaration(bool* ok) {
import_default_binding =
ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
import_default_binding_loc = scanner()->location();
- DeclareVariable(import_default_binding, CONST, kNeedsInitialization, pos,
- CHECK_OK_VOID);
+ DeclareVariable(import_default_binding, VariableMode::kConst,
+ kNeedsInitialization, pos, CHECK_OK_VOID);
}
// Parse NameSpaceImport or NamedImports if present.
const AstRawString* module_namespace_binding = nullptr;
Scanner::Location module_namespace_binding_loc;
- const ZoneList<const NamedImport*>* named_imports = nullptr;
+ const ZonePtrList<const NamedImport>* named_imports = nullptr;
if (import_default_binding == nullptr || Check(Token::COMMA)) {
switch (peek()) {
case Token::MUL: {
@@ -1096,8 +1100,8 @@ void Parser::ParseImportDeclaration(bool* ok) {
module_namespace_binding =
ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
module_namespace_binding_loc = scanner()->location();
- DeclareVariable(module_namespace_binding, CONST, kCreatedInitialized,
- pos, CHECK_OK_VOID);
+ DeclareVariable(module_namespace_binding, VariableMode::kConst,
+ kCreatedInitialized, pos, CHECK_OK_VOID);
break;
}
@@ -1161,7 +1165,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
Expect(Token::DEFAULT, CHECK_OK);
Scanner::Location default_loc = scanner()->location();
- ZoneList<const AstRawString*> local_names(1, zone());
+ ZonePtrList<const AstRawString> local_names(1, zone());
Statement* result = nullptr;
switch (peek()) {
case Token::FUNCTION:
@@ -1193,9 +1197,10 @@ Statement* Parser::ParseExportDefault(bool* ok) {
ast_value_factory()->star_default_star_string();
local_names.Add(local_name, zone());
- // It's fine to declare this as CONST because the user has no way of
- // writing to it.
- Declaration* decl = DeclareVariable(local_name, CONST, pos, CHECK_OK);
+ // It's fine to declare this as VariableMode::kConst because the user has
+ // no way of writing to it.
+ Declaration* decl =
+ DeclareVariable(local_name, VariableMode::kConst, pos, CHECK_OK);
decl->proxy()->var()->set_initializer_position(position());
Assignment* assignment = factory()->NewAssignment(
@@ -1229,7 +1234,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
int pos = position();
Statement* result = nullptr;
- ZoneList<const AstRawString*> names(1, zone());
+ ZonePtrList<const AstRawString> names(1, zone());
Scanner::Location loc = scanner()->peek_location();
switch (peek()) {
case Token::DEFAULT:
@@ -1259,9 +1264,9 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// encountered, and then throw a SyntaxError if we are in the
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
- ZoneList<const AstRawString*> export_names(1, zone());
+ ZonePtrList<const AstRawString> export_names(1, zone());
ZoneList<Scanner::Location> export_locations(1, zone());
- ZoneList<const AstRawString*> original_names(1, zone());
+ ZonePtrList<const AstRawString> original_names(1, zone());
ParseExportClause(&export_names, &export_locations, &original_names,
&reserved_loc, CHECK_OK);
const AstRawString* module_specifier = nullptr;
@@ -1356,7 +1361,7 @@ Declaration* Parser::DeclareVariable(const AstRawString* name,
VariableProxy* proxy = factory()->NewVariableProxy(
name, NORMAL_VARIABLE, scanner()->location().beg_pos);
Declaration* declaration;
- if (mode == VAR && !scope()->is_declaration_scope()) {
+ if (mode == VariableMode::kVar && !scope()->is_declaration_scope()) {
DCHECK(scope()->is_block_scope() || scope()->is_with_scope());
declaration = factory()->NewNestedVariableDeclaration(proxy, scope(), pos);
} else {
@@ -1403,7 +1408,7 @@ Variable* Parser::Declare(Declaration* declaration,
Block* Parser::BuildInitializationBlock(
DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
Block* result = factory()->NewBlock(1, true);
for (auto declaration : parsing_result->declarations) {
DeclareAndInitializeVariables(result, &(parsing_result->descriptor),
@@ -1415,7 +1420,7 @@ Block* Parser::BuildInitializationBlock(
Statement* Parser::DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
int pos, bool is_sloppy_block_function,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
bool* ok) {
VariableProxy* proxy =
factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, pos);
@@ -1436,10 +1441,10 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
Statement* Parser::DeclareClass(const AstRawString* variable_name,
Expression* value,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
int class_token_pos, int end_pos, bool* ok) {
- Declaration* decl =
- DeclareVariable(variable_name, LET, class_token_pos, CHECK_OK);
+ Declaration* decl = DeclareVariable(variable_name, VariableMode::kLet,
+ class_token_pos, CHECK_OK);
decl->proxy()->var()->set_initializer_position(end_pos);
if (names) names->Add(variable_name, zone());
@@ -1459,7 +1464,7 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- Declaration* decl = DeclareVariable(name, VAR, pos, CHECK_OK);
+ Declaration* decl = DeclareVariable(name, VariableMode::kVar, pos, CHECK_OK);
NativeFunctionLiteral* lit =
factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
return factory()->NewExpressionStatement(
@@ -1468,8 +1473,8 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
pos);
}
-ZoneList<const AstRawString*>* Parser::DeclareLabel(
- ZoneList<const AstRawString*>* labels, VariableProxy* var, bool* ok) {
+ZonePtrList<const AstRawString>* Parser::DeclareLabel(
+ ZonePtrList<const AstRawString>* labels, VariableProxy* var, bool* ok) {
DCHECK(IsIdentifier(var));
const AstRawString* label = var->raw_name();
// TODO(1240780): We don't check for redeclaration of labels
@@ -1483,7 +1488,7 @@ ZoneList<const AstRawString*>* Parser::DeclareLabel(
return nullptr;
}
if (labels == nullptr) {
- labels = new (zone()) ZoneList<const AstRawString*>(1, zone());
+ labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
}
labels->Add(label, zone());
// Remove the "ghost" variable that turned out to be a label
@@ -1493,7 +1498,7 @@ ZoneList<const AstRawString*>* Parser::DeclareLabel(
return labels;
}
-bool Parser::ContainsLabel(ZoneList<const AstRawString*>* labels,
+bool Parser::ContainsLabel(ZonePtrList<const AstRawString>* labels,
const AstRawString* label) {
DCHECK_NOT_NULL(label);
if (labels != nullptr) {
@@ -1593,12 +1598,12 @@ void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
catch_info->name = ast_value_factory()->dot_catch_string();
}
Variable* catch_variable =
- catch_info->scope->DeclareLocal(catch_info->name, VAR);
+ catch_info->scope->DeclareLocal(catch_info->name, VariableMode::kVar);
if (catch_info->pattern != nullptr) {
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
descriptor.scope = scope();
- descriptor.mode = LET;
+ descriptor.mode = VariableMode::kLet;
descriptor.declaration_pos = catch_info->pattern->position();
descriptor.initialization_pos = catch_info->pattern->position();
@@ -1674,7 +1679,7 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
}
void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
- ZoneList<Statement*>* body,
+ ZonePtrList<Statement>* body,
bool* ok) {
// For ES6 Generators, we just prepend the initial yield.
Expression* initial_yield = BuildInitialYield(pos, kind);
@@ -1684,7 +1689,7 @@ void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
}
void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
- int pos, FunctionKind kind, ZoneList<Statement*>* body, bool* ok) {
+ int pos, FunctionKind kind, ZonePtrList<Statement>* body, bool* ok) {
// For ES2017 Async Generators, we produce:
//
// try {
@@ -1726,8 +1731,8 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// For AsyncGenerators, a top-level catch block will reject the Promise.
Scope* catch_scope = NewHiddenCatchScope();
- ZoneList<Expression*>* reject_args =
- new (zone()) ZoneList<Expression*>(2, zone());
+ ZonePtrList<Expression>* reject_args =
+ new (zone()) ZonePtrList<Expression>(2, zone());
reject_args->Add(factory()->NewVariableProxy(
function_state_->scope()->generator_object_var()),
zone());
@@ -1746,8 +1751,8 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
try_block->statements()->Add(try_catch, zone());
Block* finally_block = factory()->NewBlock(1, false);
- ZoneList<Expression*>* close_args =
- new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* close_args =
+ new (zone()) ZonePtrList<Expression>(1, zone());
VariableProxy* call_proxy = factory()->NewVariableProxy(
function_state_->scope()->generator_object_var());
close_args->Add(call_proxy, zone());
@@ -1783,8 +1788,8 @@ Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
Variable* result, IteratorType type,
int pos) {
Expression* next_property = factory()->NewResolvedProperty(iterator, next);
- ZoneList<Expression*>* next_arguments =
- new (zone()) ZoneList<Expression*>(0, zone());
+ ZonePtrList<Expression>* next_arguments =
+ new (zone()) ZonePtrList<Expression>(0, zone());
Expression* next_call =
factory()->NewCall(next_property, next_arguments, kNoSourcePosition);
if (type == IteratorType::kAsync) {
@@ -1796,16 +1801,16 @@ Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
// %_IsJSReceiver(...)
- ZoneList<Expression*>* is_spec_object_args =
- new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* is_spec_object_args =
+ new (zone()) ZonePtrList<Expression>(1, zone());
is_spec_object_args->Add(left, zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
// %ThrowIteratorResultNotAnObject(result)
Expression* result_proxy_again = factory()->NewVariableProxy(result);
- ZoneList<Expression*>* throw_arguments =
- new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* throw_arguments =
+ new (zone()) ZonePtrList<Expression>(1, zone());
throw_arguments->Add(result_proxy_again, zone());
Expression* throw_call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, throw_arguments, pos);
@@ -1914,7 +1919,7 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
bool is_for_var_of =
for_info->mode == ForEachStatement::ITERATE &&
- for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+ for_info->parsing_result.descriptor.mode == VariableMode::kVar;
bool collect_names =
IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
is_for_var_of;
@@ -1966,8 +1971,9 @@ Block* Parser::CreateForEachStatementTDZ(Block* init_block,
// TODO(adamk): This needs to be some sort of special
// INTERNAL variable that's invisible to the debugger
// but visible to everything else.
- Declaration* tdz_decl = DeclareVariable(for_info.bound_names[i], LET,
- kNoSourcePosition, CHECK_OK);
+ Declaration* tdz_decl =
+ DeclareVariable(for_info.bound_names[i], VariableMode::kLet,
+ kNoSourcePosition, CHECK_OK);
tdz_decl->proxy()->var()->set_initializer_position(position());
}
}
@@ -2139,7 +2145,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// }
DCHECK_GT(for_info.bound_names.length(), 0);
- ZoneList<Variable*> temps(for_info.bound_names.length(), zone());
+ ZonePtrList<Variable> temps(for_info.bound_names.length(), zone());
Block* outer_block =
factory()->NewBlock(for_info.bound_names.length() + 4, false);
@@ -2198,7 +2204,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Block* ignore_completion_block =
factory()->NewBlock(for_info.bound_names.length() + 3, true);
- ZoneList<Variable*> inner_vars(for_info.bound_names.length(), zone());
+ ZonePtrList<Variable> inner_vars(for_info.bound_names.length(), zone());
// For each let variable x:
// make statement: let/const x = temp_x.
for (int i = 0; i < for_info.bound_names.length(); i++) {
@@ -2441,10 +2447,6 @@ void Parser::DeclareArrowFunctionFormalParameters(
}
void Parser::PrepareGeneratorVariables() {
- // The code produced for generators relies on forced context allocation of
- // parameters (it does not restore the frame's parameters upon resume).
- function_state_->scope()->ForceContextAllocationForParameters();
-
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
// expressions.
@@ -2457,7 +2459,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -2568,7 +2570,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
bool should_preparse =
(parse_lazily() && is_lazy_top_level_function) || should_preparse_inner;
- ZoneList<Statement*>* body = nullptr;
+ ZonePtrList<Statement>* body = nullptr;
int expected_property_count = -1;
int suspend_count = -1;
int num_parameters = -1;
@@ -2651,7 +2653,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
: "preparse-resolution")
: "full-parse";
logger_->FunctionEvent(
- event_name, nullptr, script_id(), ms, scope->start_position(),
+ event_name, script_id(), ms, scope->start_position(),
scope->end_position(),
reinterpret_cast<const char*>(function_name->raw_data()),
function_name->byte_length());
@@ -2862,7 +2864,7 @@ Block* Parser::BuildParameterInitializationBlock(
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.scope = scope();
- descriptor.mode = LET;
+ descriptor.mode = VariableMode::kLet;
descriptor.declaration_pos = parameter->pattern->position();
// The position that will be used by the AssignmentExpression
// which copies from the temp parameter to the pattern.
@@ -2924,7 +2926,8 @@ Block* Parser::BuildParameterInitializationBlock(
Scope* Parser::NewHiddenCatchScope() {
Scope* catch_scope = NewScopeWithParent(scope(), CATCH_SCOPE);
- catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(),
+ VariableMode::kVar);
catch_scope->set_is_hidden();
return catch_scope;
}
@@ -2946,7 +2949,7 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
{
Expression* create_promise = factory()->NewCallRuntime(
Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
- new (zone()) ZoneList<Expression*>(0, zone()), kNoSourcePosition);
+ new (zone()) ZonePtrList<Expression>(0, zone()), kNoSourcePosition);
Assignment* assign_promise = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(PromiseVariable()),
create_promise, kNoSourcePosition);
@@ -2971,11 +2974,15 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
// There is no TryCatchFinally node, so wrap it in an outer try/finally
Block* outer_try_block = IgnoreCompletion(try_catch_statement);
- // finally { %AsyncFunctionPromiseRelease(.promise) }
+ // finally { %AsyncFunctionPromiseRelease(.promise, can_suspend) }
Block* finally_block;
{
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
+ args->Add(factory()->NewBooleanLiteral(function_state_->CanSuspend(),
+ kNoSourcePosition),
+ zone());
Expression* call_promise_release = factory()->NewCallRuntime(
Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, args, kNoSourcePosition);
Statement* promise_release = factory()->NewExpressionStatement(
@@ -2992,7 +2999,8 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
// %ResolvePromise(.promise, value), .promise
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(2, zone());
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
Expression* call_runtime =
@@ -3006,7 +3014,8 @@ Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
// %promise_internal_reject(.promise, value, false), .promise
// Disables the additional debug event for the rejection since a debug event
// already happened for the exception that got us here.
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(3, zone());
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
args->Add(factory()->NewBooleanLiteral(false, pos), zone());
@@ -3040,13 +3049,13 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
Suspend::kOnExceptionThrow);
}
-ZoneList<Statement*>* Parser::ParseFunction(
+ZonePtrList<Statement>* Parser::ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
bool* has_duplicate_parameters, int* expected_property_count,
int* suspend_count,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionState function_state(&function_state_, &scope_, function_scope);
@@ -3113,7 +3122,7 @@ ZoneList<Statement*>* Parser::ParseFunction(
*num_parameters = formals.num_parameters();
*function_length = formals.function_length;
- ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(8, zone());
+ ZonePtrList<Statement>* body = new (zone()) ZonePtrList<Statement>(8, zone());
ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
// Validate parameter names. We can do this only after parsing the function,
@@ -3145,9 +3154,9 @@ void Parser::DeclareClassVariable(const AstRawString* name,
VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, class_token_pos);
- class_info->variable =
- Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
- Variable::DefaultInitializationFlag(CONST), ok);
+ class_info->variable = Declare(
+ declaration, DeclarationDescriptor::NORMAL, VariableMode::kConst,
+ Variable::DefaultInitializationFlag(VariableMode::kConst), ok);
}
}
@@ -3159,8 +3168,9 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name,
VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, kNoSourcePosition);
- Variable* var = Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
- Variable::DefaultInitializationFlag(CONST), CHECK_OK);
+ Variable* var = Declare(
+ declaration, DeclarationDescriptor::NORMAL, VariableMode::kConst,
+ Variable::DefaultInitializationFlag(VariableMode::kConst), CHECK_OK);
var->ForceContextAllocation();
return var;
}
@@ -3223,11 +3233,11 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
}
FunctionLiteral* Parser::CreateInitializerFunction(
- DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields) {
+ DeclarationScope* scope, ZonePtrList<ClassLiteral::Property>* fields) {
DCHECK_EQ(scope->function_kind(),
FunctionKind::kClassFieldsInitializerFunction);
// function() { .. class fields initializer .. }
- ZoneList<Statement*>* statements = NewStatementList(1);
+ ZonePtrList<Statement>* statements = NewStatementList(1);
InitializeClassFieldsStatement* static_fields =
factory()->NewInitializeClassFieldsStatement(fields, kNoSourcePosition);
statements->Add(static_fields, zone());
@@ -3325,7 +3335,8 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
DCHECK(function_scope->is_function_scope());
BlockState block_state(&scope_, inner_scope);
for (Declaration* decl : *inner_scope->declarations()) {
- if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
+ if (decl->proxy()->var()->mode() != VariableMode::kVar ||
+ !decl->IsVariableDeclaration()) {
continue;
}
const AstRawString* name = decl->proxy()->raw_name();
@@ -3426,9 +3437,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
- if (!info->script().is_null()) {
- set_script_id(info->script()->id());
- }
+ set_script_id(info->script_id());
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
@@ -3446,9 +3455,10 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// scopes) and set their end position after we know the script length.
if (info->is_toplevel()) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
- result = DoParseProgram(info);
+ result = DoParseProgram(/* isolate = */ nullptr, info);
} else {
- result = DoParseFunction(info, info->function_name());
+ result =
+ DoParseFunction(/* isolate = */ nullptr, info, info->function_name());
}
MaybeResetCharacterStream(info, result);
@@ -3485,9 +3495,9 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag) {
TemplateLiteral* lit = *state;
int pos = lit->position();
- const ZoneList<const AstRawString*>* cooked_strings = lit->cooked();
- const ZoneList<const AstRawString*>* raw_strings = lit->raw();
- const ZoneList<Expression*>* expressions = lit->expressions();
+ const ZonePtrList<const AstRawString>* cooked_strings = lit->cooked();
+ const ZonePtrList<const AstRawString>* raw_strings = lit->raw();
+ const ZonePtrList<Expression>* expressions = lit->expressions();
DCHECK_EQ(cooked_strings->length(), raw_strings->length());
DCHECK_EQ(cooked_strings->length(), expressions->length() + 1);
@@ -3502,8 +3512,8 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
factory()->NewGetTemplateObject(cooked_strings, raw_strings, pos);
// Call TagFn
- ZoneList<Expression*>* call_args =
- new (zone()) ZoneList<Expression*>(expressions->length() + 1, zone());
+ ZonePtrList<Expression>* call_args =
+ new (zone()) ZonePtrList<Expression>(expressions->length() + 1, zone());
call_args->Add(template_object, zone());
call_args->AddAll(*expressions, zone());
return factory()->NewTaggedTemplate(tag, call_args, pos);
@@ -3512,7 +3522,7 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
namespace {
-bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
+bool OnlyLastArgIsSpread(ZonePtrList<Expression>* args) {
for (int i = 0; i < args->length() - 1; i++) {
if (args->at(i)->IsSpread()) {
return false;
@@ -3524,7 +3534,7 @@ bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
} // namespace
ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
- ZoneList<Expression*>* list) {
+ ZonePtrList<Expression>* list) {
// If there's only a single spread argument, a fast path using CallWithSpread
// is taken.
DCHECK_LT(1, list->length());
@@ -3540,14 +3550,15 @@ ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
}
Expression* Parser::SpreadCall(Expression* function,
- ZoneList<Expression*>* args_list, int pos,
+ ZonePtrList<Expression>* args_list, int pos,
Call::PossiblyEval is_possibly_eval) {
// Handle this case in BytecodeGenerator.
if (OnlyLastArgIsSpread(args_list) || function->IsSuperCallReference()) {
return factory()->NewCall(function, args_list, pos);
}
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(3, zone());
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
@@ -3575,12 +3586,13 @@ Expression* Parser::SpreadCall(Expression* function,
}
Expression* Parser::SpreadCallNew(Expression* function,
- ZoneList<Expression*>* args_list, int pos) {
+ ZonePtrList<Expression>* args_list, int pos) {
if (OnlyLastArgIsSpread(args_list)) {
// Handle in BytecodeGenerator.
return factory()->NewCallNew(function, args_list, pos);
}
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(2, zone());
args->Add(function, zone());
args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
@@ -3608,7 +3620,7 @@ void Parser::SetAsmModule() {
scope()->AsDeclarationScope()->set_asm_module();
}
-Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
+Expression* Parser::ExpressionListToExpression(ZonePtrList<Expression>* args) {
Expression* expr = args->at(0);
for (int i = 1; i < args->length(); ++i) {
expr = factory()->NewBinaryOperation(Token::COMMA, expr, args->at(i),
@@ -3618,8 +3630,9 @@ Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
}
// This method completes the desugaring of the body of async_function.
-void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
- Expression* return_value, bool* ok) {
+void Parser::RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
+ Block* block, Expression* return_value,
+ bool* ok) {
// function async_function() {
// .generator_object = %CreateJSGeneratorObject();
// BuildRejectPromiseOnException({
@@ -3751,7 +3764,7 @@ Statement* Parser::CheckCallable(Variable* var, Expression* error, int pos) {
return validate_var;
}
-void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
+void Parser::BuildIteratorClose(ZonePtrList<Statement>* statements,
Variable* iterator, Variable* input,
Variable* var_output, IteratorType type) {
//
@@ -3803,7 +3816,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
// output = %_Call(iteratorReturn, iterator, input);
Statement* call_return;
{
- auto args = new (zone()) ZoneList<Expression*>(3, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(3, zone());
args->Add(factory()->NewVariableProxy(var_return), zone());
args->Add(factory()->NewVariableProxy(iterator), zone());
args->Add(factory()->NewVariableProxy(input), zone());
@@ -3825,7 +3838,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
{
Expression* is_receiver_call;
{
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
@@ -3833,7 +3846,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
Statement* throw_call;
{
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(var_output), zone());
Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
@@ -3931,7 +3944,7 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
// TryCatchStatementForReThrow below (which does not clear the pending
// message), rather than a TryCatchStatement.
{
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(catch_scope->catch_variable()),
zone());
rethrow = factory()->NewExpressionStatement(
@@ -3960,7 +3973,7 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
target->statements()->Add(try_finally, zone());
}
-void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+void Parser::BuildIteratorCloseForCompletion(ZonePtrList<Statement>* statements,
Variable* iterator,
Expression* completion,
IteratorType type) {
@@ -4022,7 +4035,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
// try { %_Call(iteratorReturn, iterator) } catch (_) { }
Statement* try_call_return;
{
- auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(2, zone());
args->Add(factory()->NewVariableProxy(var_return), zone());
args->Add(factory()->NewVariableProxy(iterator), zone());
@@ -4052,7 +4065,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Variable* var_output = NewTemporary(ast_value_factory()->empty_string());
Statement* call_return;
{
- auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(2, zone());
args->Add(factory()->NewVariableProxy(var_return), zone());
args->Add(factory()->NewVariableProxy(iterator), zone());
Expression* call =
@@ -4070,7 +4083,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Expression* is_receiver_call;
{
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
@@ -4078,7 +4091,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Statement* throw_call;
{
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(factory()->NewVariableProxy(var_output), zone());
Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 2da4490906..2dec83b274 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -125,12 +125,12 @@ struct ParserTypes<Parser> {
typedef ClassLiteral::Property* ClassLiteralProperty;
typedef v8::internal::Suspend* Suspend;
typedef v8::internal::RewritableExpression* RewritableExpression;
- typedef ZoneList<v8::internal::Expression*>* ExpressionList;
- typedef ZoneList<ObjectLiteral::Property*>* ObjectPropertyList;
- typedef ZoneList<ClassLiteral::Property*>* ClassPropertyList;
+ typedef ZonePtrList<v8::internal::Expression>* ExpressionList;
+ typedef ZonePtrList<ObjectLiteral::Property>* ObjectPropertyList;
+ typedef ZonePtrList<ClassLiteral::Property>* ClassPropertyList;
typedef ParserFormalParameters FormalParameters;
typedef v8::internal::Statement* Statement;
- typedef ZoneList<v8::internal::Statement*>* StatementList;
+ typedef ZonePtrList<v8::internal::Statement>* StatementList;
typedef v8::internal::Block* Block;
typedef v8::internal::BreakableStatement* BreakableStatement;
typedef v8::internal::ForStatement* ForStatement;
@@ -163,7 +163,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// This only deserializes the scope chain, but doesn't connect the scopes to
// their corresponding scope infos. Therefore, looking up variables in the
// deserialized scopes is not possible.
- void DeserializeScopeChain(ParseInfo* info,
+ void DeserializeScopeChain(Isolate* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info);
// Move statistics to Isolate
@@ -216,20 +216,22 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<SharedFunctionInfo> shared_info);
- FunctionLiteral* DoParseFunction(ParseInfo* info,
+ FunctionLiteral* DoParseFunction(Isolate* isolate, ParseInfo* info,
const AstRawString* raw_name);
// Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(ParseInfo* info);
+ FunctionLiteral* DoParseProgram(Isolate* isolate, ParseInfo* info);
// Parse with the script as if the source is implicitly wrapped in a function.
// We manually construct the AST and scopes for a top-level function and the
// function wrapper.
- void ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
- DeclarationScope* scope, Zone* zone, bool* ok);
+ void ParseWrapped(Isolate* isolate, ParseInfo* info,
+ ZonePtrList<Statement>* body, DeclarationScope* scope,
+ Zone* zone, bool* ok);
- ZoneList<const AstRawString*>* PrepareWrappedArguments(ParseInfo* info,
- Zone* zone);
+ ZonePtrList<const AstRawString>* PrepareWrappedArguments(Isolate* isolate,
+ ParseInfo* info,
+ Zone* zone);
void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
@@ -247,7 +249,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
SET_ALLOW(harmony_bigint);
- SET_ALLOW(harmony_optional_catch_binding);
SET_ALLOW(harmony_private_fields);
SET_ALLOW(eval_cache);
#undef SET_ALLOW
@@ -255,15 +256,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return reusable_preparser_;
}
- void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
+ void ParseModuleItemList(ZonePtrList<Statement>* body, bool* ok);
Statement* ParseModuleItem(bool* ok);
const AstRawString* ParseModuleSpecifier(bool* ok);
void ParseImportDeclaration(bool* ok);
Statement* ParseExportDeclaration(bool* ok);
Statement* ParseExportDefault(bool* ok);
- void ParseExportClause(ZoneList<const AstRawString*>* export_names,
+ void ParseExportClause(ZonePtrList<const AstRawString>* export_names,
ZoneList<Scanner::Location>* export_locations,
- ZoneList<const AstRawString*>* local_names,
+ ZonePtrList<const AstRawString>* local_names,
Scanner::Location* reserved_loc, bool* ok);
struct NamedImport : public ZoneObject {
const AstRawString* import_name;
@@ -275,13 +276,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
local_name(local_name),
location(location) {}
};
- ZoneList<const NamedImport*>* ParseNamedImports(int pos, bool* ok);
+ ZonePtrList<const NamedImport>* ParseNamedImports(int pos, bool* ok);
Block* BuildInitializationBlock(DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
bool* ok);
- ZoneList<const AstRawString*>* DeclareLabel(
- ZoneList<const AstRawString*>* labels, VariableProxy* expr, bool* ok);
- bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+ ZonePtrList<const AstRawString>* DeclareLabel(
+ ZonePtrList<const AstRawString>* labels, VariableProxy* expr, bool* ok);
+ bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
const AstRawString* label);
Expression* RewriteReturn(Expression* return_value, int pos);
Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
@@ -294,10 +295,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const SourceRange& finally_range,
const CatchInfo& catch_info, int pos);
void ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
- ZoneList<Statement*>* body,
+ ZonePtrList<Statement>* body,
bool* ok);
void ParseAndRewriteAsyncGeneratorFunctionBody(int pos, FunctionKind kind,
- ZoneList<Statement*>* body,
+ ZonePtrList<Statement>* body,
bool* ok);
void DeclareFunctionNameVar(const AstRawString* function_name,
FunctionLiteral::FunctionType function_type,
@@ -306,14 +307,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
int pos, bool is_sloppy_block_function,
- ZoneList<const AstRawString*>* names, bool* ok);
+ ZonePtrList<const AstRawString>* names, bool* ok);
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name,
bool* ok);
FunctionLiteral* CreateInitializerFunction(
- DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields);
+ DeclarationScope* scope, ZonePtrList<ClassLiteral::Property>* fields);
V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
Expression* value,
- ZoneList<const AstRawString*>* names,
+ ZonePtrList<const AstRawString>* names,
int class_token_pos, int end_pos, bool* ok);
V8_INLINE void DeclareClassVariable(const AstRawString* name,
ClassInfo* class_info,
@@ -341,7 +342,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void DeclareAndInitializeVariables(
Block* block, const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok);
+ ZonePtrList<const AstRawString>* names, bool* ok);
void RewriteDestructuringAssignment(RewritableExpression* expr);
Expression* RewriteDestructuringAssignment(Assignment* assignment);
@@ -382,7 +383,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
+ bool* ok);
ObjectLiteral* InitializeObjectLiteral(ObjectLiteral* object_literal) {
object_literal->CalculateEmitStore(main_zone());
@@ -447,13 +449,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const ParserFormalParameters& parameters, bool* ok);
Block* BuildRejectPromiseOnException(Block* block);
- ZoneList<Statement*>* ParseFunction(
+ ZonePtrList<Statement>* ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
int* expected_property_count, int* suspend_count,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
+ bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -462,9 +465,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
TemplateLiteral(Zone* zone, int pos)
: cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
- const ZoneList<const AstRawString*>* cooked() const { return &cooked_; }
- const ZoneList<const AstRawString*>* raw() const { return &raw_; }
- const ZoneList<Expression*>* expressions() const { return &expressions_; }
+ const ZonePtrList<const AstRawString>* cooked() const { return &cooked_; }
+ const ZonePtrList<const AstRawString>* raw() const { return &raw_; }
+ const ZonePtrList<Expression>* expressions() const { return &expressions_; }
int position() const { return pos_; }
void AddTemplateSpan(const AstRawString* cooked, const AstRawString* raw,
@@ -480,9 +483,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
private:
- ZoneList<const AstRawString*> cooked_;
- ZoneList<const AstRawString*> raw_;
- ZoneList<Expression*> expressions_;
+ ZonePtrList<const AstRawString> cooked_;
+ ZonePtrList<const AstRawString> raw_;
+ ZonePtrList<Expression> expressions_;
int pos_;
};
@@ -502,10 +505,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- ArrayLiteral* ArrayLiteralFromListWithSpread(ZoneList<Expression*>* list);
- Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
+ ArrayLiteral* ArrayLiteralFromListWithSpread(ZonePtrList<Expression>* list);
+ Expression* SpreadCall(Expression* function, ZonePtrList<Expression>* args,
int pos, Call::PossiblyEval is_possibly_eval);
- Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
+ Expression* SpreadCallNew(Expression* function, ZonePtrList<Expression>* args,
int pos);
Expression* RewriteSuperCall(Expression* call_expression);
@@ -541,15 +544,16 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
IteratorType type, int pos);
- void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
- Variable* input, Variable* output, IteratorType type);
- void BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+ void BuildIteratorClose(ZonePtrList<Statement>* statements,
+ Variable* iterator, Variable* input, Variable* output,
+ IteratorType type);
+ void BuildIteratorCloseForCompletion(ZonePtrList<Statement>* statements,
Variable* iterator,
Expression* completion,
IteratorType type);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
- V8_INLINE void RewriteAsyncFunctionBody(ZoneList<Statement*>* body,
+ V8_INLINE void RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
Block* block,
Expression* return_value, bool* ok);
@@ -709,7 +713,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// A shortcut for performing a ToString operation
V8_INLINE Expression* ToString(Expression* expr) {
if (expr->IsStringLiteral()) return expr;
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ ZonePtrList<Expression>* args =
+ new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(expr, zone());
return factory()->NewCallRuntime(Runtime::kInlineToString, args,
expr->position());
@@ -795,10 +800,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE static std::nullptr_t NullIdentifier() { return nullptr; }
V8_INLINE static std::nullptr_t NullExpression() { return nullptr; }
V8_INLINE static std::nullptr_t NullLiteralProperty() { return nullptr; }
- V8_INLINE static ZoneList<Expression*>* NullExpressionList() {
+ V8_INLINE static ZonePtrList<Expression>* NullExpressionList() {
+ return nullptr;
+ }
+ V8_INLINE static ZonePtrList<Statement>* NullStatementList() {
return nullptr;
}
- V8_INLINE static ZoneList<Statement*>* NullStatementList() { return nullptr; }
V8_INLINE static std::nullptr_t NullStatement() { return nullptr; }
template <typename T>
@@ -856,23 +863,23 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return factory()->NewStringLiteral(symbol, pos);
}
- V8_INLINE ZoneList<Expression*>* NewExpressionList(int size) const {
- return new (zone()) ZoneList<Expression*>(size, zone());
+ V8_INLINE ZonePtrList<Expression>* NewExpressionList(int size) const {
+ return new (zone()) ZonePtrList<Expression>(size, zone());
}
- V8_INLINE ZoneList<ObjectLiteral::Property*>* NewObjectPropertyList(
+ V8_INLINE ZonePtrList<ObjectLiteral::Property>* NewObjectPropertyList(
int size) const {
- return new (zone()) ZoneList<ObjectLiteral::Property*>(size, zone());
+ return new (zone()) ZonePtrList<ObjectLiteral::Property>(size, zone());
}
- V8_INLINE ZoneList<ClassLiteral::Property*>* NewClassPropertyList(
+ V8_INLINE ZonePtrList<ClassLiteral::Property>* NewClassPropertyList(
int size) const {
- return new (zone()) ZoneList<ClassLiteral::Property*>(size, zone());
+ return new (zone()) ZonePtrList<ClassLiteral::Property>(size, zone());
}
- V8_INLINE ZoneList<Statement*>* NewStatementList(int size) const {
- return new (zone()) ZoneList<Statement*>(size, zone());
+ V8_INLINE ZonePtrList<Statement>* NewStatementList(int size) const {
+ return new (zone()) ZonePtrList<Statement>(size, zone());
}
V8_INLINE Expression* NewV8Intrinsic(const AstRawString* name,
- ZoneList<Expression*>* args, int pos,
+ ZonePtrList<Expression>* args, int pos,
bool* ok);
V8_INLINE Statement* NewThrowStatement(Expression* exception, int pos) {
@@ -881,7 +888,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
V8_INLINE void AddParameterInitializationBlock(
- const ParserFormalParameters& parameters, ZoneList<Statement*>* body,
+ const ParserFormalParameters& parameters, ZonePtrList<Statement>* body,
bool is_async, bool* ok) {
if (parameters.is_simple) return;
auto* init_block = BuildParameterInitializationBlock(parameters, ok);
@@ -923,8 +930,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// BuildParamerterInitializationBlock.
scope->DeclareParameter(
is_simple ? parameter->name : ast_value_factory()->empty_string(),
- is_simple ? VAR : TEMPORARY, is_optional, parameter->is_rest,
- has_duplicate, ast_value_factory(), parameter->position);
+ is_simple ? VariableMode::kVar : VariableMode::kTemporary,
+ is_optional, parameter->is_rest, has_duplicate, ast_value_factory(),
+ parameter->position);
}
}
@@ -934,7 +942,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scanner::Location* duplicate_loc,
bool* ok);
- Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
+ Expression* ExpressionListToExpression(ZonePtrList<Expression>* args);
void SetFunctionNameFromPropertyName(LiteralProperty* property,
const AstRawString* name,
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index dc66876e7d..378023cbeb 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -24,10 +24,10 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
VMState<PARSER> state(isolate);
// Create a character stream for the parser.
- Handle<String> source(String::cast(info->script()->source()));
- source = String::Flatten(source);
+ Handle<String> source(String::cast(info->script()->source()), isolate);
isolate->counters()->total_parse_size()->Increment(source->length());
- std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(source));
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(isolate, source));
info->set_character_stream(std::move(stream));
Parser parser(info);
@@ -59,11 +59,11 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
DCHECK_NULL(info->literal());
// Create a character stream for the parser.
- Handle<String> source(String::cast(info->script()->source()));
- source = String::Flatten(source);
+ Handle<String> source(String::cast(info->script()->source()), isolate);
isolate->counters()->total_parse_size()->Increment(source->length());
- std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- source, shared_info->StartPosition(), shared_info->EndPosition()));
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(isolate, source, shared_info->StartPosition(),
+ shared_info->EndPosition()));
info->set_character_stream(std::move(stream));
VMState<PARSER> state(isolate);
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index b0dfe0a66f..b981b6d12e 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -29,7 +29,7 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
Parser* parser, Block* block,
const DeclarationDescriptor* declaration_descriptor,
const Parser::DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok);
+ ZonePtrList<const AstRawString>* names, bool* ok);
static void RewriteDestructuringAssignment(Parser* parser,
RewritableExpression* to_rewrite,
@@ -108,7 +108,7 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
int value_beg_position_;
Block* block_;
const DeclarationDescriptor* descriptor_;
- ZoneList<const AstRawString*>* names_;
+ ZonePtrList<const AstRawString>* names_;
Expression* current_value_;
int recursion_level_;
bool* ok_;
@@ -119,7 +119,7 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
void Parser::DeclareAndInitializeVariables(
Block* block, const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
PatternRewriter::DeclareAndInitializeVariables(
this, block, declaration_descriptor, declaration, names, ok);
}
@@ -140,7 +140,7 @@ void PatternRewriter::DeclareAndInitializeVariables(
Parser* parser, Block* block,
const DeclarationDescriptor* declaration_descriptor,
const Parser::DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
DCHECK(block->ignore_completion_value());
PatternRewriter rewriter(declaration_descriptor->scope, parser, BINDING);
@@ -195,7 +195,8 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
VariableProxy* proxy =
factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
Declaration* declaration;
- if (descriptor_->mode == VAR && !descriptor_->scope->is_declaration_scope()) {
+ if (descriptor_->mode == VariableMode::kVar &&
+ !descriptor_->scope->is_declaration_scope()) {
DCHECK(descriptor_->scope->is_block_scope() ||
descriptor_->scope->is_with_scope());
declaration = factory()->NewNestedVariableDeclaration(
@@ -261,7 +262,7 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// For 'let' and 'const' declared variables the initialization always
// assigns to the declared variable.
// But for var declarations we need to do a new lookup.
- if (descriptor_->mode == VAR) {
+ if (descriptor_->mode == VariableMode::kVar) {
proxy = var_init_scope->NewUnresolved(factory(), name);
} else {
DCHECK_NOT_NULL(proxy);
@@ -367,14 +368,14 @@ void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
Variable** temp_var) {
auto temp = *temp_var = CreateTempVar(current_value_);
- ZoneList<Expression*>* rest_runtime_callargs = nullptr;
+ ZonePtrList<Expression>* rest_runtime_callargs = nullptr;
if (pattern->has_rest_property()) {
// non_rest_properties_count = pattern->properties()->length - 1;
// args_length = 1 + non_rest_properties_count because we need to
// pass temp as well to the runtime function.
int args_length = pattern->properties()->length();
rest_runtime_callargs =
- new (zone()) ZoneList<Expression*>(args_length, zone());
+ new (zone()) ZonePtrList<Expression>(args_length, zone());
rest_runtime_callargs->Add(factory()->NewVariableProxy(temp), zone());
}
@@ -409,7 +410,7 @@ void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
if (property->is_computed_name()) {
DCHECK(!key->IsPropertyName() || !key->IsNumberLiteral());
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ auto args = new (zone()) ZonePtrList<Expression>(1, zone());
args->Add(key, zone());
auto to_name_key = CreateTempVar(factory()->NewCallRuntime(
Runtime::kToName, args, kNoSourcePosition));
@@ -592,7 +593,7 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// let array = [];
Variable* array;
{
- auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
+ auto empty_exprs = new (zone()) ZonePtrList<Expression>(0, zone());
array = CreateTempVar(
factory()->NewArrayLiteral(empty_exprs, kNoSourcePosition));
}
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 12acf452be..0dab3f9ee1 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -24,19 +24,21 @@ class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
class VariableContextAllocatedField
: public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-const int kMagicValue = 0xC0DE0DE;
#ifdef DEBUG
+const int kMagicValue = 0xC0DE0DE;
+
const size_t kUint32Size = 5;
const size_t kUint8Size = 2;
const size_t kQuarterMarker = 0;
+const size_t kPlaceholderSize = kUint32Size;
#else
const size_t kUint32Size = 4;
const size_t kUint8Size = 1;
+const size_t kPlaceholderSize = 0;
#endif
-const int kPlaceholderSize = kUint32Size;
-const int kSkippableFunctionDataSize = 4 * kUint32Size + 1 * kUint8Size;
+const size_t kSkippableFunctionDataSize = 4 * kUint32Size + 1 * kUint8Size;
class LanguageField : public BitField8<LanguageMode, 0, 1> {};
class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
@@ -51,7 +53,7 @@ STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
(Skippable function data:)
------------------------------------
- | scope_data_start |
+ | scope_data_start (debug only) |
------------------------------------
| data for inner function 1 |
| ... |
@@ -59,11 +61,11 @@ STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
| data for inner function n |
| ... |
------------------------------------
- (Scope allocation data:) << scope_data_start points here
+ (Scope allocation data:) << scope_data_start points here in debug
------------------------------------
- magic value
+ magic value (debug only)
------------------------------------
- scope positions
+ scope positions (debug only)
------------------------------------
| scope type << only in debug |
| eval |
@@ -101,19 +103,19 @@ void ProducedPreParsedScopeData::ByteData::WriteUint32(uint32_t data) {
free_quarters_in_last_byte_ = 0;
}
+#ifdef DEBUG
void ProducedPreParsedScopeData::ByteData::OverwriteFirstUint32(uint32_t data) {
auto it = backing_store_.begin();
-#ifdef DEBUG
// Check that that position already holds an item of the expected size.
DCHECK_GE(backing_store_.size(), kUint32Size);
DCHECK_EQ(*it, kUint32Size);
++it;
-#endif
const uint8_t* d = reinterpret_cast<uint8_t*>(&data);
for (size_t i = 0; i < 4; ++i) {
*it++ = *d++;
}
}
+#endif
void ProducedPreParsedScopeData::ByteData::WriteUint8(uint8_t data) {
#ifdef DEBUG
@@ -166,8 +168,10 @@ ProducedPreParsedScopeData::ProducedPreParsedScopeData(
if (parent != nullptr) {
parent->data_for_inner_functions_.push_back(this);
}
+#ifdef DEBUG
// Reserve space for scope_data_start, written later:
byte_data_->WriteUint32(0);
+#endif
}
// Create a ProducedPreParsedScopeData which is just a proxy for a previous
@@ -227,6 +231,9 @@ void ProducedPreParsedScopeData::AddSkippableFunction(
return;
}
+ // Start position is used for a sanity check when consuming the data, we could
+ // remove it in the future if we're very pressed for space but it's been good
+ // at catching bugs in the wild so far.
byte_data_->WriteUint32(start_position);
byte_data_->WriteUint32(end_position);
byte_data_->WriteUint32(num_parameters);
@@ -259,6 +266,7 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
return;
}
+#ifdef DEBUG
byte_data_->OverwriteFirstUint32(scope_data_start);
// For a data integrity check, write a value between data about skipped inner
@@ -266,6 +274,7 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
byte_data_->WriteUint32(kMagicValue);
byte_data_->WriteUint32(scope->start_position());
byte_data_->WriteUint32(scope->end_position());
+#endif
SaveDataForScope(scope);
}
@@ -292,30 +301,22 @@ MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
return MaybeHandle<PreParsedScopeData>();
}
- Handle<PreParsedScopeData> data = isolate->factory()->NewPreParsedScopeData();
+ int child_data_length = static_cast<int>(data_for_inner_functions_.size());
+ Handle<PreParsedScopeData> data =
+ isolate->factory()->NewPreParsedScopeData(child_data_length);
Handle<PodArray<uint8_t>> scope_data_array = byte_data_->Serialize(isolate);
data->set_scope_data(*scope_data_array);
- int child_data_length = static_cast<int>(data_for_inner_functions_.size());
- if (child_data_length == 0) {
- data->set_child_data(*(isolate->factory()->empty_fixed_array()));
- } else {
- Handle<FixedArray> child_array =
- isolate->factory()->NewFixedArray(child_data_length, TENURED);
- int i = 0;
- for (const auto& item : data_for_inner_functions_) {
- MaybeHandle<PreParsedScopeData> maybe_child_data =
- item->Serialize(isolate);
- if (maybe_child_data.is_null()) {
- child_array->set(i++, *(isolate->factory()->null_value()));
- } else {
- Handle<PreParsedScopeData> child_data =
- maybe_child_data.ToHandleChecked();
- child_array->set(i++, *child_data);
- }
+ int i = 0;
+ for (const auto& item : data_for_inner_functions_) {
+ Handle<PreParsedScopeData> child_data;
+ if (item->Serialize(isolate).ToHandle(&child_data)) {
+ data->set_child_data(i, *child_data);
+ } else {
+ DCHECK(data->child_data(i)->IsNull());
}
- data->set_child_data(*child_array);
+ i++;
}
return data;
@@ -481,21 +482,24 @@ uint8_t ConsumedPreParsedScopeData::ByteData::ReadQuarter() {
}
ConsumedPreParsedScopeData::ConsumedPreParsedScopeData()
- : scope_data_(new ByteData()), child_index_(0) {}
+ : isolate_(nullptr), scope_data_(new ByteData()), child_index_(0) {}
ConsumedPreParsedScopeData::~ConsumedPreParsedScopeData() {}
-void ConsumedPreParsedScopeData::SetData(Handle<PreParsedScopeData> data) {
+void ConsumedPreParsedScopeData::SetData(Isolate* isolate,
+ Handle<PreParsedScopeData> data) {
+ DCHECK_NOT_NULL(isolate);
DCHECK(data->IsPreParsedScopeData());
+ isolate_ = isolate;
data_ = data;
#ifdef DEBUG
ByteData::ReadingScope reading_scope(this);
int scope_data_start = scope_data_->ReadUint32();
scope_data_->SetPosition(scope_data_start);
DCHECK_EQ(scope_data_->ReadUint32(), kMagicValue);
-#endif
// The first data item is scope_data_start. Skip over it.
scope_data_->SetPosition(kPlaceholderSize);
+#endif
}
ProducedPreParsedScopeData*
@@ -522,14 +526,13 @@ ConsumedPreParsedScopeData::GetDataForSkippableFunction(
// Retrieve the corresponding PreParsedScopeData and associate it to the
// skipped function. If the skipped functions contains inner functions, those
// can be skipped when the skipped function is eagerly parsed.
- FixedArray* children = data_->child_data();
- CHECK_GT(children->length(), child_index_);
- Object* child_data = children->get(child_index_++);
+ CHECK_GT(data_->length(), child_index_);
+ Object* child_data = data_->child_data(child_index_++);
if (!child_data->IsPreParsedScopeData()) {
return nullptr;
}
Handle<PreParsedScopeData> child_data_handle(
- PreParsedScopeData::cast(child_data));
+ PreParsedScopeData::cast(child_data), isolate_);
return new (zone) ProducedPreParsedScopeData(child_data_handle, zone);
}
@@ -541,14 +544,16 @@ void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
ByteData::ReadingScope reading_scope(this);
+#ifdef DEBUG
int magic_value_from_data = scope_data_->ReadUint32();
// Check that we've consumed all inner function data.
- CHECK_EQ(magic_value_from_data, kMagicValue);
+ DCHECK_EQ(magic_value_from_data, kMagicValue);
int start_position_from_data = scope_data_->ReadUint32();
int end_position_from_data = scope_data_->ReadUint32();
- CHECK_EQ(start_position_from_data, scope->start_position());
- CHECK_EQ(end_position_from_data, scope->end_position());
+ DCHECK_EQ(start_position_from_data, scope->start_position());
+ DCHECK_EQ(end_position_from_data, scope->end_position());
+#endif
RestoreData(scope);
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index b621f069d2..6ad0f491f8 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -76,8 +76,10 @@ class ProducedPreParsedScopeData : public ZoneObject {
void WriteUint8(uint8_t data);
void WriteQuarter(uint8_t data);
+#ifdef DEBUG
// For overwriting previously written data at position 0.
void OverwriteFirstUint32(uint32_t data);
+#endif
Handle<PodArray<uint8_t>> Serialize(Isolate* isolate);
@@ -225,7 +227,7 @@ class ConsumedPreParsedScopeData {
ConsumedPreParsedScopeData();
~ConsumedPreParsedScopeData();
- void SetData(Handle<PreParsedScopeData> data);
+ void SetData(Isolate* isolate, Handle<PreParsedScopeData> data);
bool HasData() const { return !data_.is_null(); }
@@ -243,6 +245,7 @@ class ConsumedPreParsedScopeData {
void RestoreDataForVariable(Variable* var);
void RestoreDataForInnerScopes(Scope* scope);
+ Isolate* isolate_;
Handle<PreParsedScopeData> data_;
std::unique_ptr<ByteData> scope_data_;
// When consuming the data, these indexes point to the data we're going to
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 5bb58a03aa..832d2033f2 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -268,7 +268,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
// Wrapped functions are not parsed in the preparser.
DCHECK_NULL(arguments_for_wrapped_function);
DCHECK_NE(FunctionLiteral::kWrapped, function_type);
@@ -366,7 +366,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
name_byte_length = string->byte_length();
}
logger_->FunctionEvent(
- event_name, nullptr, script_id(), ms, function_scope->start_position(),
+ event_name, script_id(), ms, function_scope->start_position(),
function_scope->end_position(), name, name_byte_length);
}
@@ -429,7 +429,7 @@ void PreParser::DeclareAndInitializeVariables(
PreParserStatement block,
const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
if (declaration->pattern.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
DCHECK(track_unresolved_variables_);
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 86fa7d1150..aa4f06d354 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -94,7 +94,7 @@ class PreParserExpression {
static PreParserExpression Null() { return PreParserExpression(); }
static PreParserExpression Default(
- ZoneList<VariableProxy*>* variables = nullptr) {
+ ZonePtrList<VariableProxy>* variables = nullptr) {
return PreParserExpression(TypeField::encode(kExpression), variables);
}
@@ -133,7 +133,7 @@ class PreParserExpression {
return PreParserExpression(TypeField::encode(kExpression));
}
- static PreParserExpression Assignment(ZoneList<VariableProxy*>* variables) {
+ static PreParserExpression Assignment(ZonePtrList<VariableProxy>* variables) {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kAssignment),
variables);
@@ -144,12 +144,13 @@ class PreParserExpression {
}
static PreParserExpression ObjectLiteral(
- ZoneList<VariableProxy*>* variables) {
+ ZonePtrList<VariableProxy>* variables) {
return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
variables);
}
- static PreParserExpression ArrayLiteral(ZoneList<VariableProxy*>* variables) {
+ static PreParserExpression ArrayLiteral(
+ ZonePtrList<VariableProxy>* variables) {
return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
variables);
}
@@ -168,7 +169,7 @@ class PreParserExpression {
IsUseAsmField::encode(true));
}
- static PreParserExpression This(ZoneList<VariableProxy*>* variables) {
+ static PreParserExpression This(ZonePtrList<VariableProxy>* variables) {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kThisExpression),
variables);
@@ -371,7 +372,7 @@ class PreParserExpression {
};
explicit PreParserExpression(uint32_t expression_code,
- ZoneList<VariableProxy*>* variables = nullptr)
+ ZonePtrList<VariableProxy>* variables = nullptr)
: code_(expression_code), variables_(variables) {}
void AddVariable(VariableProxy* variable, Zone* zone) {
@@ -379,7 +380,7 @@ class PreParserExpression {
return;
}
if (variables_ == nullptr) {
- variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
+ variables_ = new (zone) ZonePtrList<VariableProxy>(1, zone);
}
variables_->Add(variable, zone);
}
@@ -406,7 +407,7 @@ class PreParserExpression {
uint32_t code_;
// If the PreParser is used in the variable tracking mode, PreParserExpression
// accumulates variables in that expression.
- ZoneList<VariableProxy*>* variables_;
+ ZonePtrList<VariableProxy>* variables_;
friend class PreParser;
friend class PreParserFactory;
@@ -433,7 +434,7 @@ class PreParserList {
private:
explicit PreParserList(int n) : length_(n), variables_(nullptr) {}
int length_;
- ZoneList<VariableProxy*>* variables_;
+ ZonePtrList<VariableProxy>* variables_;
friend class PreParser;
friend class PreParserFactory;
@@ -446,7 +447,7 @@ inline void PreParserList<PreParserExpression>::Add(
DCHECK(FLAG_lazy_inner_functions);
DCHECK_NOT_NULL(zone);
if (variables_ == nullptr) {
- variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
+ variables_ = new (zone) ZonePtrList<VariableProxy>(1, zone);
}
for (auto identifier : (*expression.variables_)) {
variables_->Add(identifier, zone);
@@ -743,8 +744,9 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewBlock(int capacity, bool ignore_completion_value,
- ZoneList<const AstRawString*>* labels = nullptr) {
+ PreParserStatement NewBlock(
+ int capacity, bool ignore_completion_value,
+ ZonePtrList<const AstRawString>* labels = nullptr) {
return PreParserStatement::Default();
}
@@ -784,17 +786,17 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewDoWhileStatement(ZoneList<const AstRawString*>* labels,
- int pos) {
+ PreParserStatement NewDoWhileStatement(
+ ZonePtrList<const AstRawString>* labels, int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewWhileStatement(ZoneList<const AstRawString*>* labels,
+ PreParserStatement NewWhileStatement(ZonePtrList<const AstRawString>* labels,
int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+ PreParserStatement NewSwitchStatement(ZonePtrList<const AstRawString>* labels,
const PreParserExpression& tag,
int pos) {
return PreParserStatement::Default();
@@ -805,18 +807,18 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewForStatement(ZoneList<const AstRawString*>* labels,
+ PreParserStatement NewForStatement(ZonePtrList<const AstRawString>* labels,
int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewForEachStatement(ForEachStatement::VisitMode visit_mode,
- ZoneList<const AstRawString*>* labels,
- int pos) {
+ PreParserStatement NewForEachStatement(
+ ForEachStatement::VisitMode visit_mode,
+ ZonePtrList<const AstRawString>* labels, int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewForOfStatement(ZoneList<const AstRawString*>* labels,
+ PreParserStatement NewForOfStatement(ZonePtrList<const AstRawString>* labels,
int pos) {
return PreParserStatement::Default();
}
@@ -842,12 +844,12 @@ class PreParserFactory {
struct PreParserFormalParameters : FormalParametersBase {
struct Parameter : public ZoneObject {
- Parameter(ZoneList<VariableProxy*>* variables, bool is_rest)
+ Parameter(ZonePtrList<VariableProxy>* variables, bool is_rest)
: variables_(variables), is_rest(is_rest) {}
Parameter** next() { return &next_parameter; }
Parameter* const* next() const { return &next_parameter; }
- ZoneList<VariableProxy*>* variables_;
+ ZonePtrList<VariableProxy>* variables_;
Parameter* next_parameter = nullptr;
bool is_rest : 1;
};
@@ -1013,7 +1015,8 @@ class PreParser : public ParserBase<PreParser> {
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
+ bool* ok);
PreParserExpression InitializeObjectLiteral(PreParserExpression literal) {
return literal;
@@ -1065,10 +1068,10 @@ class PreParser : public ParserBase<PreParser> {
PreParserStatement block,
const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
- ZoneList<const AstRawString*>* names, bool* ok);
+ ZonePtrList<const AstRawString>* names, bool* ok);
- V8_INLINE ZoneList<const AstRawString*>* DeclareLabel(
- ZoneList<const AstRawString*>* labels, const PreParserExpression& expr,
+ V8_INLINE ZonePtrList<const AstRawString>* DeclareLabel(
+ ZonePtrList<const AstRawString>* labels, const PreParserExpression& expr,
bool* ok) {
DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
DCHECK(IsIdentifier(expr));
@@ -1076,7 +1079,7 @@ class PreParser : public ParserBase<PreParser> {
}
// TODO(nikolaos): The preparser currently does not keep track of labels.
- V8_INLINE bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+ V8_INLINE bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
const PreParserIdentifier& label) {
return false;
}
@@ -1100,7 +1103,8 @@ class PreParser : public ParserBase<PreParser> {
if (catch_info->pattern.variables_ != nullptr) {
for (auto variable : *catch_info->pattern.variables_) {
- scope()->DeclareVariableName(variable->raw_name(), LET);
+ scope()->DeclareVariableName(variable->raw_name(),
+ VariableMode::kLet);
}
}
}
@@ -1162,7 +1166,7 @@ class PreParser : public ParserBase<PreParser> {
DeclareFunction(const PreParserIdentifier& variable_name,
const PreParserExpression& function, VariableMode mode,
int pos, bool is_sloppy_block_function,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
DCHECK_NULL(names);
if (variable_name.string_ != nullptr) {
DCHECK(track_unresolved_variables_);
@@ -1177,13 +1181,13 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserStatement DeclareClass(
const PreParserIdentifier& variable_name,
- const PreParserExpression& value, ZoneList<const AstRawString*>* names,
+ const PreParserExpression& value, ZonePtrList<const AstRawString>* names,
int class_token_pos, int end_pos, bool* ok) {
// Preparser shouldn't be used in contexts where we need to track the names.
DCHECK_NULL(names);
if (variable_name.string_ != nullptr) {
DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(variable_name.string_, LET);
+ scope()->DeclareVariableName(variable_name.string_, VariableMode::kLet);
}
return PreParserStatement::Default();
}
@@ -1192,7 +1196,7 @@ class PreParser : public ParserBase<PreParser> {
int class_token_pos, bool* ok) {
if (name.string_ != nullptr) {
DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(name.string_, CONST);
+ scope()->DeclareVariableName(name.string_, VariableMode::kConst);
}
}
V8_INLINE void DeclareClassProperty(const PreParserIdentifier& class_name,
@@ -1206,13 +1210,13 @@ class PreParser : public ParserBase<PreParser> {
scope()->DeclareVariableName(
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
- CONST);
+ VariableMode::kConst);
}
if (kind == ClassLiteralProperty::PRIVATE_FIELD &&
property_name.string_ != nullptr) {
DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(property_name.string_, CONST);
+ scope()->DeclareVariableName(property_name.string_, VariableMode::kConst);
}
}
@@ -1380,7 +1384,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserStatement
BuildInitializationBlock(DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool* ok) {
for (auto declaration : parsing_result->declarations) {
DeclareAndInitializeVariables(PreParserStatement::Default(),
&(parsing_result->descriptor), &declaration,
@@ -1416,7 +1420,7 @@ class PreParser : public ParserBase<PreParser> {
DCHECK_EQ(1, for_info->parsing_result.declarations.size());
bool is_for_var_of =
for_info->mode == ForEachStatement::ITERATE &&
- for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+ for_info->parsing_result.descriptor.mode == VariableMode::kVar;
bool collect_names =
IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
is_for_var_of;
@@ -1433,7 +1437,7 @@ class PreParser : public ParserBase<PreParser> {
if (track_unresolved_variables_) {
if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
for (auto name : for_info.bound_names) {
- scope()->DeclareVariableName(name, LET);
+ scope()->DeclareVariableName(name, VariableMode::kLet);
}
return PreParserStatement::Default();
}
@@ -1547,13 +1551,13 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
- ZoneList<VariableProxy*>* variables = nullptr;
+ ZonePtrList<VariableProxy>* variables = nullptr;
if (track_unresolved_variables_) {
VariableProxy* proxy = scope()->NewUnresolved(
factory()->ast_node_factory(), ast_value_factory()->this_string(),
pos, THIS_VARIABLE);
- variables = new (zone()) ZoneList<VariableProxy*>(1, zone());
+ variables = new (zone()) ZonePtrList<VariableProxy>(1, zone());
variables->Add(proxy, zone());
}
return PreParserExpression::This(variables);
@@ -1703,7 +1707,8 @@ class PreParser : public ParserBase<PreParser> {
DCHECK(FLAG_lazy_inner_functions);
if (params.variables_ != nullptr) {
for (auto variable : *params.variables_) {
- parameters->scope->DeclareVariableName(variable->raw_name(), VAR);
+ parameters->scope->DeclareVariableName(variable->raw_name(),
+ VariableMode::kVar);
}
}
}
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 102efad292..151244f692 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -43,7 +43,7 @@ class Processor final : public AstVisitor<Processor> {
InitializeAstVisitor(parser->stack_limit());
}
- void Process(ZoneList<Statement*>* statements);
+ void Process(ZonePtrList<Statement>* statements);
bool result_assigned() const { return result_assigned_; }
Zone* zone() { return zone_; }
@@ -121,8 +121,7 @@ Statement* Processor::AssignUndefinedBefore(Statement* s) {
return b;
}
-
-void Processor::Process(ZoneList<Statement*>* statements) {
+void Processor::Process(ZonePtrList<Statement>* statements) {
// If we're in a breakable scope (named block, iteration, or switch), we walk
// all statements. The last value producing statement before the break needs
// to assign to .result. If we're not in a breakable scope, only the last
@@ -283,7 +282,7 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
DCHECK(breakable_ || !is_set_);
BreakableScope scope(this);
// Rewrite statements in all case clauses.
- ZoneList<CaseClause*>* clauses = node->cases();
+ ZonePtrList<CaseClause>* clauses = node->cases();
for (int i = clauses->length() - 1; i >= 0; --i) {
CaseClause* clause = clauses->at(i);
Process(clause->statements());
@@ -381,7 +380,7 @@ bool Rewriter::Rewrite(ParseInfo* info) {
return true;
}
- ZoneList<Statement*>* body = function->body();
+ ZonePtrList<Statement>* body = function->body();
DCHECK_IMPLIES(scope->is_module_scope(), !body->is_empty());
if (!body->is_empty()) {
Variable* result = scope->AsDeclarationScope()->NewTemporary(
@@ -416,7 +415,7 @@ bool Rewriter::Rewrite(Parser* parser, DeclarationScope* closure_scope,
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
DCHECK(block->scope() == nullptr ||
block->scope()->GetClosureScope() == closure_scope);
- ZoneList<Statement*>* body = block->statements();
+ ZonePtrList<Statement>* body = block->statements();
VariableProxy* result = expr->result();
Variable* result_var = result->var();
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 20aa5c9f8e..052b6007ae 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -19,175 +19,251 @@ namespace {
const unibrow::uchar kUtf8Bom = 0xFEFF;
} // namespace
-// ----------------------------------------------------------------------------
-// BufferedUtf16CharacterStreams
-//
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos() pointing to any position,
-// even positions before the current).
-class BufferedUtf16CharacterStream : public Utf16CharacterStream {
- public:
- BufferedUtf16CharacterStream();
+template <typename Char>
+struct HeapStringType;
- protected:
- static const size_t kBufferSize = 512;
+template <>
+struct HeapStringType<uint8_t> {
+ typedef SeqOneByteString String;
+};
- bool ReadBlock() override;
+template <>
+struct HeapStringType<uint16_t> {
+ typedef SeqTwoByteString String;
+};
- // FillBuffer should read up to kBufferSize characters at position and store
- // them into buffer_[0..]. It returns the number of characters stored.
- virtual size_t FillBuffer(size_t position) = 0;
+template <typename Char>
+struct Range {
+ const Char* start;
+ const Char* end;
- // Fixed sized buffer that this class reads from.
- // The base class' buffer_start_ should always point to buffer_.
- uc16 buffer_[kBufferSize];
+ size_t length() { return static_cast<size_t>(end - start); }
+ bool unaligned_start() const {
+ return reinterpret_cast<intptr_t>(start) % sizeof(Char) == 1;
+ }
};
-BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
- : Utf16CharacterStream(buffer_, buffer_, buffer_, 0) {}
+// A Char stream backed by an on-heap SeqOneByteString or SeqTwoByteString.
+template <typename Char>
+class OnHeapStream {
+ public:
+ typedef typename HeapStringType<Char>::String String;
-bool BufferedUtf16CharacterStream::ReadBlock() {
- DCHECK_EQ(buffer_start_, buffer_);
+ OnHeapStream(Handle<String> string, size_t start_offset, size_t end)
+ : string_(string), start_offset_(start_offset), length_(end) {}
- size_t position = pos();
- buffer_pos_ = position;
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_ + FillBuffer(position);
- DCHECK_EQ(pos(), position);
- DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
- return buffer_cursor_ < buffer_end_;
-}
+ Range<Char> GetDataAt(size_t pos) {
+ return {&string_->GetChars()[start_offset_ + Min(length_, pos)],
+ &string_->GetChars()[start_offset_ + length_]};
+ }
-// ----------------------------------------------------------------------------
-// GenericStringUtf16CharacterStream.
-//
-// A stream w/ a data source being a (flattened) Handle<String>.
+ static const bool kCanAccessHeap = true;
+
+ private:
+ Handle<String> string_;
+ const size_t start_offset_;
+ const size_t length_;
+};
-class GenericStringUtf16CharacterStream : public BufferedUtf16CharacterStream {
+// A Char stream backed by an off-heap ExternalOneByteString or
+// ExternalTwoByteString.
+template <typename Char>
+class ExternalStringStream {
public:
- GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
- size_t end_position);
+ ExternalStringStream(const Char* data, size_t end)
+ : data_(data), length_(end) {}
- bool can_access_heap() override { return true; }
+ Range<Char> GetDataAt(size_t pos) {
+ return {&data_[Min(length_, pos)], &data_[length_]};
+ }
- protected:
- size_t FillBuffer(size_t position) override;
+ static const bool kCanAccessHeap = false;
- Handle<String> string_;
- size_t length_;
+ private:
+ const Char* const data_;
+ const size_t length_;
};
-GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
- Handle<String> data, size_t start_position, size_t end_position)
- : string_(data), length_(end_position) {
- DCHECK_GE(end_position, start_position);
- DCHECK_GE(static_cast<size_t>(string_->length()),
- end_position - start_position);
- buffer_pos_ = start_position;
-}
+// A Char stream backed by multiple source-stream provided off-heap chunks.
+template <typename Char>
+class ChunkedStream {
+ public:
+ explicit ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
+ RuntimeCallStats* stats)
+ : source_(source), stats_(stats) {}
-size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
- if (from_pos >= length_) return 0;
+ Range<Char> GetDataAt(size_t pos) {
+ Chunk chunk = FindChunk(pos);
+ size_t buffer_end = chunk.length;
+ size_t buffer_pos = Min(buffer_end, pos - chunk.position);
+ return {&chunk.data[buffer_pos], &chunk.data[buffer_end]};
+ }
- size_t length = i::Min(kBufferSize, length_ - from_pos);
- String::WriteToFlat<uc16>(*string_, buffer_, static_cast<int>(from_pos),
- static_cast<int>(from_pos + length));
- return length;
-}
+ ~ChunkedStream() {
+ for (size_t i = 0; i < chunks_.size(); i++) {
+ delete[] chunks_[i].data;
+ }
+ }
-// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUtf16CharacterStream.
-//
-// A stream whose data source is a Handle<ExternalTwoByteString>. It avoids
-// all data copying.
+ static const bool kCanAccessHeap = false;
-class ExternalTwoByteStringUtf16CharacterStream : public Utf16CharacterStream {
- public:
- ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
- size_t start_position,
- size_t end_position);
+ private:
+ struct Chunk {
+ const Char* const data;
+ // The logical position of data.
+ const size_t position;
+ const size_t length;
+ size_t end_position() const { return position + length; }
+ };
- bool can_access_heap() override { return false; }
+ Chunk FindChunk(size_t position) {
+ if (chunks_.empty()) FetchChunk(size_t{0});
- private:
- bool ReadBlock() override;
+ // Walk forwards while the position is in front of the current chunk.
+ while (position >= chunks_.back().end_position() &&
+ chunks_.back().length > 0) {
+ FetchChunk(chunks_.back().end_position());
+ }
+
+ // Walk backwards.
+ for (auto reverse_it = chunks_.rbegin(); reverse_it != chunks_.rend();
+ ++reverse_it) {
+ if (reverse_it->position <= position) return *reverse_it;
+ }
+
+ UNREACHABLE();
+ }
+
+ void FetchChunk(size_t position) {
+ const uint8_t* data = nullptr;
+ size_t length;
+ {
+ RuntimeCallTimerScope scope(stats_,
+ RuntimeCallCounterId::kGetMoreDataCallback);
+ length = source_->GetMoreData(&data);
+ }
+ // Incoming data has to be aligned to Char size.
+ DCHECK_EQ(0, length % sizeof(Char));
+ chunks_.push_back(
+ {reinterpret_cast<const Char*>(data), position, length / sizeof(Char)});
+ }
- const uc16* raw_data_; // Pointer to the actual array of characters.
- size_t start_pos_;
- size_t end_pos_;
+ std::vector<struct Chunk> chunks_;
+ ScriptCompiler::ExternalSourceStream* source_;
+ RuntimeCallStats* stats_;
};
-ExternalTwoByteStringUtf16CharacterStream::
- ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString> data, size_t start_position,
- size_t end_position)
- : raw_data_(data->GetTwoByteData(static_cast<int>(start_position))),
- start_pos_(start_position),
- end_pos_(end_position) {
- buffer_start_ = raw_data_;
- buffer_cursor_ = raw_data_;
- buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
- buffer_pos_ = start_pos_;
-}
+// Provides a buffered utf-16 view on the bytes from the underlying ByteStream.
+// Chars are buffered if either the underlying stream isn't utf-16 or the
+// underlying utf-16 stream might move (is on-heap).
+template <typename Char, template <typename T> class ByteStream>
+class BufferedCharacterStream : public Utf16CharacterStream {
+ public:
+ template <class... TArgs>
+ BufferedCharacterStream(size_t pos, TArgs... args) : byte_stream_(args...) {
+ buffer_pos_ = pos;
+ }
-bool ExternalTwoByteStringUtf16CharacterStream::ReadBlock() {
- size_t position = pos();
- bool have_data = start_pos_ <= position && position < end_pos_;
- if (have_data) {
- buffer_pos_ = start_pos_;
- buffer_cursor_ = raw_data_ + (position - start_pos_),
- buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
- } else {
+ protected:
+ bool ReadBlock() override {
+ size_t position = pos();
buffer_pos_ = position;
- buffer_cursor_ = raw_data_;
- buffer_end_ = raw_data_;
+ buffer_start_ = &buffer_[0];
+ buffer_cursor_ = buffer_start_;
+
+ Range<Char> range = byte_stream_.GetDataAt(position);
+ if (range.length() == 0) {
+ buffer_end_ = buffer_start_;
+ return false;
+ }
+
+ size_t length = Min(kBufferSize, range.length());
+ i::CopyCharsUnsigned(buffer_, range.start, length);
+ buffer_end_ = &buffer_[length];
+ return true;
}
- return have_data;
-}
-// ----------------------------------------------------------------------------
-// ExternalOneByteStringUtf16CharacterStream
-//
-// A stream whose data source is a Handle<ExternalOneByteString>.
+ bool can_access_heap() override {
+ return ByteStream<uint16_t>::kCanAccessHeap;
+ }
-class ExternalOneByteStringUtf16CharacterStream
- : public BufferedUtf16CharacterStream {
+ private:
+ static const size_t kBufferSize = 512;
+ uc16 buffer_[kBufferSize];
+ ByteStream<Char> byte_stream_;
+};
+
+// Provides a unbuffered utf-16 view on the bytes from the underlying
+// ByteStream.
+template <template <typename T> class ByteStream>
+class UnbufferedCharacterStream : public Utf16CharacterStream {
public:
- ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
- size_t start_position,
- size_t end_position);
+ template <class... TArgs>
+ UnbufferedCharacterStream(size_t pos, TArgs... args) : byte_stream_(args...) {
+ DCHECK(!ByteStream<uint16_t>::kCanAccessHeap);
+ buffer_pos_ = pos;
+ }
+
+ protected:
+ bool ReadBlock() override {
+ size_t position = pos();
+ buffer_pos_ = position;
+ Range<uint16_t> range = byte_stream_.GetDataAt(position);
+ buffer_start_ = range.start;
+ buffer_end_ = range.end;
+ buffer_cursor_ = buffer_start_;
+ if (range.length() == 0) return false;
- // For testing:
- ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
+ DCHECK(!range.unaligned_start());
+ DCHECK_LE(buffer_start_, buffer_end_);
+ return true;
+ }
bool can_access_heap() override { return false; }
+ private:
+ ByteStream<uint16_t> byte_stream_;
+};
+
+// ----------------------------------------------------------------------------
+// BufferedUtf16CharacterStreams
+//
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos() pointing to any position,
+// even positions before the current).
+//
+// TODO(verwaest): Remove together with Utf8 external streaming streams.
+class BufferedUtf16CharacterStream : public Utf16CharacterStream {
+ public:
+ BufferedUtf16CharacterStream();
+
protected:
- size_t FillBuffer(size_t position) override;
+ static const size_t kBufferSize = 512;
- const uint8_t* raw_data_; // Pointer to the actual array of characters.
- size_t length_;
-};
+ bool ReadBlock() override;
-ExternalOneByteStringUtf16CharacterStream::
- ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString> data, size_t start_position,
- size_t end_position)
- : raw_data_(data->GetChars()), length_(end_position) {
- DCHECK(end_position >= start_position);
- buffer_pos_ = start_position;
-}
+ // FillBuffer should read up to kBufferSize characters at position and store
+ // them into buffer_[0..]. It returns the number of characters stored.
+ virtual size_t FillBuffer(size_t position) = 0;
+
+ // Fixed sized buffer that this class reads from.
+ // The base class' buffer_start_ should always point to buffer_.
+ uc16 buffer_[kBufferSize];
+};
-ExternalOneByteStringUtf16CharacterStream::
- ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length)
- : raw_data_(reinterpret_cast<const uint8_t*>(data)), length_(length) {}
+BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
+ : Utf16CharacterStream(buffer_, buffer_, buffer_, 0) {}
-size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
- if (from_pos >= length_) return 0;
+bool BufferedUtf16CharacterStream::ReadBlock() {
+ DCHECK_EQ(buffer_start_, buffer_);
- size_t length = Min(kBufferSize, length_ - from_pos);
- i::CopyCharsUnsigned(buffer_, raw_data_ + from_pos, length);
- return length;
+ size_t position = pos();
+ buffer_pos_ = position;
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_ + FillBuffer(position);
+ DCHECK_EQ(pos(), position);
+ DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
+ return buffer_cursor_ < buffer_end_;
}
// ----------------------------------------------------------------------------
@@ -197,6 +273,9 @@ size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
// may 'cut' arbitrarily into utf-8 characters. Also, seeking to a given
// character position is tricky because the byte position cannot be dericed
// from the character position.
+//
+// TODO(verwaest): Decode utf8 chunks into utf16 chunks on the blink side
+// instead so we don't need to buffer.
class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
public:
@@ -469,392 +548,48 @@ size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
}
// ----------------------------------------------------------------------------
-// Chunks - helper for One- + TwoByteExternalStreamingStream
-namespace {
-
-struct Chunk {
- const uint8_t* data;
- size_t byte_length;
- size_t byte_pos;
-};
-
-typedef std::vector<struct Chunk> Chunks;
-
-void DeleteChunks(Chunks& chunks) {
- for (size_t i = 0; i < chunks.size(); i++) delete[] chunks[i].data;
-}
-
-// Return the chunk index for the chunk containing position.
-// If position is behind the end of the stream, the index of the last,
-// zero-length chunk is returned.
-size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source,
- size_t position, RuntimeCallStats* stats) {
- size_t end_pos =
- chunks.empty() ? 0 : (chunks.back().byte_pos + chunks.back().byte_length);
-
- // Get more data if needed. We usually won't enter the loop body.
- bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
- {
- RuntimeCallTimerScope scope(stats,
- RuntimeCallCounterId::kGetMoreDataCallback);
- while (!out_of_data && end_pos <= position + 1) {
- const uint8_t* chunk = nullptr;
- size_t len = source->GetMoreData(&chunk);
-
- chunks.push_back({chunk, len, end_pos});
- end_pos += len;
- out_of_data = (len == 0);
- }
- }
-
- // Here, we should always have at least one chunk, and we either have the
- // chunk we were looking for, or we're out of data. Also, out_of_data and
- // end_pos are current (and designate whether we have exhausted the stream,
- // and the length of data received so far, respectively).
- DCHECK(!chunks.empty());
- DCHECK_EQ(end_pos, chunks.back().byte_pos + chunks.back().byte_length);
- DCHECK_EQ(out_of_data, chunks.back().byte_length == 0);
- DCHECK(position < end_pos || out_of_data);
-
- // Edge case: position is behind the end of stream: Return the last (length 0)
- // chunk to indicate the end of the stream.
- if (position >= end_pos) {
- DCHECK(out_of_data);
- return chunks.size() - 1;
- }
-
- // We almost always 'stream', meaning we want data from the last chunk, so
- // let's look at chunks back-to-front.
- size_t chunk_no = chunks.size() - 1;
- while (chunks[chunk_no].byte_pos > position) {
- DCHECK_NE(chunk_no, 0u);
- chunk_no--;
- }
- DCHECK_LE(chunks[chunk_no].byte_pos, position);
- DCHECK_LT(position, chunks[chunk_no].byte_pos + chunks[chunk_no].byte_length);
- return chunk_no;
-}
-
-} // anonymous namespace
-
-// ----------------------------------------------------------------------------
-// OneByteExternalStreamingStream
-//
-// A stream of latin-1 encoded, chunked data.
-
-class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
- public:
- explicit OneByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
- : source_(source), stats_(stats) {}
- ~OneByteExternalStreamingStream() override { DeleteChunks(chunks_); }
-
- bool can_access_heap() override { return false; }
-
- protected:
- size_t FillBuffer(size_t position) override;
-
- private:
- Chunks chunks_;
- ScriptCompiler::ExternalSourceStream* source_;
- RuntimeCallStats* stats_;
-};
-
-size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
- const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position, stats_)];
- if (chunk.byte_length == 0) return 0;
-
- size_t start_pos = position - chunk.byte_pos;
- size_t len = i::Min(kBufferSize, chunk.byte_length - start_pos);
- i::CopyCharsUnsigned(buffer_, chunk.data + start_pos, len);
- return len;
-}
-
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
-// ----------------------------------------------------------------------------
-// TwoByteExternalStreamingStream
-//
-// A stream of ucs-2 data, delivered in chunks. Chunks may be 'cut' into the
-// middle of characters (or even contain only one byte), which adds a bit
-// of complexity. This stream avoid all data copying, except for characters
-// that cross chunk boundaries.
-
-class TwoByteExternalStreamingStream : public Utf16CharacterStream {
- public:
- explicit TwoByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
- ~TwoByteExternalStreamingStream() override;
-
- bool can_access_heap() override { return false; }
-
- protected:
- bool ReadBlock() override;
-
- Chunks chunks_;
- ScriptCompiler::ExternalSourceStream* source_;
- RuntimeCallStats* stats_;
- uc16 one_char_buffer_;
-};
-
-TwoByteExternalStreamingStream::TwoByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
- : Utf16CharacterStream(&one_char_buffer_, &one_char_buffer_,
- &one_char_buffer_, 0),
- source_(source),
- stats_(stats),
- one_char_buffer_(0) {}
-
-TwoByteExternalStreamingStream::~TwoByteExternalStreamingStream() {
- DeleteChunks(chunks_);
-}
-
-bool TwoByteExternalStreamingStream::ReadBlock() {
- size_t position = pos();
-
- // We'll search for the 2nd byte of our character, to make sure we
- // have enough data for at least one character.
- size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
-
- // Out of data? Return 0.
- if (chunks_[chunk_no].byte_length == 0) {
- buffer_pos_ = position;
- buffer_cursor_ = buffer_start_;
- buffer_end_ = buffer_start_;
- return false;
- }
-
- Chunk& current = chunks_[chunk_no];
-
- // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
- // character may be split between the previous and the current chunk.
- // If we find such a lonely byte at the beginning of the chunk, we'll use
- // one_char_buffer_ to hold the full character.
- bool lonely_byte = (chunks_[chunk_no].byte_pos == (2 * position + 1));
- if (lonely_byte) {
- DCHECK_NE(chunk_no, 0u);
- Chunk& previous_chunk = chunks_[chunk_no - 1];
-#ifdef V8_TARGET_BIG_ENDIAN
- uc16 character = current.data[0] |
- previous_chunk.data[previous_chunk.byte_length - 1] << 8;
-#else
- uc16 character = previous_chunk.data[previous_chunk.byte_length - 1] |
- current.data[0] << 8;
-#endif
-
- one_char_buffer_ = character;
- buffer_pos_ = position;
- buffer_start_ = &one_char_buffer_;
- buffer_cursor_ = &one_char_buffer_;
- buffer_end_ = &one_char_buffer_ + 1;
- return true;
- }
-
- // Common case: character is in current chunk.
- DCHECK_LE(current.byte_pos, 2 * position);
- DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
-
- // Determine # of full ucs-2 chars in stream, and whether we started on an odd
- // byte boundary.
- bool odd_start = (current.byte_pos % 2) == 1;
- size_t number_chars = (current.byte_length - odd_start) / 2;
-
- // Point the buffer_*_ members into the current chunk and set buffer_cursor_
- // to point to position. Be careful when converting the byte positions (in
- // Chunk) to the ucs-2 character positions (in buffer_*_ members).
- buffer_start_ = reinterpret_cast<const uint16_t*>(current.data + odd_start);
- buffer_end_ = buffer_start_ + number_chars;
- buffer_pos_ = (current.byte_pos + odd_start) / 2;
- buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
- DCHECK_EQ(position, pos());
- return true;
-}
-
-#else
-
-// ----------------------------------------------------------------------------
-// TwoByteExternalBufferedStream
-//
-// This class is made specifically to address unaligned access to 16-bit data
-// in MIPS and ARM architectures. It replaces class
-// TwoByteExternalStreamingStream which in some cases does have unaligned
-// accesse to 16-bit data
-
-class TwoByteExternalBufferedStream : public Utf16CharacterStream {
- public:
- explicit TwoByteExternalBufferedStream(
- ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
- ~TwoByteExternalBufferedStream();
-
- bool can_access_heap() override { return false; }
-
- protected:
- static const size_t kBufferSize = 512;
-
- bool ReadBlock() override;
-
- // FillBuffer should read up to kBufferSize characters at position and store
- // them into buffer_[0..]. It returns the number of characters stored.
- size_t FillBuffer(size_t position, size_t chunk_no);
-
- // Fixed sized buffer that this class reads from.
- // The base class' buffer_start_ should always point to buffer_.
- uc16 buffer_[kBufferSize];
-
- Chunks chunks_;
- ScriptCompiler::ExternalSourceStream* source_;
- RuntimeCallStats* stats_;
-};
-
-TwoByteExternalBufferedStream::TwoByteExternalBufferedStream(
- ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
- : Utf16CharacterStream(buffer_, buffer_, buffer_, 0),
- source_(source),
- stats_(stats) {}
-
-TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
- DeleteChunks(chunks_);
-}
-
-bool TwoByteExternalBufferedStream::ReadBlock() {
- size_t position = pos();
- // Find chunk in which the position belongs
- size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
-
- // Out of data? Return 0.
- if (chunks_[chunk_no].byte_length == 0) {
- buffer_pos_ = position;
- buffer_cursor_ = buffer_start_;
- buffer_end_ = buffer_start_;
- return false;
- }
-
- Chunk& current = chunks_[chunk_no];
-
- bool odd_start = current.byte_pos % 2;
- // Common case: character is in current chunk.
- DCHECK_LE(current.byte_pos, 2 * position + odd_start);
- DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
-
- // If character starts on odd address copy text in buffer so there is always
- // aligned access to characters. This is important on MIPS and ARM
- // architectures. Otherwise read characters from memory directly.
- if (!odd_start) {
- buffer_start_ = reinterpret_cast<const uint16_t*>(current.data);
- size_t number_chars = current.byte_length / 2;
- buffer_end_ = buffer_start_ + number_chars;
- buffer_pos_ = current.byte_pos / 2;
- buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
- DCHECK_EQ(position, pos());
- return true;
- } else {
- buffer_start_ = buffer_;
- buffer_pos_ = position;
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_ + FillBuffer(position, chunk_no);
- DCHECK_EQ(pos(), position);
- DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
- return buffer_cursor_ < buffer_end_;
- }
-}
-
-size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
- size_t chunk_no) {
- DCHECK_EQ(chunks_[chunk_no].byte_pos % 2, 1u);
- bool odd_start = true;
- // Align buffer_pos_ to the size of the buffer.
- {
- size_t new_pos = position / kBufferSize * kBufferSize;
- if (new_pos != position) {
- chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1, stats_);
- buffer_pos_ = new_pos;
- buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
- position = new_pos;
- odd_start = chunks_[chunk_no].byte_pos % 2;
- }
- }
-
- Chunk* current = &chunks_[chunk_no];
-
- // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
- // character may be split between the previous and the current chunk.
- // If we find such a lonely byte at the beginning of the chunk, we'll copy
- // it to the first byte in buffer_.
- size_t totalLength = 0;
- bool lonely_byte = (current->byte_pos == (2 * position + 1));
- if (lonely_byte) {
- DCHECK_NE(chunk_no, 0u);
- Chunk& previous_chunk = chunks_[chunk_no - 1];
- *reinterpret_cast<uint8_t*>(buffer_) =
- previous_chunk.data[previous_chunk.byte_length - 1];
- totalLength++;
- }
-
- // Common case: character is in current chunk.
- DCHECK_LE(current->byte_pos, 2 * position + odd_start);
- DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
-
- // Copy characters from current chunk starting from chunk_pos to the end of
- // buffer or chunk.
- size_t chunk_pos = position - current->byte_pos / 2;
- size_t start_offset = odd_start && chunk_pos != 0;
- size_t bytes_to_move =
- i::Min(2 * kBufferSize - lonely_byte,
- current->byte_length - 2 * chunk_pos + start_offset);
- i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + lonely_byte,
- current->data + 2 * chunk_pos - start_offset, bytes_to_move);
-
- // Fill up the rest of the buffer if there is space and data left.
- totalLength += bytes_to_move;
- position = (current->byte_pos + current->byte_length) / 2;
- if (position - buffer_pos_ < kBufferSize) {
- chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
- current = &chunks_[chunk_no];
- odd_start = current->byte_pos % 2;
- bytes_to_move = i::Min(2 * kBufferSize - totalLength, current->byte_length);
- while (bytes_to_move) {
- // Common case: character is in current chunk.
- DCHECK_LE(current->byte_pos, 2 * position + odd_start);
- DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
-
- i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + totalLength,
- current->data, bytes_to_move);
- totalLength += bytes_to_move;
- position = (current->byte_pos + current->byte_length) / 2;
- chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
- current = &chunks_[chunk_no];
- odd_start = current->byte_pos % 2;
- bytes_to_move =
- i::Min(2 * kBufferSize - totalLength, current->byte_length);
- }
- }
- return totalLength / 2;
-}
-#endif
-
-// ----------------------------------------------------------------------------
// ScannerStream: Create stream instances.
-Utf16CharacterStream* ScannerStream::For(Handle<String> data) {
- return ScannerStream::For(data, 0, data->length());
+Utf16CharacterStream* ScannerStream::For(Isolate* isolate,
+ Handle<String> data) {
+ return ScannerStream::For(isolate, data, 0, data->length());
}
-Utf16CharacterStream* ScannerStream::For(Handle<String> data, int start_pos,
- int end_pos) {
+Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
+ int start_pos, int end_pos) {
DCHECK_GE(start_pos, 0);
DCHECK_LE(start_pos, end_pos);
DCHECK_LE(end_pos, data->length());
+ size_t start_offset = 0;
+ if (data->IsSlicedString()) {
+ SlicedString* string = SlicedString::cast(*data);
+ start_offset = string->offset();
+ String* parent = string->parent();
+ if (parent->IsThinString()) parent = ThinString::cast(parent)->actual();
+ data = handle(parent, isolate);
+ } else {
+ data = String::Flatten(isolate, data);
+ }
if (data->IsExternalOneByteString()) {
- return new ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString>::cast(data),
- static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
+ return new BufferedCharacterStream<uint8_t, ExternalStringStream>(
+ static_cast<size_t>(start_pos),
+ ExternalOneByteString::cast(*data)->GetChars() + start_offset,
+ static_cast<size_t>(end_pos));
} else if (data->IsExternalTwoByteString()) {
- return new ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString>::cast(data),
- static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
+ return new UnbufferedCharacterStream<ExternalStringStream>(
+ static_cast<size_t>(start_pos),
+ ExternalTwoByteString::cast(*data)->GetChars() + start_offset,
+ static_cast<size_t>(end_pos));
+ } else if (data->IsSeqOneByteString()) {
+ return new BufferedCharacterStream<uint8_t, OnHeapStream>(
+ static_cast<size_t>(start_pos), Handle<SeqOneByteString>::cast(data),
+ start_offset, static_cast<size_t>(end_pos));
+ } else if (data->IsSeqTwoByteString()) {
+ return new BufferedCharacterStream<uint16_t, OnHeapStream>(
+ static_cast<size_t>(start_pos), Handle<SeqTwoByteString>::cast(data),
+ start_offset, static_cast<size_t>(end_pos));
} else {
- // TODO(vogelheim): Maybe call data.Flatten() first?
- return new GenericStringUtf16CharacterStream(
- data, static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
+ UNREACHABLE();
}
}
@@ -866,7 +601,9 @@ std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
const char* data, size_t length) {
return std::unique_ptr<Utf16CharacterStream>(
- new ExternalOneByteStringUtf16CharacterStream(data, length));
+ new BufferedCharacterStream<uint8_t, ExternalStringStream>(
+ static_cast<size_t>(0), reinterpret_cast<const uint8_t*>(data),
+ static_cast<size_t>(length)));
}
Utf16CharacterStream* ScannerStream::For(
@@ -875,18 +612,15 @@ Utf16CharacterStream* ScannerStream::For(
RuntimeCallStats* stats) {
switch (encoding) {
case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
- return new TwoByteExternalStreamingStream(source_stream, stats);
-#else
- return new TwoByteExternalBufferedStream(source_stream, stats);
-#endif
+ return new UnbufferedCharacterStream<ChunkedStream>(
+ static_cast<size_t>(0), source_stream, stats);
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
- return new OneByteExternalStreamingStream(source_stream, stats);
+ return new BufferedCharacterStream<uint8_t, ChunkedStream>(
+ static_cast<size_t>(0), source_stream, stats);
case v8::ScriptCompiler::StreamedSource::UTF8:
return new Utf8ExternalStreamingStream(source_stream, stats);
}
UNREACHABLE();
- return nullptr;
}
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 9f7d2bd5fb..12c5847f2f 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -19,9 +19,9 @@ class String;
class V8_EXPORT_PRIVATE ScannerStream {
public:
- static Utf16CharacterStream* For(Handle<String> data);
- static Utf16CharacterStream* For(Handle<String> data, int start_pos,
- int end_pos);
+ static Utf16CharacterStream* For(Isolate* isolate, Handle<String> data);
+ static Utf16CharacterStream* For(Isolate* isolate, Handle<String> data,
+ int start_pos, int end_pos);
static Utf16CharacterStream* For(
ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding,
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index aacf3e7162..852b5e400b 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -391,21 +391,6 @@ Token::Value Scanner::Next() {
}
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
- if (static_cast<unsigned>(c0_) <= 0x7F) {
- Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
- if (token != Token::ILLEGAL) {
- int pos = source_pos();
- next_.token = token;
- next_.contextual_token = Token::UNINITIALIZED;
- next_.location.beg_pos = pos;
- next_.location.end_pos = pos + 1;
- next_.literal_chars = nullptr;
- next_.raw_literal_chars = nullptr;
- next_.invalid_template_escape_message = MessageTemplate::kNone;
- Advance();
- return current_.token;
- }
- }
Scan();
return current_.token;
}
@@ -438,8 +423,8 @@ Token::Value Scanner::SkipWhiteSpace() {
while (true) {
while (true) {
- // Don't skip behind the end of input.
- if (c0_ == kEndOfInput) break;
+ // We won't skip behind the end of input.
+ DCHECK(!unicode_cache_->IsWhiteSpace(kEndOfInput));
// Advance as long as character is a WhiteSpace or LineTerminator.
// Remember if the latter is the case.
@@ -521,9 +506,11 @@ Token::Value Scanner::SkipSourceURLComment() {
void Scanner::TryToParseSourceURLComment() {
// Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
- if (c0_ == kEndOfInput || !unicode_cache_->IsWhiteSpace(c0_)) return;
+ DCHECK(!unicode_cache_->IsWhiteSpaceOrLineTerminator(kEndOfInput));
+ if (!unicode_cache_->IsWhiteSpace(c0_)) return;
Advance();
LiteralBuffer name;
+
while (c0_ != kEndOfInput &&
!unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && c0_ != '=') {
name.AddChar(c0_);
@@ -543,7 +530,7 @@ void Scanner::TryToParseSourceURLComment() {
return;
Advance();
value->Reset();
- while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
+ while (unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
@@ -576,7 +563,8 @@ Token::Value Scanner::SkipMultiLineComment() {
while (c0_ != kEndOfInput) {
uc32 ch = c0_;
Advance();
- if (c0_ != kEndOfInput && unibrow::IsLineTerminator(ch)) {
+ DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
+ if (unibrow::IsLineTerminator(ch)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
has_multiline_comment_before_next_ = true;
@@ -617,24 +605,26 @@ void Scanner::Scan() {
next_.literal_chars = nullptr;
next_.raw_literal_chars = nullptr;
next_.invalid_template_escape_message = MessageTemplate::kNone;
+
Token::Value token;
do {
+ if (static_cast<unsigned>(c0_) <= 0x7F) {
+ Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
+ if (token != Token::ILLEGAL) {
+ int pos = source_pos();
+ next_.token = token;
+ next_.contextual_token = Token::UNINITIALIZED;
+ next_.location.beg_pos = pos;
+ next_.location.end_pos = pos + 1;
+ Advance();
+ return;
+ }
+ }
+
// Remember the position of the next token
next_.location.beg_pos = source_pos();
switch (c0_) {
- case ' ':
- case '\t':
- Advance();
- token = Token::WHITESPACE;
- break;
-
- case '\n':
- Advance();
- has_line_terminator_before_next_ = true;
- token = Token::WHITESPACE;
- break;
-
case '"':
case '\'':
token = ScanString();
@@ -813,50 +803,6 @@ void Scanner::Scan() {
}
break;
- case ':':
- token = Select(Token::COLON);
- break;
-
- case ';':
- token = Select(Token::SEMICOLON);
- break;
-
- case ',':
- token = Select(Token::COMMA);
- break;
-
- case '(':
- token = Select(Token::LPAREN);
- break;
-
- case ')':
- token = Select(Token::RPAREN);
- break;
-
- case '[':
- token = Select(Token::LBRACK);
- break;
-
- case ']':
- token = Select(Token::RBRACK);
- break;
-
- case '{':
- token = Select(Token::LBRACE);
- break;
-
- case '}':
- token = Select(Token::RBRACE);
- break;
-
- case '?':
- token = Select(Token::CONDITIONAL);
- break;
-
- case '~':
- token = Select(Token::BIT_NOT);
- break;
-
case '`':
token = ScanTemplateStart();
break;
@@ -866,17 +812,17 @@ void Scanner::Scan() {
break;
default:
- if (c0_ == kEndOfInput) {
- token = Token::EOS;
- } else if (unicode_cache_->IsIdentifierStart(c0_)) {
+ if (unicode_cache_->IsIdentifierStart(c0_) ||
+ (CombineSurrogatePair() &&
+ unicode_cache_->IsIdentifierStart(c0_))) {
token = ScanIdentifierOrKeyword();
} else if (IsDecimalDigit(c0_)) {
token = ScanNumber(false);
+ } else if (c0_ == kEndOfInput) {
+ token = Token::EOS;
} else {
token = SkipWhiteSpace();
- if (token == Token::ILLEGAL) {
- Advance();
- }
+ if (token == Token::ILLEGAL) Advance();
}
break;
}
@@ -978,8 +924,8 @@ bool Scanner::ScanEscape() {
Advance<capture_raw>();
// Skip escaped newlines.
- if (!in_template_literal && c0_ != kEndOfInput &&
- unibrow::IsLineTerminator(c)) {
+ DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
+ if (!in_template_literal && unibrow::IsLineTerminator(c)) {
// Allow escaped CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
return true;
@@ -1053,43 +999,28 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length, bool in_template_literal) {
Token::Value Scanner::ScanString() {
uc32 quote = c0_;
- Advance<false, false>(); // consume quote
+ Advance(); // consume quote
LiteralScope literal(this);
while (true) {
- if (c0_ > kMaxAscii) {
- HandleLeadSurrogate();
- break;
- }
- if (c0_ == kEndOfInput || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
if (c0_ == quote) {
literal.Complete();
- Advance<false, false>();
+ Advance();
return Token::STRING;
}
- char c = static_cast<char>(c0_);
- if (c == '\\') break;
- Advance<false, false>();
- AddLiteralChar(c);
- }
-
- while (c0_ != quote && c0_ != kEndOfInput &&
- !unibrow::IsStringLiteralLineTerminator(c0_)) {
- uc32 c = c0_;
- Advance();
- if (c == '\\') {
+ if (c0_ == kEndOfInput || unibrow::IsStringLiteralLineTerminator(c0_)) {
+ return Token::ILLEGAL;
+ }
+ if (c0_ == '\\') {
+ Advance();
+ // TODO(verwaest): Check whether we can remove the additional check.
if (c0_ == kEndOfInput || !ScanEscape<false, false>()) {
return Token::ILLEGAL;
}
- } else {
- AddLiteralChar(c);
+ continue;
}
+ AddLiteralCharAdvance();
}
- if (c0_ != quote) return Token::ILLEGAL;
- literal.Complete();
-
- Advance(); // consume quote
- return Token::STRING;
}
Token::Value Scanner::ScanPrivateName() {
@@ -1102,7 +1033,8 @@ Token::Value Scanner::ScanPrivateName() {
LiteralScope literal(this);
DCHECK_EQ(c0_, '#');
AddLiteralCharAdvance();
- if (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_)) {
+ DCHECK(!unicode_cache_->IsIdentifierStart(kEndOfInput));
+ if (!unicode_cache_->IsIdentifierStart(c0_)) {
PushBack(c0_);
ReportScannerError(source_pos(),
MessageTemplate::kInvalidOrUnexpectedToken);
@@ -1150,7 +1082,8 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (c0_ != kEndOfInput && unibrow::IsLineTerminator(c0_)) {
+ DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
+ if (unibrow::IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
@@ -1230,7 +1163,7 @@ bool Scanner::ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
bool separator_seen = false;
while (predicate(c0_) || c0_ == '_') {
if (c0_ == '_') {
- Advance<false, false>();
+ Advance();
if (c0_ == '_') {
ReportScannerError(Location(source_pos(), source_pos() + 1),
MessageTemplate::kContinuousNumericSeparator);
@@ -1266,7 +1199,7 @@ bool Scanner::ScanDecimalAsSmiWithNumericSeparators(uint64_t* value) {
bool separator_seen = false;
while (IsDecimalDigit(c0_) || c0_ == '_') {
if (c0_ == '_') {
- Advance<false, false>();
+ Advance();
if (c0_ == '_') {
ReportScannerError(Location(source_pos(), source_pos() + 1),
MessageTemplate::kContinuousNumericSeparator);
@@ -1278,7 +1211,7 @@ bool Scanner::ScanDecimalAsSmiWithNumericSeparators(uint64_t* value) {
separator_seen = false;
*value = 10 * *value + (c0_ - '0');
uc32 first_char = c0_;
- Advance<false, false>();
+ Advance();
AddLiteralChar(first_char);
}
@@ -1299,7 +1232,7 @@ bool Scanner::ScanDecimalAsSmi(uint64_t* value) {
while (IsDecimalDigit(c0_)) {
*value = 10 * *value + (c0_ - '0');
uc32 first_char = c0_;
- Advance<false, false>();
+ Advance();
AddLiteralChar(first_char);
}
return true;
@@ -1444,10 +1377,9 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (next_.literal_chars->one_byte_literal().length() <= 10 &&
value <= Smi::kMaxValue && c0_ != '.' &&
- (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_))) {
+ !unicode_cache_->IsIdentifierStart(c0_)) {
next_.smi_value_ = static_cast<uint32_t>(value);
literal.Complete();
- HandleLeadSurrogate();
if (kind == DECIMAL_WITH_LEADING_ZERO) {
octal_pos_ = Location(start_pos, source_pos());
@@ -1455,7 +1387,6 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
return Token::SMI;
}
- HandleLeadSurrogate();
}
if (!ScanDecimalDigits()) return Token::ILLEGAL;
@@ -1503,9 +1434,9 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// not be an identifier start or a decimal digit; see ECMA-262
// section 7.8.3, page 17 (note that we read only one decimal digit
// if the value is 0).
- if (IsDecimalDigit(c0_) ||
- (c0_ != kEndOfInput && unicode_cache_->IsIdentifierStart(c0_)))
+ if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_)) {
return Token::ILLEGAL;
+ }
literal.Complete();
@@ -1688,24 +1619,18 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
DCHECK(unicode_cache_->IsIdentifierStart(c0_));
+ bool escaped = false;
if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
do {
- char first_char = static_cast<char>(c0_);
- Advance<false, false>();
- AddLiteralChar(first_char);
+ AddLiteralCharAdvance();
} while (IsInRange(c0_, 'a', 'z') || c0_ == '_');
- if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '_' ||
- c0_ == '$') {
- // Identifier starting with lowercase.
- char first_char = static_cast<char>(c0_);
- Advance<false, false>();
- AddLiteralChar(first_char);
- while (IsAsciiIdentifier(c0_)) {
- char first_char = static_cast<char>(c0_);
- Advance<false, false>();
- AddLiteralChar(first_char);
- }
+ if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
+ // Identifier starting with lowercase or _.
+ do {
+ AddLiteralCharAdvance();
+ } while (IsAsciiIdentifier(c0_));
+
if (c0_ <= kMaxAscii && c0_ != '\\') {
literal->Complete();
return Token::IDENTIFIER;
@@ -1721,100 +1646,71 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
literal->Complete();
return token;
}
-
- HandleLeadSurrogate();
- } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '_' || c0_ == '$') {
+ } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
do {
- char first_char = static_cast<char>(c0_);
- Advance<false, false>();
- AddLiteralChar(first_char);
+ AddLiteralCharAdvance();
} while (IsAsciiIdentifier(c0_));
if (c0_ <= kMaxAscii && c0_ != '\\') {
literal->Complete();
return Token::IDENTIFIER;
}
-
- HandleLeadSurrogate();
} else if (c0_ == '\\') {
- // Scan identifier start character.
+ escaped = true;
uc32 c = ScanIdentifierUnicodeEscape();
- // Only allow legal identifier start characters.
- if (c < 0 ||
- c == '\\' || // No recursive escapes.
- !unicode_cache_->IsIdentifierStart(c)) {
+ DCHECK(!unicode_cache_->IsIdentifierStart(-1));
+ if (c == '\\' || !unicode_cache_->IsIdentifierStart(c)) {
return Token::ILLEGAL;
}
AddLiteralChar(c);
- return ScanIdentifierSuffix(literal, true);
- } else {
- uc32 first_char = c0_;
- Advance();
- AddLiteralChar(first_char);
- }
-
- // Scan the rest of the identifier characters.
- while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- uc32 next_char = c0_;
- Advance();
- AddLiteralChar(next_char);
- continue;
- }
- // Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(literal, false);
- }
-
- if (next_.literal_chars->is_one_byte()) {
- Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- Token::Value token =
- KeywordOrIdentifierToken(chars.start(), chars.length());
- if (token == Token::IDENTIFIER ||
- token == Token::FUTURE_STRICT_RESERVED_WORD ||
- Token::IsContextualKeyword(token))
- literal->Complete();
- return token;
}
- literal->Complete();
- return Token::IDENTIFIER;
-}
-
-Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
- bool escaped) {
- // Scan the rest of the identifier characters.
- while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
+ while (true) {
if (c0_ == '\\') {
- uc32 c = ScanIdentifierUnicodeEscape();
escaped = true;
+ uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
- if (c < 0 ||
- c == '\\' ||
- !unicode_cache_->IsIdentifierPart(c)) {
+ // TODO(verwaest): Make this true.
+ // DCHECK(!unicode_cache_->IsIdentifierPart('\\'));
+ DCHECK(!unicode_cache_->IsIdentifierPart(-1));
+ if (c == '\\' || !unicode_cache_->IsIdentifierPart(c)) {
return Token::ILLEGAL;
}
AddLiteralChar(c);
+ } else if (unicode_cache_->IsIdentifierPart(c0_) ||
+ (CombineSurrogatePair() &&
+ unicode_cache_->IsIdentifierPart(c0_))) {
+ AddLiteralCharAdvance();
} else {
- AddLiteralChar(c0_);
- Advance();
+ break;
}
}
- literal->Complete();
- if (escaped && next_.literal_chars->is_one_byte()) {
+ if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
/* TODO(adamk): YIELD should be handled specially. */
+ if (token == Token::FUTURE_STRICT_RESERVED_WORD) {
+ literal->Complete();
+ if (escaped) return Token::ESCAPED_STRICT_RESERVED_WORD;
+ return token;
+ }
if (token == Token::IDENTIFIER || Token::IsContextualKeyword(token)) {
+ literal->Complete();
return token;
- } else if (token == Token::FUTURE_STRICT_RESERVED_WORD ||
- token == Token::LET || token == Token::STATIC) {
+ }
+
+ if (!escaped) return token;
+
+ literal->Complete();
+ if (token == Token::LET || token == Token::STATIC) {
return Token::ESCAPED_STRICT_RESERVED_WORD;
- } else {
- return Token::ESCAPED_KEYWORD;
}
+ return Token::ESCAPED_KEYWORD;
}
+
+ literal->Complete();
return Token::IDENTIFIER;
}
@@ -1879,7 +1775,7 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
int flags = 0;
- while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
+ while (unicode_cache_->IsIdentifierPart(c0_)) {
RegExp::Flags flag = RegExp::kNone;
switch (c0_) {
case 'g':
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 77ea4d3272..34da5fafbf 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -403,12 +403,12 @@ class Scanner {
~LiteralBuffer() { backing_store_.Dispose(); }
- INLINE(void AddChar(char code_unit)) {
+ V8_INLINE void AddChar(char code_unit) {
DCHECK(IsValidAscii(code_unit));
AddOneByteChar(static_cast<byte>(code_unit));
}
- INLINE(void AddChar(uc32 code_unit)) {
+ V8_INLINE void AddChar(uc32 code_unit) {
if (is_one_byte_ &&
code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
AddOneByteChar(static_cast<byte>(code_unit));
@@ -465,7 +465,7 @@ class Scanner {
return iscntrl(code_unit) || isprint(code_unit);
}
- INLINE(void AddOneByteChar(byte one_byte_char)) {
+ V8_INLINE void AddOneByteChar(byte one_byte_char) {
DCHECK(is_one_byte_);
if (position_ >= backing_store_.length()) ExpandBuffer();
backing_store_[position_] = one_byte_char;
@@ -575,22 +575,22 @@ class Scanner {
next_.raw_literal_chars = free_buffer;
}
- INLINE(void AddLiteralChar(uc32 c)) {
+ V8_INLINE void AddLiteralChar(uc32 c) {
DCHECK_NOT_NULL(next_.literal_chars);
next_.literal_chars->AddChar(c);
}
- INLINE(void AddLiteralChar(char c)) {
+ V8_INLINE void AddLiteralChar(char c) {
DCHECK_NOT_NULL(next_.literal_chars);
next_.literal_chars->AddChar(c);
}
- INLINE(void AddRawLiteralChar(uc32 c)) {
+ V8_INLINE void AddRawLiteralChar(uc32 c) {
DCHECK_NOT_NULL(next_.raw_literal_chars);
next_.raw_literal_chars->AddChar(c);
}
- INLINE(void ReduceRawLiteralLength(int delta)) {
+ V8_INLINE void ReduceRawLiteralLength(int delta) {
DCHECK_NOT_NULL(next_.raw_literal_chars);
next_.raw_literal_chars->ReduceLength(delta);
}
@@ -608,24 +608,26 @@ class Scanner {
}
// Low-level scanning support.
- template <bool capture_raw = false, bool check_surrogate = true>
+ template <bool capture_raw = false>
void Advance() {
if (capture_raw) {
AddRawLiteralChar(c0_);
}
c0_ = source_->Advance();
- if (check_surrogate) HandleLeadSurrogate();
}
- void HandleLeadSurrogate() {
+ bool CombineSurrogatePair() {
+ DCHECK(!unibrow::Utf16::IsLeadSurrogate(kEndOfInput));
if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
uc32 c1 = source_->Advance();
- if (!unibrow::Utf16::IsTrailSurrogate(c1)) {
- source_->Back();
- } else {
+ DCHECK(!unibrow::Utf16::IsTrailSurrogate(kEndOfInput));
+ if (unibrow::Utf16::IsTrailSurrogate(c1)) {
c0_ = unibrow::Utf16::CombineSurrogatePair(c0_, c1);
+ return true;
}
+ source_->Back();
}
+ return false;
}
void PushBack(uc32 ch) {
@@ -750,7 +752,6 @@ class Scanner {
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
- Token::Value ScanIdentifierSuffix(LiteralScope* literal, bool escaped);
Token::Value ScanString();
Token::Value ScanPrivateName();
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 26da880b12..50e1403626 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -117,19 +117,20 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<JSObject> jserror = Handle<JSObject>::cast(error);
Handle<Name> key_start_pos = factory->error_start_pos_symbol();
- JSObject::SetProperty(jserror, key_start_pos,
+ JSObject::SetProperty(isolate, jserror, key_start_pos,
handle(Smi::FromInt(location.start_pos()), isolate),
LanguageMode::kSloppy)
.Check();
Handle<Name> key_end_pos = factory->error_end_pos_symbol();
- JSObject::SetProperty(jserror, key_end_pos,
+ JSObject::SetProperty(isolate, jserror, key_end_pos,
handle(Smi::FromInt(location.end_pos()), isolate),
LanguageMode::kSloppy)
.Check();
Handle<Name> key_script = factory->error_script_symbol();
- JSObject::SetProperty(jserror, key_script, script, LanguageMode::kSloppy)
+ JSObject::SetProperty(isolate, jserror, key_script, script,
+ LanguageMode::kSloppy)
.Check();
isolate->Throw(*error, &location);
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index dfccb293d1..3aaa36bc12 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -164,7 +164,7 @@ void PerfJitLogger::CloseMarkerFile(void* marker_address) {
munmap(marker_address, page_size);
}
-PerfJitLogger::PerfJitLogger() {
+PerfJitLogger::PerfJitLogger(Isolate* isolate) : CodeEventLogger(isolate) {
base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
reference_count_++;
@@ -332,7 +332,8 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
if (entry_count == 0) return;
// The WasmToJS wrapper stubs have source position entries.
if (!shared->HasSourceCode()) return;
- Handle<Script> script(Script::cast(shared->script()));
+ Isolate* isolate = shared->GetIsolate();
+ Handle<Script> script(Script::cast(shared->script()), isolate);
PerfJitCodeDebugInfo debug_info;
@@ -346,8 +347,8 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
size += entry_count * sizeof(PerfJitDebugEntry);
// Add the size of the name after each entry.
- Handle<Code> code_handle(code);
- Handle<SharedFunctionInfo> function_handle(shared);
+ Handle<Code> code_handle(code, isolate);
+ Handle<SharedFunctionInfo> function_handle(shared, isolate);
for (SourcePositionTableIterator iterator(code->SourcePositionTable());
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
@@ -419,7 +420,7 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
LogWriteBytes(padding_bytes, static_cast<int>(padding_size));
}
-void PerfJitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
// We may receive a CodeMove event if a BytecodeArray object moves. Otherwise
// code relocation is not supported.
CHECK(from->IsBytecodeArray());
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index bbcc79dd1c..d08f4b91ab 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -38,10 +38,10 @@ namespace internal {
// Linux perf tool logging support
class PerfJitLogger : public CodeEventLogger {
public:
- PerfJitLogger();
+ explicit PerfJitLogger(Isolate* isolate);
virtual ~PerfJitLogger();
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override {}
@@ -118,7 +118,9 @@ class PerfJitLogger : public CodeEventLogger {
// PerfJitLogger is only implemented on Linux
class PerfJitLogger : public CodeEventLogger {
public:
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {
+ explicit PerfJitLogger(Isolate* isolate) : CodeEventLogger(isolate) {}
+
+ void CodeMoveEvent(AbstractCode* from, Address to) override {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 9324a19e51..73c2e8dfe2 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -173,7 +173,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -181,9 +181,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -200,13 +199,6 @@ void RelocInfo::set_target_external_reference(
icache_flush_mode);
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -247,7 +239,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 3386d5265d..c43b955210 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -41,6 +41,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
@@ -141,9 +142,9 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
-
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially
@@ -161,34 +162,27 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- Assembler::target_address_at(pc_, constant_pool_));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_,
- static_cast<Address>(size), flush_mode);
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@@ -228,8 +222,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@@ -247,8 +241,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -504,7 +499,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
patcher.bitwise_mov32(dst, offset);
break;
@@ -520,7 +515,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
: (SIGN_EXT_IMM22(operands & kImm22Mask));
int32_t offset = target_pos + delta;
PatchingAssembler patcher(
- isolate_data(), reinterpret_cast<byte*>(buffer_ + pos),
+ options(), reinterpret_cast<byte*>(buffer_ + pos),
2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
patcher.bitwise_add32(dst, base, offset);
if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
@@ -529,7 +524,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool);
// Keep internal references relative until EmitRelocations.
@@ -537,7 +532,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
break;
}
case kUnboundJumpTableEntryOpcode: {
- PatchingAssembler patcher(isolate_data(),
+ PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
@@ -1305,7 +1300,7 @@ void Assembler::EnsureSpaceFor(int space_needed) {
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != nullptr && assembler->predictable_code_size()) return true;
- return assembler->serializer_enabled();
+ return assembler->options().record_reloc_info_for_serialization;
} else if (RelocInfo::IsNone(rmode_)) {
return false;
}
@@ -2075,10 +2070,11 @@ void Assembler::dp(uintptr_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (options().disable_reloc_info_for_patching) return;
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
- (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
- !emit_debug_code())) {
+ (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code())) {
return;
}
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
@@ -2148,9 +2144,9 @@ void Assembler::CheckTrampolinePool() {
}
}
-PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
- int instructions)
- : Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
+PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
+ byte* address, int instructions)
+ : Assembler(options, address, instructions * kInstrSize + kGap) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 80f53f4b00..0fde450f07 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -377,28 +377,28 @@ C_REGISTERS(DECLARE_C_REGISTER)
class Operand BASE_EMBEDDED {
public:
// immediate
- INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
- : rmode_(rmode)) {
+ V8_INLINE explicit Operand(intptr_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rmode_(rmode) {
value_.immediate = immediate;
}
- INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
- INLINE(explicit Operand(const ExternalReference& f)
- : rmode_(RelocInfo::EXTERNAL_REFERENCE)) {
+ V8_INLINE static Operand Zero() { return Operand(static_cast<intptr_t>(0)); }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
+ V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
// rm
- INLINE(explicit Operand(Register rm));
+ V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
- INLINE(bool is_reg() const) { return rm_.is_valid(); }
+ V8_INLINE bool is_reg() const { return rm_.is_valid(); }
bool must_output_reloc_info(const Assembler* assembler) const;
@@ -504,9 +504,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -553,29 +551,29 @@ class Assembler : public AssemblerBase {
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
- INLINE(static bool IsConstantPoolLoadStart(
- Address pc, ConstantPoolEntry::Access* access = nullptr));
- INLINE(static bool IsConstantPoolLoadEnd(
- Address pc, ConstantPoolEntry::Access* access = nullptr));
- INLINE(static int GetConstantPoolOffset(Address pc,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type));
- INLINE(void PatchConstantPoolAccessInstruction(
+ V8_INLINE static bool IsConstantPoolLoadStart(
+ Address pc, ConstantPoolEntry::Access* access = nullptr);
+ V8_INLINE static bool IsConstantPoolLoadEnd(
+ Address pc, ConstantPoolEntry::Access* access = nullptr);
+ V8_INLINE static int GetConstantPoolOffset(Address pc,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type);
+ V8_INLINE void PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type));
+ ConstantPoolEntry::Type type);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address target_constant_pool_address_at(
+ V8_INLINE static Address target_constant_pool_address_at(
Address pc, Address constant_pool, ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type));
+ ConstantPoolEntry::Type type);
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
- INLINE(static Address target_address_at(Address pc, Address constant_pool));
- INLINE(static void set_target_address_at(
+ V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
+ V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -583,7 +581,7 @@ class Assembler : public AssemblerBase {
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return to.
- INLINE(static Address return_address_from_call_start(Address pc));
+ V8_INLINE static Address return_address_from_call_start(Address pc);
// This sets the branch destination.
// This is for calls and branches within generated code.
@@ -1456,9 +1454,9 @@ class Assembler : public AssemblerBase {
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
bool sharing_ok = RelocInfo::IsNone(rmode) ||
- !(serializer_enabled() ||
- rmode < RelocInfo::FIRST_SHAREABLE_RELOC_MODE ||
- is_constant_pool_entry_sharing_blocked());
+ (!options().record_reloc_info_for_serialization &&
+ RelocInfo::IsShareableRelocMode(rmode) &&
+ !is_constant_pool_entry_sharing_blocked());
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
@@ -1631,23 +1629,12 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
-
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- void RequestHeapObject(HeapObjectRequest request);
- void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
-
- std::forward_list<HeapObjectRequest> heap_object_requests_;
};
@@ -1658,7 +1645,8 @@ class EnsureSpace BASE_EMBEDDED {
class PatchingAssembler : public Assembler {
public:
- PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
+ PatchingAssembler(const AssemblerOptions& options, byte* address,
+ int instructions);
~PatchingAssembler();
};
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 4cda05c629..f4c286fdc7 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -27,20 +27,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(r4, MemOperand(sp, r0));
- __ push(r4);
- __ push(r5);
- __ addi(r3, r3, Operand(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
// r3: code entry
// r4: function
@@ -202,6 +188,19 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+ if (FLAG_embedded_builtins) {
+ if (masm->root_array_available() &&
+ isolate()->ShouldLoadConstantsFromRootList()) {
+ // This is basically an inlined version of Call(Handle<Code>) that loads
+ // the code object into lr instead of ip.
+ DCHECK_NE(ip, target);
+ __ IndirectLoadConstant(ip, GetCode());
+ __ addi(r0, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Move(ip, target);
+ __ Call(r0);
+ return;
+ }
+ }
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux use a function descriptor.
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
@@ -325,280 +324,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-template <class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Cmpi(r6, Operand(kind), r0);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // r3 - number of arguments
- // r4 - constructor?
- // sp[0] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(
- masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ andi(r0, r6, Operand(1));
- __ bne(&normal_sequence, cr0);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ addi(r6, r6, Operand(1));
-
- if (FLAG_debug_code) {
- __ LoadP(r8, FieldMemOperand(r5, 0));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store r6
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ LoadP(r7, FieldMemOperand(
- r5, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
- __ StoreP(
- r7,
- FieldMemOperand(r5, AllocationSite::kTransitionInfoOrBoilerplateOffset),
- r0);
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ mov(r0, Operand(kind));
- __ cmp(r6, r0);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template <class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ cmpi(r3, Operand::Zero());
- __ bne(&not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmpi(r3, Operand(1));
- __ bgt(&not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argc (only if argument_count() == ANY)
- // -- r4 : constructor
- // -- r5 : AllocationSite or undefined
- // -- r6 : new target
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ TestIfSmi(r7, r0);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r7, r7, r8, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in r5 or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(r5, r7);
- }
-
- // Enter the context of the Array function.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- Label subclassing;
- __ cmp(r6, r4);
- __ bne(&subclassing);
-
- Label no_info;
- // Get the elements kind and case on that.
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ beq(&no_info);
-
- __ LoadP(r6, FieldMemOperand(
- r5, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(r6);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- __ bind(&subclassing);
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(r4, MemOperand(sp, r0));
- __ addi(r3, r3, Operand(3));
- __ Push(r6, r5);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
- ElementsKind kind) {
- __ cmpli(r3, Operand(1));
-
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0, lt);
-
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN, gt);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ LoadP(r6, MemOperand(sp, 0));
- __ cmpi(r6, Operand::Zero());
-
- InternalArraySingleArgumentConstructorStub stub1_holey(
- isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey, ne);
- }
-
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argc
- // -- r4 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ TestIfSmi(r6, r0);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|.
- __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r6);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmpi(r6, Operand(PACKED_ELEMENTS));
- __ beq(&done);
- __ cmpi(r6, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ cmpi(r6, Operand(PACKED_ELEMENTS));
- __ beq(&fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -627,18 +352,18 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
DCHECK(function_address == r4 || function_address == r5);
Register scratch = r6;
- __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ Move(scratch, ExternalReference::is_profiling_address(isolate));
__ lbz(scratch, MemOperand(scratch, 0));
__ cmpi(scratch, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
- __ mov(scratch, Operand(thunk_ref));
+ __ Move(scratch, thunk_ref);
__ isel(eq, scratch, function_address, scratch);
} else {
Label profiler_disabled;
Label end_profiler_check;
__ beq(&profiler_disabled);
- __ mov(scratch, Operand(thunk_ref));
+ __ Move(scratch, thunk_ref);
__ b(&end_profiler_check);
__ bind(&profiler_disabled);
__ mr(scratch, function_address);
@@ -650,7 +375,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// r14 - next_address->kNextOffset
// r15 - next_address->kLimitOffset
// r16 - next_address->kLevelOffset
- __ mov(r17, Operand(next_address));
+ __ Move(r17, next_address);
__ LoadP(r14, MemOperand(r17, kNextOffset));
__ LoadP(r15, MemOperand(r17, kLimitOffset));
__ lwz(r16, MemOperand(r17, kLevelOffset));
@@ -661,7 +386,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r3);
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r3, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_enter_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -676,7 +401,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r3);
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r3, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_leave_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -715,7 +440,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
- __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ Move(r15, ExternalReference::scheduled_exception_address(isolate));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
@@ -731,7 +456,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ StoreP(r15, MemOperand(r17, kLimitOffset));
__ mr(r14, r3);
__ PrepareCallCFunction(1, r15);
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r3, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
__ mr(r3, r14);
__ b(&leave_exit_frame);
@@ -777,7 +502,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch);
// isolate
- __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ push(scratch);
// holder
__ push(holder);
@@ -854,7 +579,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ push(scratch);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Push(scratch, scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
__ Push(Smi::kZero); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 44749d4eb6..673e5dc9b7 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -23,6 +23,9 @@
namespace v8 {
namespace internal {
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
// Number of registers
const int kNumRegisters = 32;
@@ -36,6 +39,11 @@ const int kNoRegister = -1;
const int kLoadPtrMaxReachBits = 15;
const int kLoadDoubleMaxReachBits = 15;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 128;
+
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 95aaa3f0bd..b10af51de1 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -83,7 +83,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(r4, Operand(type())); // bailout type,
+ __ li(r4, Operand(static_cast<int>(deopt_kind())));
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
// r7: Fp-to-sp delta.
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index f91601c046..a4516c367c 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -5,6 +5,7 @@
#ifndef V8_PPC_FRAME_CONSTANTS_PPC_H_
#define V8_PPC_FRAME_CONSTANTS_PPC_H_
+#include "src/base/macros.h"
#include "src/frame-constants.h"
namespace v8 {
@@ -32,6 +33,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 443716e55c..c446a74e10 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -57,13 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r8; }
const Register ApiGetterDescriptor::HolderRegister() { return r3; }
const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
-const Register MathPowTaggedDescriptor::exponent() { return r5; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
@@ -177,24 +170,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the target to call
- // r6 : the new target
- Register registers[] = {r4, r6, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -202,42 +178,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r3 -- number of arguments
- // r4 -- function
- // r5 -- allocation site with elements kind
- Register registers[] = {r4, r5, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r3 -- number of arguments
- // r4 -- function
- // r5 -- allocation site with elements kind
- Register registers[] = {r4, r5, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {r4, r5, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -305,7 +246,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (argc)
@@ -315,6 +258,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 160ce33530..13e04a2c8c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -10,7 +10,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -20,16 +19,17 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -41,15 +41,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -133,27 +124,13 @@ void TurboAssembler::Jump(Register target) {
bctr();
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
const uint32_t offset =
- FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
+ FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
@@ -161,30 +138,18 @@ void TurboAssembler::LookupConstant(Register destination,
LoadP(destination, MemOperand(destination, offset), r0);
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ LoadP(destination, MemOperand(kRootRegister, offset), r0);
+}
- LoadP(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset), r0);
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ mr(destination, kRootRegister);
+ } else {
+ addi(destination, kRootRegister, Operand(offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
@@ -216,18 +181,34 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Register scratch = ip;
- LookupConstant(scratch, code);
- addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip, cr);
- Jump(scratch);
- bind(&skip);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ Register scratch = ip;
+ IndirectLoadConstant(scratch, code);
+ addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(scratch);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(ip);
+ bind(&skip);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
@@ -297,19 +278,36 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- LookupConstant(ip, code);
- addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ IndirectLoadConstant(ip, code);
+ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Call(code.address(), rmode, cond);
}
@@ -337,29 +335,22 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Heap::RootListIndex root_index;
- if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
- LookupConstant(dst, value);
- } else {
- LoadRoot(dst, root_index);
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
}
- return;
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
- reference.address() !=
- ExternalReference::roots_array_start(isolate()).address()) {
- LookupExternalReference(dst, reference);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, reference);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
@@ -431,7 +422,7 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
DCHECK(cond == al);
- LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+ LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -990,10 +981,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
}
- if (type == StackFrame::INTERNAL) {
- Move(ip, CodeObject());
- push(ip);
- }
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -1400,9 +1387,9 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- LoadWordArith(expected_reg,
- FieldMemOperand(
- temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+ LoadHalfWord(expected_reg,
+ FieldMemOperand(
+ temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag);
@@ -1632,7 +1619,8 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -1643,7 +1631,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// Put input on stack.
stfdu(double_input, MemOperand(sp, -kDoubleSize));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
LoadP(result, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
@@ -1676,8 +1668,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
beq(done);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -1685,13 +1677,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
mov(r3, Operand(f->nargs));
Move(r4, ExternalReference::Create(f));
-#if V8_TARGET_ARCH_PPC64
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
-#else
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
-#endif
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, r3, r4));
+ addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -1789,18 +1777,17 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
@@ -1854,18 +1841,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object, r0);
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
- push(object);
- CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
- pop(object);
- Check(eq, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1938,7 +1913,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
CompareRoot(object, Heap::kUndefinedValueRootIndex);
beq(&done_checking);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -2368,7 +2343,7 @@ void TurboAssembler::Add(Register dst, Register src, intptr_t value,
}
-void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
+void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
@@ -2701,6 +2676,7 @@ void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
int offset = mem.offset();
if (!is_int16(offset)) {
+ DCHECK_NE(scratch, no_reg);
LoadIntLiteral(scratch, offset);
lhzx(dst, MemOperand(base, scratch));
} else {
@@ -3083,6 +3059,16 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
+void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+ Cmpi(x, Operand(y), r0);
+ beq(dest);
+}
+
+void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+ Cmpi(x, Operand(y), r0);
+ blt(dest);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 9e8c2cfada..daf1fbdb6a 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -10,6 +10,7 @@
#include "src/double.h"
#include "src/globals.h"
#include "src/ppc/assembler-ppc.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -26,9 +27,13 @@ constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
constexpr Register kInterpreterDispatchTableRegister = r17;
+
constexpr Register kJavaScriptCallArgCountRegister = r3;
-constexpr Register kJavaScriptCallNewTargetRegister = r6;
constexpr Register kJavaScriptCallCodeStartRegister = r5;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r6;
+constexpr Register kJavaScriptCallExtraArg1Register = r5;
+
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
@@ -110,20 +115,14 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
#define Div divw
#endif
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
void ConvertIntToDouble(Register src, DoubleRegister dst);
@@ -194,6 +193,7 @@ class TurboAssembler : public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
+ addi(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}
// These exist to provide portability between 32 and 64bit
@@ -220,9 +220,6 @@ class TurboAssembler : public Assembler {
void LoadPC(Register dst);
void ComputeCodeStartAddress(Register dst);
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
@@ -233,6 +230,8 @@ class TurboAssembler : public Assembler {
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
+ void Cmpi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
void Cmpli(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
@@ -349,8 +348,11 @@ class TurboAssembler : public Assembler {
Register exclusion3 = no_reg);
// Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ LoadRoot(destination, index, al);
+ }
void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond = al);
+ Condition cond);
void SwapP(Register src, Register dst, Register scratch);
void SwapP(Register src, MemOperand dst, Register scratch);
@@ -405,9 +407,10 @@ class TurboAssembler : public Assembler {
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
+
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
@@ -437,11 +440,10 @@ class TurboAssembler : public Assembler {
Register src_high, uint32_t shift);
#endif
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
@@ -455,6 +457,8 @@ class TurboAssembler : public Assembler {
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
@@ -464,7 +468,9 @@ class TurboAssembler : public Assembler {
Condition cond = al);
void Call(Label* target);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
Call(target, rmode);
}
@@ -615,6 +621,9 @@ class TurboAssembler : public Assembler {
TestIfSmi(value, r0);
beq(smi_label, cr0); // branch if SMI
}
+ void JumpIfEqual(Register x, int32_t y, Label* dest);
+ void JumpIfLessThan(Register x, int32_t y, Label* dest);
+
#if V8_TARGET_ARCH_PPC64
inline void TestIfInt32(Register value, Register scratch,
CRegister cr = cr7) {
@@ -658,7 +667,7 @@ class TurboAssembler : public Assembler {
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input);
+ DoubleRegister double_input, StubCallMode stub_mode);
// Call a code stub.
void CallStubDelayed(CodeStub* stub);
@@ -677,19 +686,9 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
- protected:
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* const isolate_;
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
- CRegister cr = cr7);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
@@ -700,7 +699,11 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// ---------------------------------------------------------------------------
// GC Support
@@ -797,7 +800,8 @@ class MacroAssembler : public TurboAssembler {
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
- void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
+ void LoadHalfWord(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
void LoadHalfWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
@@ -812,8 +816,6 @@ class MacroAssembler : public TurboAssembler {
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
- void Cmpi(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
void Cmplwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
@@ -1022,9 +1024,6 @@ class MacroAssembler : public TurboAssembler {
#define SmiWordOffset(offset) offset
#endif
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 6fd503c470..350d4687ce 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -348,7 +348,7 @@ void PPCDebugger::Debug() {
(strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
intptr_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -604,7 +604,7 @@ void PPCDebugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the PPCDebugger.\n");
+ PrintF(" stop and give control to the PPCDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index cf672f920c..21843325f9 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -249,7 +249,7 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
if (entry->value == nullptr) {
FunctionInfo* info = new FunctionInfo();
- info->name = names_->GetFunctionName(shared->DebugName());
+ info->name = names_->GetName(shared->DebugName());
info->function_id = id;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index d8603c8168..f6eaa8f8a3 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -16,17 +16,17 @@ namespace v8 {
namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddCode(instruction_start, entry, instruction_size);
+ code_map->AddCode(start, entry, size);
}
void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from_instruction_start, to_instruction_start);
+ code_map->MoveCode(from, to);
}
void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+ CodeEntry* entry = code_map->FindEntry(start);
if (entry != nullptr) {
entry->set_bailout_reason(bailout_reason);
}
@@ -34,7 +34,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+ CodeEntry* entry = code_map->FindEntry(start);
if (entry == nullptr) return;
std::vector<CpuProfileDeoptFrame> frames_vector(
deopt_frames, deopt_frames + deopt_frame_count);
@@ -44,7 +44,7 @@ void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+ CodeEntry* entry = code_map->FindEntry(start);
if (!entry) {
// Code objects for builtins should already have been added to the map but
// some of them have been filtered out by CpuProfiler.
@@ -58,7 +58,7 @@ TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == nullptr) return nullptr;
TickSampleEventRecord* evt =
- new (address) TickSampleEventRecord(last_code_event_id_.Value());
+ new (address) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 79606dc812..463a30f184 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -63,14 +63,14 @@ ProfilerEventsProcessor::~ProfilerEventsProcessor() {
}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
- event.generic.order = last_code_event_id_.Increment(1);
+ event.generic.order = ++last_code_event_id_;
events_buffer_.Enqueue(event);
}
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
int fp_to_sp_delta) {
- TickSampleEventRecord record(last_code_event_id_.Value());
+ TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
Address fp = isolate->c_entry_fp(isolate->thread_local_top());
regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
@@ -82,7 +82,7 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
bool update_stats) {
- TickSampleEventRecord record(last_code_event_id_.Value());
+ TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
StackFrameIterator it(isolate);
if (!it.done()) {
@@ -426,7 +426,7 @@ void CpuProfiler::LogBuiltins() {
CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
Builtins::Name id = static_cast<Builtins::Name>(i);
- rec->instruction_start = builtins->builtin(id)->InstructionStart();
+ rec->start = builtins->builtin(id)->address();
rec->builtin_id = id;
processor_->Enqueue(evt_rec);
}
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 4e56c7bd74..febc154802 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -53,35 +53,35 @@ class CodeEventRecord {
class CodeCreateEventRecord : public CodeEventRecord {
public:
- Address instruction_start;
+ Address start;
CodeEntry* entry;
- unsigned instruction_size;
+ unsigned size;
- INLINE(void UpdateCodeMap(CodeMap* code_map));
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
class CodeMoveEventRecord : public CodeEventRecord {
public:
- Address from_instruction_start;
- Address to_instruction_start;
+ Address from;
+ Address to;
- INLINE(void UpdateCodeMap(CodeMap* code_map));
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
class CodeDisableOptEventRecord : public CodeEventRecord {
public:
- Address instruction_start;
+ Address start;
const char* bailout_reason;
- INLINE(void UpdateCodeMap(CodeMap* code_map));
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
class CodeDeoptEventRecord : public CodeEventRecord {
public:
- Address instruction_start;
+ Address start;
const char* deopt_reason;
int deopt_id;
Address pc;
@@ -89,16 +89,16 @@ class CodeDeoptEventRecord : public CodeEventRecord {
CpuProfileDeoptFrame* deopt_frames;
int deopt_frame_count;
- INLINE(void UpdateCodeMap(CodeMap* code_map));
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
class ReportBuiltinEventRecord : public CodeEventRecord {
public:
- Address instruction_start;
+ Address start;
Builtins::Name builtin_id;
- INLINE(void UpdateCodeMap(CodeMap* code_map));
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
@@ -140,7 +140,7 @@ class ProfilerEventsProcessor : public base::Thread {
// Thread control.
virtual void Run();
void StopSynchronously();
- INLINE(bool running()) { return !!base::Relaxed_Load(&running_); }
+ V8_INLINE bool running() { return !!base::Relaxed_Load(&running_); }
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into tick sample events buffer.
@@ -183,7 +183,7 @@ class ProfilerEventsProcessor : public base::Thread {
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- base::AtomicNumber<unsigned> last_code_event_id_;
+ std::atomic<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 10645ad161..e5aa7c554e 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -16,14 +16,14 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: ids_(new HeapObjectsMap(heap)),
- names_(new StringsStorage(heap->HashSeed())),
+ names_(new StringsStorage()),
is_tracking_object_moves_(false) {}
HeapProfiler::~HeapProfiler() = default;
void HeapProfiler::DeleteAllSnapshots() {
snapshots_.clear();
- names_.reset(new StringsStorage(heap()->HashSeed()));
+ names_.reset(new StringsStorage());
}
@@ -209,7 +209,8 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
// Can't break -- kFilterUnreachable requires full heap traversal.
}
}
- return object != nullptr ? Handle<HeapObject>(object) : Handle<HeapObject>();
+ return object != nullptr ? Handle<HeapObject>(object, isolate())
+ : Handle<HeapObject>();
}
@@ -233,9 +234,9 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
HeapIterator heap_iterator(heap());
HeapObject* heap_obj;
while ((heap_obj = heap_iterator.next()) != nullptr) {
- if (!heap_obj->IsJSObject() || heap_obj->IsExternal()) continue;
+ if (!heap_obj->IsJSObject() || heap_obj->IsExternal(isolate())) continue;
v8::Local<v8::Object> v8_obj(
- Utils::ToLocal(handle(JSObject::cast(heap_obj))));
+ Utils::ToLocal(handle(JSObject::cast(heap_obj), isolate())));
if (!predicate->Filter(v8_obj)) continue;
objects->Append(v8_obj);
}
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 6564250d2e..b51ea0de7e 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -16,6 +16,10 @@
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-promise-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -772,6 +776,8 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
case PROPERTY_CELL_TYPE: return "system / PropertyCell";
case FOREIGN_TYPE: return "system / Foreign";
case ODDBALL_TYPE: return "system / Oddball";
+ case ALLOCATION_SITE_TYPE:
+ return "system / AllocationSite";
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: return "system / "#Name;
STRUCT_LIST(MAKE_STRUCT_CASE)
@@ -780,17 +786,13 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
}
}
-
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
+int V8HeapExplorer::EstimateObjectsCount() {
+ HeapIterator it(heap_, HeapIterator::kFilterUnreachable);
int objects_count = 0;
- for (HeapObject* obj = iterator->next(); obj != nullptr;
- obj = iterator->next()) {
- objects_count++;
- }
+ while (it.next()) ++objects_count;
return objects_count;
}
-
class IndexedReferencesExtractor : public ObjectVisitor {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj,
@@ -836,10 +838,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
int parent_;
};
-
-bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
- if (obj->IsFixedArray()) return false; // FixedArrays are processed on pass 2
-
+void V8HeapExplorer::ExtractReferences(int entry, HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSArrayBuffer()) {
@@ -883,6 +882,9 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
} else if (obj->IsAllocationSite()) {
ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
+ } else if (obj->IsArrayBoilerplateDescription()) {
+ ExtractArrayBoilerplateDescriptionReferences(
+ entry, ArrayBoilerplateDescription::cast(obj));
} else if (obj->IsFeedbackVector()) {
ExtractFeedbackVectorReferences(entry, FeedbackVector::cast(obj));
} else if (obj->IsWeakFixedArray()) {
@@ -891,20 +893,13 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
} else if (obj->IsWeakArrayList()) {
ExtractWeakArrayReferences(WeakArrayList::kHeaderSize, entry,
WeakArrayList::cast(obj));
- }
- return true;
-}
-
-
-bool V8HeapExplorer::ExtractReferencesPass2(int entry, HeapObject* obj) {
- if (!obj->IsFixedArray()) return false;
-
- if (obj->IsContext()) {
+ } else if (obj->IsContext()) {
ExtractContextReferences(entry, Context::cast(obj));
- } else {
+ } else if (obj->IsEphemeronHashTable()) {
+ ExtractEphemeronHashTableReferences(entry, EphemeronHashTable::cast(obj));
+ } else if (obj->IsFixedArray()) {
ExtractFixedArrayReferences(entry, FixedArray::cast(obj));
}
- return true;
}
@@ -923,7 +918,8 @@ void V8HeapExplorer::ExtractJSObjectReferences(
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
PrototypeIterator iter(heap_->isolate(), js_obj);
- SetPropertyReference(obj, entry, heap_->proto_string(), iter.GetCurrent());
+ ReadOnlyRoots roots(heap_);
+ SetPropertyReference(obj, entry, roots.proto_string(), iter.GetCurrent());
if (obj->IsJSBoundFunction()) {
JSBoundFunction* js_fun = JSBoundFunction::cast(obj);
TagObject(js_fun->bound_arguments(), "(bound arguments)");
@@ -945,11 +941,11 @@ void V8HeapExplorer::ExtractJSObjectReferences(
Object* proto_or_map = js_fun->prototype_or_initial_map();
if (!proto_or_map->IsTheHole(heap_->isolate())) {
if (!proto_or_map->IsMap()) {
- SetPropertyReference(obj, entry, heap_->prototype_string(),
+ SetPropertyReference(obj, entry, roots.prototype_string(),
proto_or_map, nullptr,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
- SetPropertyReference(obj, entry, heap_->prototype_string(),
+ SetPropertyReference(obj, entry, roots.prototype_string(),
js_fun->prototype());
SetInternalReference(obj, entry, "initial_map", proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
@@ -1034,14 +1030,34 @@ void V8HeapExplorer::ExtractJSCollectionReferences(int entry,
void V8HeapExplorer::ExtractJSWeakCollectionReferences(int entry,
JSWeakCollection* obj) {
- if (obj->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(obj->table());
- TagFixedArraySubType(table, JS_WEAK_COLLECTION_SUB_TYPE);
- }
SetInternalReference(obj, entry, "table", obj->table(),
JSWeakCollection::kTableOffset);
}
+void V8HeapExplorer::ExtractEphemeronHashTableReferences(
+ int entry, EphemeronHashTable* table) {
+ for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
+ int key_index = EphemeronHashTable::EntryToIndex(i) +
+ EphemeronHashTable::kEntryKeyIndex;
+ int value_index = EphemeronHashTable::EntryToValueIndex(i);
+ Object* key = table->get(key_index);
+ Object* value = table->get(value_index);
+ SetWeakReference(table, entry, key_index, key,
+ table->OffsetOfElementAt(key_index));
+ SetInternalReference(table, entry, value_index, value,
+ table->OffsetOfElementAt(value_index));
+ HeapEntry* key_entry = GetEntry(key);
+ int key_entry_index = key_entry->index();
+ HeapEntry* value_entry = GetEntry(value);
+ if (key_entry && value_entry) {
+ const char* edge_name =
+ names_->GetFormatted("key %s in WeakMap", key_entry->name());
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kInternal, key_entry_index, edge_name, value_entry);
+ }
+ }
+}
+
void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
if (!context->IsNativeContext() && context->is_declaration_context()) {
ScopeInfo* scope_info = context->scope_info();
@@ -1167,7 +1183,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapObject* obj = shared;
String* shared_name = shared->DebugName();
const char* name = nullptr;
- if (shared_name != heap_->empty_string()) {
+ if (shared_name != ReadOnlyRoots(heap_).empty_string()) {
name = names_->GetName(shared_name);
TagObject(shared->GetCode(), names_->GetFormatted("(code for %s)", name));
} else {
@@ -1188,12 +1204,10 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"function_data", shared->function_data(),
SharedFunctionInfo::kFunctionDataOffset);
- SetInternalReference(obj, entry,
- "debug_info", shared->debug_info(),
- SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry, "function_identifier",
- shared->function_identifier(),
- SharedFunctionInfo::kFunctionIdentifierOffset);
+ SetInternalReference(
+ obj, entry, "function_identifier_or_debug_info",
+ shared->function_identifier_or_debug_info(),
+ SharedFunctionInfo::kFunctionIdentifierOrDebugInfoOffset);
SetInternalReference(
obj, entry, "raw_outer_scope_info_or_feedback_metadata",
shared->raw_outer_scope_info_or_feedback_metadata(),
@@ -1305,10 +1319,13 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
TagObject(site->dependent_code(), "(dependent code)");
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
- // Do not visit weak_next as it is not visited by the ObjectVisitor,
- // and we're not very interested in weak_next field here.
- STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
- AllocationSite::kPointerFieldsEndOffset);
+}
+
+void V8HeapExplorer::ExtractArrayBoilerplateDescriptionReferences(
+ int entry, ArrayBoilerplateDescription* value) {
+ SetInternalReference(value, entry, "constant_elements",
+ value->constant_elements(),
+ ArrayBoilerplateDescription::kConstantElementsOffset);
}
class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
@@ -1347,50 +1364,10 @@ void V8HeapExplorer::ExtractJSPromiseReferences(int entry, JSPromise* promise) {
}
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
- auto it = array_types_.find(array);
- if (it == array_types_.end()) {
- for (int i = 0, l = array->length(); i < l; ++i) {
- DCHECK(!HasWeakHeapObjectTag(array->get(i)));
- SetInternalReference(array, entry, i, array->get(i),
- array->OffsetOfElementAt(i));
- }
- return;
- }
- switch (it->second) {
- case JS_WEAK_COLLECTION_SUB_TYPE: {
- ObjectHashTable* table = ObjectHashTable::cast(array);
- for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
- int key_index =
- ObjectHashTable::EntryToIndex(i) + ObjectHashTable::kEntryKeyIndex;
- int value_index = ObjectHashTable::EntryToValueIndex(i);
- Object* key = table->get(key_index);
- Object* value = table->get(value_index);
- SetWeakReference(table, entry, key_index, key,
- table->OffsetOfElementAt(key_index));
- SetInternalReference(table, entry, value_index, value,
- table->OffsetOfElementAt(value_index));
- HeapEntry* key_entry = GetEntry(key);
- int key_entry_index = key_entry->index();
- HeapEntry* value_entry = GetEntry(value);
- if (key_entry && value_entry) {
- const char* edge_name =
- names_->GetFormatted("key %s in WeakMap", key_entry->name());
- filler_->SetNamedAutoIndexReference(HeapGraphEdge::kInternal,
- key_entry_index, edge_name,
- value_entry);
- }
- }
- break;
- }
-
- // TODO(alph): Add special processing for other types of FixedArrays.
-
- default:
- for (int i = 0, l = array->length(); i < l; ++i) {
- SetInternalReference(array, entry, i, array->get(i),
- array->OffsetOfElementAt(i));
- }
- break;
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ DCHECK(!HasWeakHeapObjectTag(array->get(i)));
+ SetInternalReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
}
}
@@ -1413,6 +1390,9 @@ void V8HeapExplorer::ExtractWeakArrayReferences(int header_size, int entry,
if (object->ToWeakHeapObject(&heap_object)) {
SetWeakReference(array, entry, i, heap_object,
header_size + i * kPointerSize);
+ } else if (object->ToStrongHeapObject(&heap_object)) {
+ SetInternalReference(array, entry, i, heap_object,
+ header_size + i * kPointerSize);
}
}
}
@@ -1442,7 +1422,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
case kDescriptor:
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
descs->GetKey(i),
- descs->GetValue(i));
+ descs->GetStrongValue(i));
break;
}
}
@@ -1451,8 +1431,9 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
GlobalDictionary* dictionary =
JSGlobalObject::cast(js_obj)->global_dictionary();
int length = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
- if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
+ if (dictionary->IsKey(roots, dictionary->KeyAt(i))) {
PropertyCell* cell = dictionary->CellAt(i);
Name* name = cell->name();
Object* value = cell->value();
@@ -1464,9 +1445,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
} else {
NameDictionary* dictionary = js_obj->property_dictionary();
int length = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(isolate, k)) {
+ if (dictionary->IsKey(roots, k)) {
Object* value = dictionary->ValueAt(i);
PropertyDetails details = dictionary->DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
@@ -1496,14 +1478,14 @@ void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- Isolate* isolate = js_obj->GetIsolate();
+ ReadOnlyRoots roots = js_obj->GetReadOnlyRoots();
if (js_obj->HasObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray()
? Smi::ToInt(JSArray::cast(js_obj)->length())
: elements->length();
for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole(isolate)) {
+ if (!elements->get(i)->IsTheHole(roots)) {
SetElementReference(js_obj, entry, i, elements->get(i));
}
}
@@ -1512,7 +1494,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(isolate, k)) {
+ if (dictionary->IsKey(roots, k)) {
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
@@ -1534,7 +1516,7 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
String* V8HeapExplorer::GetConstructorName(JSObject* object) {
Isolate* isolate = object->GetIsolate();
- if (object->IsJSFunction()) return isolate->heap()->closure_string();
+ if (object->IsJSFunction()) return ReadOnlyRoots(isolate).closure_string();
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
return *JSReceiver::GetConstructorName(handle(object, isolate));
@@ -1573,9 +1555,7 @@ class RootsReferencesExtractor : public RootVisitor {
bool visiting_weak_roots_;
};
-
-bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFiller* filler) {
+bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
filler_ = filler;
// Create references to the synthetic roots.
@@ -1592,27 +1572,8 @@ bool V8HeapExplorer::IterateAndExtractReferences(
extractor.SetVisitingWeakRoots();
heap_->IterateWeakGlobalHandles(&extractor);
- // We have to do two passes as sometimes FixedArrays are used
- // to weakly hold their items, and it's impossible to distinguish
- // between these cases without processing the array owner first.
- bool interrupted =
- IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass1>() ||
- IterateAndExtractSinglePass<&V8HeapExplorer::ExtractReferencesPass2>();
-
- if (interrupted) {
- filler_ = nullptr;
- return false;
- }
-
- filler_ = nullptr;
- return progress_->ProgressReport(true);
-}
-
-
-template<V8HeapExplorer::ExtractReferencesMethod extractor>
-bool V8HeapExplorer::IterateAndExtractSinglePass() {
- // Now iterate the whole heap.
bool interrupted = false;
+
HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
for (HeapObject *obj = iterator.next(); obj != nullptr;
@@ -1629,14 +1590,13 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
HeapEntry* heap_entry = GetEntry(obj);
int entry = heap_entry->index();
- if ((this->*extractor)(entry, obj)) {
- SetInternalReference(obj, entry,
- "map", obj->map(), HeapObject::kMapOffset);
- // Extract unvisited fields as hidden references and restore tags
- // of visited fields.
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- }
+ ExtractReferences(entry, obj);
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ // Extract unvisited fields as hidden references and restore tags
+ // of visited fields.
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
+
// Ensure visited_fields_ doesn't leak to the next object.
for (size_t i = 0; i < max_pointer; ++i) {
DCHECK(!visited_fields_[i]);
@@ -1644,22 +1604,25 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
if (!progress_->ProgressReport(false)) interrupted = true;
}
- return interrupted;
+
+ filler_ = nullptr;
+ return interrupted ? false : progress_->ProgressReport(true);
}
bool V8HeapExplorer::IsEssentialObject(Object* object) {
+ ReadOnlyRoots roots(heap_);
return object->IsHeapObject() && !object->IsOddball() &&
- object != heap_->empty_byte_array() &&
- object != heap_->empty_fixed_array() &&
- object != heap_->empty_weak_fixed_array() &&
- object != heap_->empty_descriptor_array() &&
- object != heap_->fixed_array_map() && object != heap_->cell_map() &&
- object != heap_->global_property_cell_map() &&
- object != heap_->shared_function_info_map() &&
- object != heap_->free_space_map() &&
- object != heap_->one_pointer_filler_map() &&
- object != heap_->two_pointer_filler_map();
+ object != roots.empty_byte_array() &&
+ object != roots.empty_fixed_array() &&
+ object != roots.empty_weak_fixed_array() &&
+ object != roots.empty_descriptor_array() &&
+ object != roots.fixed_array_map() && object != roots.cell_map() &&
+ object != roots.global_property_cell_map() &&
+ object != roots.shared_function_info_map() &&
+ object != roots.free_space_map() &&
+ object != roots.one_pointer_filler_map() &&
+ object != roots.two_pointer_filler_map();
}
bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
@@ -1889,7 +1852,6 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
JSGlobalObject* global = Context::cast(child_obj)->global_object();
if (!global->IsJSGlobalObject()) return;
- if (heap_->isolate()->debug()->IsDebugGlobal(global)) return;
if (user_roots_.Contains(global)) return;
user_roots_.Insert(global);
@@ -1897,24 +1859,34 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
}
const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+ ReadOnlyRoots roots(heap_);
if (strong_gc_subroot_names_.is_empty()) {
#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
+#define RO_NAME_ENTRY(name) \
+ strong_gc_subroot_names_.SetTag(roots.name(), #name);
#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
- STRONG_ROOT_LIST(ROOT_NAME)
+ STRONG_MUTABLE_ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+#define ROOT_NAME(type, name, camel_name) RO_NAME_ENTRY(name)
+ STRONG_READ_ONLY_ROOT_LIST(ROOT_NAME)
#undef ROOT_NAME
-#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
+#define STRUCT_MAP_NAME(NAME, Name, name) RO_NAME_ENTRY(name##_map)
STRUCT_LIST(STRUCT_MAP_NAME)
#undef STRUCT_MAP_NAME
+#define ALLOCATION_SITE_MAP_NAME(NAME, Name, Size, name) \
+ RO_NAME_ENTRY(name##_map)
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_NAME)
+#undef ALLOCATION_SITE_MAP_NAME
#define DATA_HANDLER_MAP_NAME(NAME, Name, Size, name) NAME_ENTRY(name##_map)
DATA_HANDLER_LIST(DATA_HANDLER_MAP_NAME)
#undef DATA_HANDLER_MAP_NAME
-#define STRING_NAME(name, str) NAME_ENTRY(name)
+#define STRING_NAME(name, str) RO_NAME_ENTRY(name)
INTERNALIZED_STRING_LIST(STRING_NAME)
#undef STRING_NAME
-#define SYMBOL_NAME(name) NAME_ENTRY(name)
+#define SYMBOL_NAME(name) RO_NAME_ENTRY(name)
PRIVATE_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
-#define SYMBOL_NAME(name, description) NAME_ENTRY(name)
+#define SYMBOL_NAME(name, description) RO_NAME_ENTRY(name)
PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
@@ -1923,6 +1895,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
ACCESSOR_INFO_LIST(ACCESSOR_NAME)
#undef ACCESSOR_NAME
#undef NAME_ENTRY
+#undef RO_NAME_ENTRY
CHECK(!strong_gc_subroot_names_.is_empty());
}
return strong_gc_subroot_names_.GetTag(object);
@@ -1937,12 +1910,6 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
}
}
-void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
- FixedArraySubInstanceType type) {
- DCHECK(array_types_.find(array) == array_types_.end());
- array_types_[array] = type;
-}
-
class GlobalObjectsEnumerator : public RootVisitor {
public:
void VisitRootPointers(Root root, const char* description, Object** start,
@@ -1953,7 +1920,8 @@ class GlobalObjectsEnumerator : public RootVisitor {
if (!proxy->IsJSGlobalProxy()) continue;
Object* global = proxy->map()->prototype();
if (!global->IsJSGlobalObject()) continue;
- objects_.push_back(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+ objects_.push_back(Handle<JSGlobalObject>(JSGlobalObject::cast(global),
+ proxy->GetIsolate()));
}
}
int count() const { return static_cast<int>(objects_.size()); }
@@ -2465,7 +2433,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
}
#endif
- SetProgressTotal(2); // 2 passes.
+ InitProgressCounter();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -2485,12 +2453,10 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
return true;
}
-
void HeapSnapshotGenerator::ProgressStep() {
++progress_counter_;
}
-
bool HeapSnapshotGenerator::ProgressReport(bool force) {
const int kProgressReportGranularity = 10000;
if (control_ != nullptr &&
@@ -2501,27 +2467,22 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
return true;
}
-
-void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
+void HeapSnapshotGenerator::InitProgressCounter() {
if (control_ == nullptr) return;
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// The +1 ensures that intermediate ProgressReport calls will never signal
// that the work is finished (i.e. progress_counter_ == progress_total_).
// Only the forced ProgressReport() at the end of GenerateSnapshot()
// should signal that the work is finished because signalling finished twice
// breaks the DevTools frontend.
- progress_total_ =
- iterations_count * (v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount()) +
- 1;
+ progress_total_ = v8_heap_explorer_.EstimateObjectsCount() +
+ dom_explorer_.EstimateObjectsCount() + 1;
progress_counter_ = 0;
}
-
bool HeapSnapshotGenerator::FillReferences() {
SnapshotFiller filler(snapshot_, &entries_);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler)
- && dom_explorer_.IterateAndExtractReferences(&filler);
+ return v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
+ dom_explorer_.IterateAndExtractReferences(&filler);
}
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index d0e17edf03..4f4fbee742 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -13,6 +13,8 @@
#include "src/base/platform/time.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/literal-objects.h"
#include "src/profiler/strings-storage.h"
#include "src/string-hasher.h"
#include "src/visitors.h"
@@ -57,13 +59,13 @@ class HeapGraphEdge BASE_EMBEDDED {
type() == kInternal || type() == kShortcut || type() == kWeak);
return name_;
}
- INLINE(HeapEntry* from() const);
+ V8_INLINE HeapEntry* from() const;
HeapEntry* to() const { return to_entry_; }
- INLINE(Isolate* isolate() const);
+ V8_INLINE Isolate* isolate() const;
private:
- INLINE(HeapSnapshot* snapshot() const);
+ V8_INLINE HeapSnapshot* snapshot() const;
int from_index() const { return FromIndexField::decode(bit_field_); }
class TypeField : public BitField<Type, 0, 3> {};
@@ -120,14 +122,14 @@ class HeapEntry BASE_EMBEDDED {
SnapshotObjectId id() const { return id_; }
size_t self_size() const { return self_size_; }
unsigned trace_node_id() const { return trace_node_id_; }
- INLINE(int index() const);
+ V8_INLINE int index() const;
int children_count() const { return children_count_; }
- INLINE(int set_children_index(int index));
+ V8_INLINE int set_children_index(int index);
void add_child(HeapGraphEdge* edge) {
*(children_begin() + children_count_++) = edge;
}
HeapGraphEdge* child(int i) { return *(children_begin() + i); }
- INLINE(Isolate* isolate() const);
+ V8_INLINE Isolate* isolate() const;
void SetIndexedReference(
HeapGraphEdge::Type type, int index, HeapEntry* entry);
@@ -138,8 +140,8 @@ class HeapEntry BASE_EMBEDDED {
const char* prefix, const char* edge_name, int max_depth, int indent);
private:
- INLINE(std::deque<HeapGraphEdge*>::iterator children_begin());
- INLINE(std::deque<HeapGraphEdge*>::iterator children_end());
+ V8_INLINE std::deque<HeapGraphEdge*>::iterator children_begin();
+ V8_INLINE std::deque<HeapGraphEdge*>::iterator children_end();
const char* TypeAsString();
unsigned type_: 4;
@@ -341,7 +343,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
- int EstimateObjectsCount(HeapIterator* iterator);
+ int EstimateObjectsCount();
bool IterateAndExtractReferences(SnapshotFiller* filler);
void TagGlobalObjects();
void TagCodeObject(Code* code);
@@ -354,9 +356,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
static String* GetConstructorName(JSObject* object);
private:
- typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
- HeapObject* object);
-
void MarkVisitedField(int offset);
HeapEntry* AddEntry(HeapObject* object);
@@ -366,11 +365,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* GetSystemEntryName(HeapObject* object);
- template<V8HeapExplorer::ExtractReferencesMethod extractor>
- bool IterateAndExtractSinglePass();
-
- bool ExtractReferencesPass1(int entry, HeapObject* obj);
- bool ExtractReferencesPass2(int entry, HeapObject* obj);
+ void ExtractReferences(int entry, HeapObject* obj);
void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
void ExtractJSObjectReferences(int entry, JSObject* js_obj);
void ExtractStringReferences(int entry, String* obj);
@@ -378,6 +373,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractJSCollectionReferences(int entry, JSCollection* collection);
void ExtractJSWeakCollectionReferences(int entry,
JSWeakCollection* collection);
+ void ExtractEphemeronHashTableReferences(int entry,
+ EphemeronHashTable* table);
void ExtractContextReferences(int entry, Context* context);
void ExtractMapReferences(int entry, Map* map);
void ExtractSharedFunctionInfoReferences(int entry,
@@ -391,6 +388,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractArrayBoilerplateDescriptionReferences(
+ int entry, ArrayBoilerplateDescription* value);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractJSPromiseReferences(int entry, JSPromise* promise);
void ExtractFixedArrayReferences(int entry, FixedArray* array);
@@ -458,8 +457,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
- void TagFixedArraySubType(const FixedArray* array,
- FixedArraySubInstanceType type);
HeapEntry* GetEntry(Object* obj);
@@ -472,7 +469,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
- std::unordered_map<const FixedArray*, FixedArraySubInstanceType> array_types_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
std::vector<bool> visited_fields_;
@@ -561,7 +557,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
bool FillReferences();
void ProgressStep();
bool ProgressReport(bool force = false);
- void SetProgressTotal(int iterations_count);
+ void InitProgressCounter();
HeapSnapshot* snapshot_;
v8::ActivityControl* control_;
@@ -590,12 +586,12 @@ class HeapSnapshotJSONSerializer {
void Serialize(v8::OutputStream* stream);
private:
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ V8_INLINE static bool StringsMatch(void* key1, void* key2) {
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
}
- INLINE(static uint32_t StringHash(const void* string)) {
+ V8_INLINE static uint32_t StringHash(const void* string) {
const char* s = reinterpret_cast<const char*>(string);
int len = static_cast<int>(strlen(s));
return StringHasher::HashSequentialString(
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 4273234dd2..92619f2fbf 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -42,6 +42,7 @@ int SourcePositionTable::GetSourceLineNumber(int pc_offset) const {
return it->line_number;
}
+const char* const CodeEntry::kWasmResourceNamePrefix = "wasm ";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
@@ -346,13 +347,13 @@ class Position {
public:
explicit Position(ProfileNode* node)
: node(node), child_idx_(0) { }
- INLINE(ProfileNode* current_child()) {
+ V8_INLINE ProfileNode* current_child() {
return node->children()->at(child_idx_);
}
- INLINE(bool has_current_child()) {
+ V8_INLINE bool has_current_child() {
return child_idx_ < static_cast<int>(node->children()->size());
}
- INLINE(void next_child()) { ++child_idx_; }
+ V8_INLINE void next_child() { ++child_idx_; }
ProfileNode* node;
private:
@@ -385,6 +386,8 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
using v8::tracing::TracedValue;
+std::atomic<uint32_t> CpuProfile::last_id_;
+
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
bool record_samples, ProfilingMode mode)
: title_(title),
@@ -393,12 +396,13 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(profiler->isolate()),
profiler_(profiler),
- streaming_next_sample_(0) {
+ streaming_next_sample_(0),
+ id_(++last_id_) {
auto value = TracedValue::Create();
value->SetDouble("startTime",
(start_time_ - base::TimeTicks()).InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
- "Profile", this, "data", std::move(value));
+ "Profile", id_, "data", std::move(value));
}
void CpuProfile::AddPath(base::TimeTicks timestamp,
@@ -490,7 +494,7 @@ void CpuProfile::StreamPendingTraceEvents() {
}
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
- "ProfileChunk", this, "data", std::move(value));
+ "ProfileChunk", id_, "data", std::move(value));
}
void CpuProfile::FinishProfile() {
@@ -499,7 +503,7 @@ void CpuProfile::FinishProfile() {
auto value = TracedValue::Create();
value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
- "ProfileChunk", this, "data", std::move(value));
+ "ProfileChunk", id_, "data", std::move(value));
}
void CpuProfile::Print() {
@@ -525,8 +529,6 @@ void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
ClearCodesInRange(addr, addr + size);
unsigned index = AddCodeEntry(addr, entry);
code_map_.emplace(addr, CodeEntryMapInfo{index, size});
- DCHECK(entry->instruction_start() == kNullAddress ||
- addr == entry->instruction_start());
}
void CodeMap::ClearCodesInRange(Address start, Address end) {
@@ -548,14 +550,8 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
auto it = code_map_.upper_bound(addr);
if (it == code_map_.begin()) return nullptr;
--it;
- Address start_address = it->first;
- Address end_address = start_address + it->second.size;
- CodeEntry* ret = addr < end_address ? entry(it->second.index) : nullptr;
- if (ret && ret->instruction_start() != kNullAddress) {
- DCHECK_EQ(start_address, ret->instruction_start());
- DCHECK(addr >= start_address && addr < end_address);
- }
- return ret;
+ Address end_address = it->first + it->second.size;
+ return addr < end_address ? entry(it->second.index) : nullptr;
}
void CodeMap::MoveCode(Address from, Address to) {
@@ -567,9 +563,6 @@ void CodeMap::MoveCode(Address from, Address to) {
DCHECK(from + info.size <= to || to + info.size <= from);
ClearCodesInRange(to, to + info.size);
code_map_.emplace(to, info);
-
- CodeEntry* entry = code_entries_[info.index].entry;
- entry->set_instruction_start(to);
}
unsigned CodeMap::AddCodeEntry(Address start, CodeEntry* entry) {
@@ -597,9 +590,7 @@ void CodeMap::Print() {
}
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
- : resource_names_(isolate->heap()->HashSeed()),
- profiler_(nullptr),
- current_profiles_semaphore_(1) {}
+ : profiler_(nullptr), current_profiles_semaphore_(1) {}
bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples,
@@ -702,29 +693,26 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (sample.pc != nullptr) {
if (sample.has_external_callback && sample.state == EXTERNAL) {
// Don't use PC when in external callback code, as it can point
- // inside a callback's code, and we will erroneously report
+ // inside callback's code, and we will erroneously report
// that a callback calls itself.
stack_trace.push_back(
{FindEntry(reinterpret_cast<Address>(sample.external_callback_entry)),
no_line_info});
} else {
- Address attributed_pc = reinterpret_cast<Address>(sample.pc);
- CodeEntry* pc_entry = FindEntry(attributed_pc);
- // If there is no pc_entry, we're likely in native code. Find out if the
- // top of the stack (the return address) was pointing inside a JS
- // function, meaning that we have encountered a frameless invocation.
+ CodeEntry* pc_entry = FindEntry(reinterpret_cast<Address>(sample.pc));
+ // If there is no pc_entry we're likely in native code.
+ // Find out, if top of stack was pointing inside a JS function
+ // meaning that we have encountered a frameless invocation.
if (!pc_entry && !sample.has_external_callback) {
- attributed_pc = reinterpret_cast<Address>(sample.tos);
- pc_entry = FindEntry(attributed_pc);
+ pc_entry = FindEntry(reinterpret_cast<Address>(sample.tos));
}
// If pc is in the function code before it set up stack frame or after the
- // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
- // ebp contains the return address of the current function and skips the
- // caller's frame. Check for this case and just skip such samples.
+ // frame was destroyed SafeStackFrameIterator incorrectly thinks that
+ // ebp contains return address of the current function and skips caller's
+ // frame. Check for this case and just skip such samples.
if (pc_entry) {
- int pc_offset =
- static_cast<int>(attributed_pc - pc_entry->instruction_start());
- DCHECK_GE(pc_offset, 0);
+ int pc_offset = static_cast<int>(reinterpret_cast<Address>(sample.pc) -
+ pc_entry->instruction_start());
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
@@ -756,7 +744,6 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
static_cast<int>(stack_pos - entry->instruction_start());
- DCHECK_GE(pc_offset, 0);
const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index e575a78648..3e301a4082 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -5,6 +5,7 @@
#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
#define V8_PROFILER_PROFILE_GENERATOR_H_
+#include <atomic>
#include <deque>
#include <limits>
#include <map>
@@ -107,13 +108,12 @@ class CodeEntry {
const std::vector<std::unique_ptr<CodeEntry>>* GetInlineStack(
int pc_offset) const;
- void set_instruction_start(Address start) { instruction_start_ = start; }
Address instruction_start() const { return instruction_start_; }
-
CodeEventListener::LogEventsAndTags tag() const {
return TagField::decode(bit_field_);
}
+ static const char* const kWasmResourceNamePrefix;
static const char* const kEmptyResourceName;
static const char* const kEmptyBailoutReason;
static const char* const kNoDeoptReason;
@@ -262,7 +262,6 @@ class ProfileNode {
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
-
class ProfileTree {
public:
explicit ProfileTree(Isolate* isolate);
@@ -355,6 +354,9 @@ class CpuProfile {
ProfileTree top_down_;
CpuProfiler* const profiler_;
size_t streaming_next_sample_;
+ uint32_t id_;
+
+ static std::atomic<uint32_t> last_id_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index e3c2c140fb..9c29da9ec7 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -17,18 +17,16 @@ namespace internal {
ProfilerListener::ProfilerListener(Isolate* isolate,
CodeEventObserver* observer)
- : isolate_(isolate),
- observer_(observer),
- function_and_resource_names_(isolate->heap()->HashSeed()) {}
+ : isolate_(isolate), observer_(observer) {}
ProfilerListener::~ProfilerListener() = default;
void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = entry_point;
+ rec->start = entry_point;
rec->entry = NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name));
- rec->instruction_size = 1;
+ rec->size = 1;
DispatchCodeEvent(evt_rec);
}
@@ -36,13 +34,13 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, const char* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
- rec->entry = NewCodeEntry(
- tag, GetFunctionName(name), CodeEntry::kEmptyResourceName,
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- nullptr, code->InstructionStart());
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr,
+ code->InstructionStart());
RecordInliningInfo(rec->entry, code);
- rec->instruction_size = code->InstructionSize();
+ rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
@@ -50,13 +48,13 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, Name* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
- rec->entry = NewCodeEntry(
- tag, GetFunctionName(name), CodeEntry::kEmptyResourceName,
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- nullptr, code->InstructionStart());
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr,
+ code->InstructionStart());
RecordInliningInfo(rec->entry, code);
- rec->instruction_size = code->InstructionSize();
+ rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
@@ -66,15 +64,15 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
Name* script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
- rec->entry = NewCodeEntry(tag, GetFunctionName(shared->DebugName()),
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(tag, GetName(shared->DebugName()),
GetName(InferScriptName(script_name, shared)),
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->InstructionStart());
RecordInliningInfo(rec->entry, code);
rec->entry->FillFunctionInfo(shared);
- rec->instruction_size = code->InstructionSize();
+ rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
@@ -85,7 +83,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
int column) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = abstract_code->InstructionStart();
+ rec->start = abstract_code->address();
std::unique_ptr<SourcePositionTable> line_table;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
@@ -102,12 +100,12 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
}
rec->entry =
- NewCodeEntry(tag, GetFunctionName(shared->DebugName()),
+ NewCodeEntry(tag, GetName(shared->DebugName()),
GetName(InferScriptName(script_name, shared)), line, column,
std::move(line_table), abstract_code->InstructionStart());
RecordInliningInfo(rec->entry, abstract_code);
rec->entry->FillFunctionInfo(shared);
- rec->instruction_size = abstract_code->InstructionSize();
+ rec->size = abstract_code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
@@ -116,24 +114,20 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
wasm::WasmName name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->instruction_start();
- // TODO(herhut): Instead of sanitizing here, make sure all wasm functions
- // have names.
- const char* name_ptr =
- name.start() == nullptr ? "<anonymous>" : GetFunctionName(name.start());
- rec->entry = NewCodeEntry(tag, name_ptr, CodeEntry::kEmptyResourceName,
- CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->instruction_start());
- rec->instruction_size = code->instructions().length();
+ rec->start = code->instruction_start();
+ rec->entry = NewCodeEntry(
+ tag, GetName(name.start()), CodeEntry::kWasmResourceNamePrefix,
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ nullptr, code->instruction_start());
+ rec->size = code->instructions().length();
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from_instruction_start = from->InstructionStart();
- rec->to_instruction_start = to->InstructionStart();
+ rec->from = from->address();
+ rec->to = to;
DispatchCodeEvent(evt_rec);
}
@@ -141,17 +135,17 @@ void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ rec->start = code->address();
rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
- int fp_to_sp_delta) {
+void ProfilerListener::CodeDeoptEvent(Code* code, DeoptimizeKind kind,
+ Address pc, int fp_to_sp_delta) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
- rec->instruction_start = code->InstructionStart();
+ rec->start = code->address();
rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
rec->deopt_id = info.deopt_id;
rec->pc = pc;
@@ -166,10 +160,10 @@ void ProfilerListener::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = entry_point;
+ rec->start = entry_point;
rec->entry =
NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetConsName("get ", name));
- rec->instruction_size = 1;
+ rec->size = 1;
DispatchCodeEvent(evt_rec);
}
@@ -177,22 +171,23 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
String* source) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ rec->start = code->address();
rec->entry = NewCodeEntry(
CodeEventListener::REG_EXP_TAG, GetConsName("RegExp: ", source),
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, code->InstructionStart());
- rec->instruction_size = code->InstructionSize();
+ CpuProfileNode::kNoColumnNumberInfo, nullptr,
+ code->raw_instruction_start());
+ rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = entry_point;
+ rec->start = entry_point;
rec->entry =
NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetConsName("set ", name));
- rec->instruction_size = 1;
+ rec->size = 1;
DispatchCodeEvent(evt_rec);
}
@@ -243,7 +238,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
: CodeEntry::kEmptyResourceName;
CodeEntry* inline_entry =
- new CodeEntry(entry->tag(), GetFunctionName(shared_info->DebugName()),
+ new CodeEntry(entry->tag(), GetName(shared_info->DebugName()),
resource_name, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->InstructionStart());
@@ -286,7 +281,7 @@ void ProfilerListener::AttachDeoptInlinedFrames(Code* code,
// scope limits their lifetime.
HandleScope scope(isolate_);
std::vector<SourcePositionInfo> stack =
- last_position.InliningStack(handle(code));
+ last_position.InliningStack(handle(code, isolate_));
CpuProfileDeoptFrame* deopt_frames =
new CpuProfileDeoptFrame[stack.size()];
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index 313a6808c4..5cff7cc11d 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -44,10 +44,10 @@ class ProfilerListener : public CodeEventListener {
wasm::WasmName name) override;
void CodeMovingGCEvent() override {}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) override;
- void CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
+ void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override;
void GetterCallbackEvent(Name* name, Address entry_point) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
@@ -68,15 +68,12 @@ class ProfilerListener : public CodeEventListener {
const char* GetName(int args_count) {
return function_and_resource_names_.GetName(args_count);
}
+ const char* GetName(const char* name) {
+ return function_and_resource_names_.GetCopy(name);
+ }
const char* GetConsName(const char* prefix, Name* name) {
return function_and_resource_names_.GetConsName(prefix, name);
}
- const char* GetFunctionName(Name* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
private:
void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 734c2ea36d..4501cd6f79 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -204,7 +204,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
// the first element in the list.
for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
SharedFunctionInfo* shared = *it;
- const char* name = this->names()->GetFunctionName(shared->DebugName());
+ const char* name = this->names()->GetName(shared->DebugName());
int script_id = v8::UnboundScript::kNoScriptId;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
@@ -282,7 +282,7 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
{
Script::Iterator iterator(isolate_);
while (Script* script = iterator.Next()) {
- scripts[script->id()] = handle(script);
+ scripts[script->id()] = handle(script, isolate_);
}
}
auto profile = new v8::internal::AllocationProfile();
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index e48d054df6..9a5a006ff4 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -17,8 +17,7 @@ bool StringsStorage::StringsMatch(void* key1, void* key2) {
0;
}
-StringsStorage::StringsStorage(uint32_t hash_seed)
- : hash_seed_(hash_seed), names_(StringsMatch) {}
+StringsStorage::StringsStorage() : names_(StringsMatch) {}
StringsStorage::~StringsStorage() {
for (base::HashMap::Entry* p = names_.Start(); p != nullptr;
@@ -107,16 +106,8 @@ const char* StringsStorage::GetConsName(const char* prefix, Name* name) {
return "";
}
-const char* StringsStorage::GetFunctionName(Name* name) {
- return GetName(name);
-}
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return GetCopy(name);
-}
-
base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
- uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
+ uint32_t hash = StringHasher::HashSequentialString(str, len, kZeroHashSeed);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
}
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index cdc22e48f2..5c0f8afd93 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -20,7 +20,7 @@ class Name;
// forever, even if they disappear from JS heap or external storage.
class V8_EXPORT_PRIVATE StringsStorage {
public:
- explicit StringsStorage(uint32_t hash_seed);
+ StringsStorage();
~StringsStorage();
// Copies the given c-string and stores it, returning the stored copy, or just
@@ -35,10 +35,6 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Appends string resulting from name to prefix, then returns the stored
// result.
const char* GetConsName(const char* prefix, Name* name);
- // Does exactly the same thing as GetName(Name* name).
- const char* GetFunctionName(Name* name);
- // Does exactly the same thing as GetCopy(const char* name).
- const char* GetFunctionName(const char* name);
private:
static bool StringsMatch(void* key1, void* key2);
@@ -49,7 +45,6 @@ class V8_EXPORT_PRIVATE StringsStorage {
PRINTF_FORMAT(2, 0)
const char* GetVFormatted(const char* format, va_list args);
- uint32_t hash_seed_;
base::CustomMatcherHashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
diff --git a/deps/v8/src/profiler/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index 062f1ce609..0efe95abdf 100644
--- a/deps/v8/src/profiler/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -24,13 +24,13 @@ class UnboundQueue BASE_EMBEDDED {
inline UnboundQueue();
inline ~UnboundQueue();
- INLINE(bool Dequeue(Record* rec));
- INLINE(void Enqueue(const Record& rec));
- INLINE(bool IsEmpty() const);
- INLINE(Record* Peek() const);
+ V8_INLINE bool Dequeue(Record* rec);
+ V8_INLINE void Enqueue(const Record& rec);
+ V8_INLINE bool IsEmpty() const;
+ V8_INLINE Record* Peek() const;
private:
- INLINE(void DeleteFirst());
+ V8_INLINE void DeleteFirst();
struct Node;
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index d2a7b6695e..3fdd39287b 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -57,7 +57,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
// TODO(jkummerow): support dictionary properties?
if (map->is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors());
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
Name* key = descs->GetKey(i);
@@ -76,27 +76,27 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
- value = handle(descs->GetValue(i), isolate);
+ value = handle(descs->GetStrongValue(i), isolate);
} else {
DCHECK_EQ(kAccessor, details.kind());
// Bail out to slow path.
return false;
}
}
- Heap* heap = isolate->heap();
- if (key == heap->enumerable_string()) {
- desc->set_enumerable(value->BooleanValue());
- } else if (key == heap->configurable_string()) {
- desc->set_configurable(value->BooleanValue());
- } else if (key == heap->value_string()) {
+ ReadOnlyRoots roots(isolate);
+ if (key == roots.enumerable_string()) {
+ desc->set_enumerable(value->BooleanValue(isolate));
+ } else if (key == roots.configurable_string()) {
+ desc->set_configurable(value->BooleanValue(isolate));
+ } else if (key == roots.value_string()) {
desc->set_value(value);
- } else if (key == heap->writable_string()) {
- desc->set_writable(value->BooleanValue());
- } else if (key == heap->get_string()) {
+ } else if (key == roots.writable_string()) {
+ desc->set_writable(value->BooleanValue(isolate));
+ } else if (key == roots.get_string()) {
// Bail out to slow path to throw an exception if necessary.
if (!value->IsCallable()) return false;
desc->set_get(value);
- } else if (key == heap->set_string()) {
+ } else if (key == roots.set_string()) {
// Bail out to slow path to throw an exception if necessary.
if (!value->IsCallable()) return false;
desc->set_set(value);
@@ -212,7 +212,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
}
// 6c. Set the [[Enumerable]] field of desc to enum.
if (!enumerable.is_null()) {
- desc->set_enumerable(enumerable->BooleanValue());
+ desc->set_enumerable(enumerable->BooleanValue(isolate));
}
// configurable?
@@ -224,7 +224,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
}
// 9c. Set the [[Configurable]] field of desc to conf.
if (!configurable.is_null()) {
- desc->set_configurable(configurable->BooleanValue());
+ desc->set_configurable(configurable->BooleanValue(isolate));
}
// value?
@@ -245,7 +245,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
return false;
}
// 15c. Set the [[Writable]] field of desc to writable.
- if (!writable.is_null()) desc->set_writable(writable->BooleanValue());
+ if (!writable.is_null()) desc->set_writable(writable->BooleanValue(isolate));
// getter?
Handle<Object> getter;
@@ -361,11 +361,11 @@ Handle<PropertyDescriptorObject> PropertyDescriptor::ToPropertyDescriptorObject(
obj->set(PropertyDescriptorObject::kFlagsIndex, Smi::FromInt(flags));
obj->set(PropertyDescriptorObject::kValueIndex,
- has_value() ? *value_ : isolate->heap()->the_hole_value());
+ has_value() ? *value_ : ReadOnlyRoots(isolate).the_hole_value());
obj->set(PropertyDescriptorObject::kGetIndex,
- has_get() ? *get_ : isolate->heap()->the_hole_value());
+ has_get() ? *get_ : ReadOnlyRoots(isolate).the_hole_value());
obj->set(PropertyDescriptorObject::kSetIndex,
- has_set() ? *set_ : isolate->heap()->the_hole_value());
+ has_set() ? *set_ : ReadOnlyRoots(isolate).the_hole_value());
return obj;
}
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index dbd4f93acd..4968258860 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -76,11 +76,12 @@ enum PropertyLocation { kField = 0, kDescriptor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::ConstnessField.
-enum PropertyConstness { kMutable = 0, kConst = 1 };
+enum class PropertyConstness { kMutable = 0, kConst = 1 };
// TODO(ishell): remove once constant field tracking is done.
const PropertyConstness kDefaultFieldConstness =
- FLAG_track_constant_fields ? kConst : kMutable;
+ FLAG_track_constant_fields ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
class Representation {
public:
@@ -410,15 +411,15 @@ inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
return b == kField || a == kDescriptor;
}
-// kMutable constness is more general than kConst, kConst generalizes only to
-// itself.
+// PropertyConstness::kMutable constness is more general than
+// VariableMode::kConst, VariableMode::kConst generalizes only to itself.
inline bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
- return b == kMutable || a == kConst;
+ return b == PropertyConstness::kMutable || a == PropertyConstness::kConst;
}
inline PropertyConstness GeneralizeConstness(PropertyConstness a,
PropertyConstness b) {
- return a == kMutable ? kMutable : b;
+ return a == PropertyConstness::kMutable ? PropertyConstness::kMutable : b;
}
std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 136cb821b3..dfa5221bc4 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -25,16 +25,17 @@ std::ostream& operator<<(std::ostream& os,
Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
PropertyAttributes attributes,
Representation representation) {
- return DataField(key, field_index, attributes, kMutable, representation,
- FieldType::Any(key->GetIsolate()));
+ return DataField(key, field_index, attributes, PropertyConstness::kMutable,
+ representation,
+ MaybeObjectHandle(FieldType::Any(key->GetIsolate())));
}
Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
- Handle<Object> wrapped_field_type) {
- DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
+ MaybeObjectHandle wrapped_field_type) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakHeapObject());
PropertyDetails details(kData, attributes, kField, constness, representation,
field_index);
return Descriptor(key, wrapped_field_type, details);
@@ -44,12 +45,13 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
Handle<Object> value,
PropertyAttributes attributes) {
if (FLAG_track_constant_fields) {
- Handle<Object> any_type(FieldType::Any(), key->GetIsolate());
- return DataField(key, field_index, attributes, kConst,
+ MaybeObjectHandle any_type(FieldType::Any(), key->GetIsolate());
+ return DataField(key, field_index, attributes, PropertyConstness::kConst,
Representation::Tagged(), any_type);
} else {
- return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+ return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
+ kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(), field_index);
}
}
@@ -57,7 +59,7 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
// Outputs PropertyDetails as a dictionary details.
void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
os << "(";
- if (constness() == kConst) os << "const ";
+ if (constness() == PropertyConstness::kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
os << ", dict_index: " << dictionary_index();
os << ", attrs: " << attributes() << ")";
@@ -66,7 +68,7 @@ void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
// Outputs PropertyDetails as a descriptor array details.
void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
- if (constness() == kConst) os << "const ";
+ if (constness() == PropertyConstness::kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
if (location() == kField) {
os << " field";
@@ -90,7 +92,7 @@ void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
#ifdef OBJECT_PRINT
void PropertyDetails::Print(bool dictionary_mode) {
- OFStream os(stdout);
+ StdoutStream os;
if (dictionary_mode) {
PrintAsSlowTo(os);
} else {
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index eccaeb006f..4173491466 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -26,7 +26,7 @@ class Descriptor final BASE_EMBEDDED {
Descriptor() : details_(Smi::kZero) {}
Handle<Name> GetKey() const { return key_; }
- Handle<Object> GetValue() const { return value_; }
+ MaybeObjectHandle GetValue() const { return value_; }
PropertyDetails GetDetails() const { return details_; }
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
@@ -39,11 +39,12 @@ class Descriptor final BASE_EMBEDDED {
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
- Handle<Object> wrapped_field_type);
+ MaybeObjectHandle wrapped_field_type);
static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
- return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+ return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
+ kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(), 0);
}
@@ -53,31 +54,24 @@ class Descriptor final BASE_EMBEDDED {
static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
PropertyAttributes attributes) {
- return Descriptor(key, foreign, kAccessor, attributes, kDescriptor, kConst,
+ return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
+ kDescriptor, PropertyConstness::kConst,
Representation::Tagged(), 0);
}
private:
Handle<Name> key_;
- Handle<Object> value_;
+ MaybeObjectHandle value_;
PropertyDetails details_;
protected:
- void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
- DCHECK(key->IsUniqueName());
- DCHECK_IMPLIES(key->IsPrivate(), !details.IsEnumerable());
- key_ = key;
- value_ = value;
- details_ = details;
- }
-
- Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details)
+ Descriptor(Handle<Name> key, MaybeObjectHandle value, PropertyDetails details)
: key_(key), value_(value), details_(details) {
DCHECK(key->IsUniqueName());
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
- Descriptor(Handle<Name> key, Handle<Object> value, PropertyKind kind,
+ Descriptor(Handle<Name> key, MaybeObjectHandle value, PropertyKind kind,
PropertyAttributes attributes, PropertyLocation location,
PropertyConstness constness, Representation representation,
int field_index)
@@ -89,8 +83,6 @@ class Descriptor final BASE_EMBEDDED {
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
- friend class DescriptorArray;
- friend class Map;
friend class MapUpdater;
};
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 082a139a1c..71ae1ff9f1 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -51,9 +51,9 @@ class PrototypeIterator {
if (where_to_start == kStartAtPrototype) Advance();
}
- explicit PrototypeIterator(Map* receiver_map,
+ explicit PrototypeIterator(Isolate* isolate, Map* receiver_map,
WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(receiver_map->GetIsolate()),
+ : isolate_(isolate),
object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
where_to_end_(where_to_end),
is_at_end_(object_->IsNull(isolate_)),
@@ -65,9 +65,9 @@ class PrototypeIterator {
}
}
- explicit PrototypeIterator(Handle<Map> receiver_map,
+ explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(receiver_map->GetIsolate()),
+ : isolate_(isolate),
object_(nullptr),
handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
isolate_),
@@ -88,7 +88,7 @@ class PrototypeIterator {
// PrototypeIterator.
DCHECK(!handle_.is_null());
if (handle_->IsAccessCheckNeeded()) {
- return isolate_->MayAccess(handle(isolate_->context()),
+ return isolate_->MayAccess(handle(isolate_->context(), isolate_),
Handle<JSObject>::cast(handle_));
}
return true;
@@ -110,7 +110,7 @@ class PrototypeIterator {
void Advance() {
if (handle_.is_null() && object_->IsJSProxy()) {
is_at_end_ = true;
- object_ = isolate_->heap()->null_value();
+ object_ = ReadOnlyRoots(isolate_).null_value();
return;
} else if (!handle_.is_null() && handle_->IsJSProxy()) {
is_at_end_ = true;
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 1266da3209..756210b218 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/objects.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/regexp/jsregexp.h"
namespace v8 {
@@ -38,11 +39,9 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
int last_end_index = last_match[1];
if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
+ num_matches_ =
+ RegExpImpl::AtomExecRaw(isolate_, regexp_, subject_, last_end_index,
+ register_array_, register_array_size_);
} else {
int last_start_index = last_match[0];
if (last_start_index == last_end_index) {
@@ -53,11 +52,9 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
num_matches_ = 0; // Signal failed match.
return nullptr;
}
- num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
+ num_matches_ = RegExpImpl::IrregexpExecRaw(
+ isolate_, regexp_, subject_, last_end_index, register_array_,
+ register_array_size_);
}
if (num_matches_ <= 0) return nullptr;
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index e26ebaa740..3fdc3d98f5 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -8,6 +8,7 @@
#include <vector>
#include "src/base/platform/platform.h"
+#include "src/code-tracer.h"
#include "src/compilation-cache.h"
#include "src/elements.h"
#include "src/execution.h"
@@ -61,17 +62,17 @@ namespace internal {
V8_WARN_UNUSED_RESULT
static inline MaybeHandle<Object> ThrowRegExpException(
- Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) {
- Isolate* isolate = re->GetIsolate();
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ Handle<String> error_text) {
THROW_NEW_ERROR(isolate, NewSyntaxError(MessageTemplate::kMalformedRegExp,
pattern, error_text),
Object);
}
-
-inline void ThrowRegExpException(Handle<JSRegExp> re,
+inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> error_text) {
- USE(ThrowRegExpException(re, Handle<String>(re->Pattern()), error_text));
+ USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
+ error_text));
}
@@ -128,12 +129,11 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
// Generic RegExp methods. Dispatches to implementation specific methods.
-MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
+MaybeHandle<Object> RegExpImpl::Compile(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
DCHECK(pattern->IsFlat());
- Isolate* isolate = re->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<FixedArray> maybe_cached =
@@ -151,7 +151,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
&parse_result)) {
// Throw an exception if we fail to parse the pattern.
- return ThrowRegExpException(re, pattern, parse_result.error);
+ return ThrowRegExpException(isolate, re, pattern, parse_result.error);
}
bool has_been_compiled = false;
@@ -159,7 +159,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
!HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
- AtomCompile(re, pattern, flags, pattern);
+ AtomCompile(isolate, re, pattern, flags, pattern);
has_been_compiled = true;
} else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
parse_result.capture_count == 0) {
@@ -170,30 +170,30 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
isolate, atom_string,
isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
- AtomCompile(re, pattern, flags, atom_string);
+ AtomCompile(isolate, re, pattern, flags, atom_string);
has_been_compiled = true;
}
}
if (!has_been_compiled) {
- IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
+ IrregexpInitialize(isolate, re, pattern, flags, parse_result.capture_count);
}
DCHECK(re->data()->IsFixedArray());
// Compilation succeeded so the data is set on the regexp
// and we can store it in the cache.
- Handle<FixedArray> data(FixedArray::cast(re->data()));
+ Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
compilation_cache->PutRegExp(pattern, flags, data);
return re;
}
-MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
+MaybeHandle<Object> RegExpImpl::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info) {
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
- return AtomExec(regexp, subject, index, last_match_info);
+ return AtomExec(isolate, regexp, subject, index, last_match_info);
case JSRegExp::IRREGEXP: {
- return IrregexpExec(regexp, subject, index, last_match_info);
+ return IrregexpExec(isolate, regexp, subject, index, last_match_info);
}
default:
UNREACHABLE();
@@ -203,21 +203,17 @@ MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
// RegExp Atom implementation: Simple string search using indexOf.
-
-void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
Handle<String> match_pattern) {
- re->GetIsolate()->factory()->SetRegExpAtomData(re,
- JSRegExp::ATOM,
- pattern,
- flags,
- match_pattern);
+ isolate->factory()->SetRegExpAtomData(re, JSRegExp::ATOM, pattern, flags,
+ match_pattern);
}
-static void SetAtomLastCapture(Handle<RegExpMatchInfo> last_match_info,
+static void SetAtomLastCapture(Isolate* isolate,
+ Handle<RegExpMatchInfo> last_match_info,
String* subject, int from, int to) {
- SealHandleScope shs(last_match_info->GetIsolate());
+ SealHandleScope shs(isolate);
last_match_info->SetNumberOfCaptureRegisters(2);
last_match_info->SetLastSubject(subject);
last_match_info->SetLastInput(subject);
@@ -225,18 +221,13 @@ static void SetAtomLastCapture(Handle<RegExpMatchInfo> last_match_info,
last_match_info->SetCapture(1, to);
}
-
-int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
+int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
int output_size) {
- Isolate* isolate = regexp->GetIsolate();
-
DCHECK_LE(0, index);
DCHECK_LE(index, subject->length());
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -277,22 +268,21 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
return output_size / 2;
}
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
- int index,
+Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info) {
- Isolate* isolate = re->GetIsolate();
-
static const int kNumRegisters = 2;
STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
- int res = AtomExecRaw(re, subject, index, output_registers, kNumRegisters);
+ int res =
+ AtomExecRaw(isolate, re, subject, index, output_registers, kNumRegisters);
if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
SealHandleScope shs(isolate);
- SetAtomLastCapture(last_match_info, *subject, output_registers[0],
+ SetAtomLastCapture(isolate, last_match_info, *subject, output_registers[0],
output_registers[1]);
return last_match_info;
}
@@ -306,7 +296,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
// from the source pattern.
// If compilation fails, an exception is thrown and this function
// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
+bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
Object* compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
@@ -315,15 +305,13 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
#else // V8_INTERPRETED_REGEXP (RegExp native code)
if (compiled_code->IsCode()) return true;
#endif
- return CompileIrregexp(re, sample_subject, is_one_byte);
+ return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
}
-
-bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
+bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
// Compile the RegExp.
- Isolate* isolate = re->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
PostponeInterruptsScope postpone(isolate);
#ifdef DEBUG
@@ -337,15 +325,15 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
JSRegExp::Flags flags = re->GetFlags();
- Handle<String> pattern(re->Pattern());
- pattern = String::Flatten(pattern);
+ Handle<String> pattern(re->Pattern(), isolate);
+ pattern = String::Flatten(isolate, pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
&compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- USE(ThrowRegExpException(re, pattern, compile_data.error));
+ USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
return false;
}
RegExpEngine::CompilationResult result =
@@ -359,11 +347,12 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
}
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
CStrVector(result.error_message)).ToHandleChecked();
- ThrowRegExpException(re, error_message);
+ ThrowRegExpException(isolate, re, error_message);
return false;
}
- Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
+ Handle<FixedArray> data =
+ Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
data->set(JSRegExp::code_index(is_one_byte), result.code);
SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
@@ -413,27 +402,21 @@ Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_one_byte) {
return Code::cast(re->get(JSRegExp::code_index(is_one_byte)));
}
-
-void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
+void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> pattern,
- JSRegExp::Flags flags,
- int capture_count) {
+ JSRegExp::Flags flags, int capture_count) {
// Initialize compiled code entries to null.
- re->GetIsolate()->factory()->SetRegExpIrregexpData(re,
- JSRegExp::IRREGEXP,
- pattern,
- flags,
- capture_count);
+ isolate->factory()->SetRegExpIrregexpData(re, JSRegExp::IRREGEXP, pattern,
+ flags, capture_count);
}
-
-int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
+int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject) {
DCHECK(subject->IsFlat());
// Check representation of the underlying storage.
bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
- if (!EnsureCompiledIrregexp(regexp, subject, is_one_byte)) return -1;
+ if (!EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte)) return -1;
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
@@ -449,14 +432,9 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
#endif // V8_INTERPRETED_REGEXP
}
-
-int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
- int output_size) {
- Isolate* isolate = regexp->GetIsolate();
-
+int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ int32_t* output, int output_size) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
DCHECK_LE(0, index);
@@ -468,7 +446,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
#ifndef V8_INTERPRETED_REGEXP
DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
- EnsureCompiledIrregexp(regexp, subject, is_one_byte);
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
// The stack is used to allocate registers for the compiled regexp code.
// This means that in case of failure, the output registers array is left
@@ -498,7 +476,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
// the, potentially, different subject (the string can switch between
// being internal and external, and even between being Latin1 and UC16,
// but the characters are always the same).
- IrregexpPrepare(regexp, subject);
+ IrregexpPrepare(isolate, regexp, subject);
is_one_byte = subject->IsOneByteRepresentationUnderneath();
} while (true);
UNREACHABLE();
@@ -537,12 +515,11 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
}
MaybeHandle<Object> RegExpImpl::IrregexpExec(
- Handle<JSRegExp> regexp, Handle<String> subject, int previous_index,
- Handle<RegExpMatchInfo> last_match_info) {
- Isolate* isolate = regexp->GetIsolate();
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int previous_index, Handle<RegExpMatchInfo> last_match_info) {
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
// Prepare space for the return values.
#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
@@ -552,7 +529,8 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+ int required_registers =
+ RegExpImpl::IrregexpPrepare(isolate, regexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
DCHECK(isolate->has_pending_exception());
@@ -568,13 +546,14 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
output_registers = isolate->jsregexp_static_offsets_vector();
}
- int res = RegExpImpl::IrregexpExecRaw(
- regexp, subject, previous_index, output_registers, required_registers);
+ int res =
+ RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
+ output_registers, required_registers);
if (res == RE_SUCCESS) {
int capture_count =
IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
- return SetLastMatchInfo(
- last_match_info, subject, capture_count, output_registers);
+ return SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
+ output_registers);
}
if (res == RE_EXCEPTION) {
DCHECK(isolate->has_pending_exception());
@@ -585,22 +564,21 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
}
Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
- Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
- int capture_count, int32_t* match) {
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match) {
// This is the only place where match infos can grow. If, after executing the
// regexp, RegExpExecStub finds that the match info is too small, it restarts
// execution in RegExpImpl::Exec, which finally grows the match info right
// here.
int capture_register_count = (capture_count + 1) * 2;
- Handle<RegExpMatchInfo> result =
- RegExpMatchInfo::ReserveCaptures(last_match_info, capture_register_count);
+ Handle<RegExpMatchInfo> result = RegExpMatchInfo::ReserveCaptures(
+ isolate, last_match_info, capture_register_count);
result->SetNumberOfCaptureRegisters(capture_register_count);
if (*result != *last_match_info) {
// The match info has been reallocated, update the corresponding reference
// on the native context.
- Isolate* isolate = last_match_info->GetIsolate();
if (*last_match_info == *isolate->regexp_last_match_info()) {
isolate->native_context()->set_regexp_last_match_info(*result);
} else if (*last_match_info == *isolate->regexp_internal_match_info()) {
@@ -625,7 +603,8 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
: register_array_(nullptr),
register_array_size_(0),
regexp_(regexp),
- subject_(subject) {
+ subject_(subject),
+ isolate_(isolate) {
#ifdef V8_INTERPRETED_REGEXP
bool interpreted = true;
#else
@@ -638,7 +617,8 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
// There is no distinction between interpreted and native for atom regexps.
interpreted = false;
} else {
- registers_per_match_ = RegExpImpl::IrregexpPrepare(regexp_, subject_);
+ registers_per_match_ =
+ RegExpImpl::IrregexpPrepare(isolate_, regexp_, subject_);
if (registers_per_match_ < 0) {
num_matches_ = -1; // Signal exception.
return;
@@ -964,9 +944,9 @@ class RegExpCompiler {
return unicode_lookaround_position_register_;
}
- RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
- RegExpNode* start,
- int capture_count,
+ RegExpEngine::CompilationResult Assemble(Isolate* isolate,
+ RegExpMacroAssembler* assembler,
+ RegExpNode* start, int capture_count,
Handle<String> pattern);
inline void AddWork(RegExpNode* node) {
@@ -1069,14 +1049,9 @@ RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
}
-
RegExpEngine::CompilationResult RegExpCompiler::Assemble(
- RegExpMacroAssembler* macro_assembler,
- RegExpNode* start,
- int capture_count,
- Handle<String> pattern) {
- Isolate* isolate = pattern->GetHeap()->isolate();
-
+ Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
+ int capture_count, Handle<String> pattern) {
#ifdef DEBUG
if (FLAG_trace_regexp_assembler)
macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
@@ -4715,7 +4690,7 @@ void DispatchTable::Dump() {
void RegExpEngine::DotPrint(const char* label,
RegExpNode* node,
bool ignore_case) {
- OFStream os(stdout);
+ StdoutStream os;
DotPrinter printer(os, ignore_case);
printer.PrintNode(label, node);
}
@@ -6664,12 +6639,13 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool is_unicode = IsUnicode(flags);
RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
- if (compiler.optimize()) compiler.set_optimize(!TooMuchRegExpCode(pattern));
+ if (compiler.optimize())
+ compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
// Sample some characters from the middle of the string.
static const int kSampleSize = 128;
- sample_subject = String::Flatten(sample_subject);
+ sample_subject = String::Flatten(isolate, sample_subject);
int chars_sampled = 0;
int half_way = (sample_subject->length() - kSampleSize) / 2;
for (int i = Max(0, half_way);
@@ -6771,7 +6747,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpMacroAssemblerIrregexp macro_assembler(isolate, codes, zone);
#endif // V8_INTERPRETED_REGEXP
- macro_assembler.set_slow_safe(TooMuchRegExpCode(pattern));
+ macro_assembler.set_slow_safe(TooMuchRegExpCode(isolate, pattern));
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
@@ -6791,15 +6767,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
macro_assembler.set_global_mode(mode);
}
- return compiler.Assemble(&macro_assembler,
- node,
- data->capture_count,
+ return compiler.Assemble(isolate, &macro_assembler, node, data->capture_count,
pattern);
}
-
-bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
- Heap* heap = pattern->GetHeap();
+bool RegExpEngine::TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
+ Heap* heap = isolate->heap();
bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
if (heap->isolate()->total_regexp_code_generated() >
RegExpImpl::kRegExpCompiledLimit &&
@@ -6900,7 +6873,8 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
}
}
// Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(isolate->heap()->fixed_cow_array_map());
+ value_array->set_map_no_write_barrier(
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
}
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index f8d21617a1..fd2a90521d 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -73,34 +73,29 @@ class RegExpImpl {
// the implementation wants to store in the data field.
// Returns false if compilation fails.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
- Handle<JSRegExp> re, Handle<String> pattern, JSRegExp::Flags flags);
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ JSRegExp::Flags flags);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
- Handle<JSRegExp> regexp, Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
// Prepares a JSRegExp object with Irregexp-specific data.
- static void IrregexpInitialize(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+ static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
int capture_register_count);
-
- static void AtomCompile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+ static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
Handle<String> match_pattern);
-
- static int AtomExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
+ static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
int output_size);
- static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
+ static Handle<Object> AtomExec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
@@ -113,7 +108,7 @@ class RegExpImpl {
// Returns the number of integer spaces required by IrregexpExecOnce
// as its "registers" argument. If the regexp cannot be compiled,
// an exception is set as pending, and this function returns negative.
- static int IrregexpPrepare(Handle<JSRegExp> regexp,
+ static int IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject);
// Execute a regular expression on the subject, starting from index.
@@ -122,10 +117,8 @@ class RegExpImpl {
// The captures and subcaptures are stored into the registers vector.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static int IrregexpExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- int32_t* output,
+ static int IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
int output_size);
// Execute an Irregexp bytecode pattern.
@@ -133,14 +126,14 @@ class RegExpImpl {
// captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
- Handle<JSRegExp> regexp, Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
// Set last match info. If match is nullptr, then setting captures is
// omitted.
static Handle<RegExpMatchInfo> SetLastMatchInfo(
- Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
- int capture_count, int32_t* match);
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match);
class GlobalCache {
public:
@@ -148,17 +141,17 @@ class RegExpImpl {
Handle<String> subject,
Isolate* isolate);
- INLINE(~GlobalCache());
+ V8_INLINE ~GlobalCache();
// Fetch the next entry in the cache for global regexp match results.
// This does not set the last match info. Upon failure, nullptr is
// returned. The cause can be checked with Result(). The previous result is
// still in available in memory when a failure happens.
- INLINE(int32_t* FetchNext());
+ V8_INLINE int32_t* FetchNext();
- INLINE(int32_t* LastSuccessfulMatch());
+ V8_INLINE int32_t* LastSuccessfulMatch();
- INLINE(bool HasException()) { return num_matches_ < 0; }
+ V8_INLINE bool HasException() { return num_matches_ < 0; }
private:
int AdvanceZeroLength(int last_index);
@@ -172,6 +165,7 @@ class RegExpImpl {
int register_array_size_;
Handle<JSRegExp> regexp_;
Handle<String> subject_;
+ Isolate* isolate_;
};
// For acting on the JSRegExp data FixedArray.
@@ -194,9 +188,10 @@ class RegExpImpl {
static const int kRegExpTooLargeToOptimize = 20 * KB;
private:
- static bool CompileIrregexp(Handle<JSRegExp> re,
+ static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject, bool is_one_byte);
- static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re,
+ static inline bool EnsureCompiledIrregexp(Isolate* isolate,
+ Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte);
};
@@ -1519,7 +1514,7 @@ class RegExpEngine: public AllStatic {
struct CompilationResult {
CompilationResult(Isolate* isolate, const char* error_message)
: error_message(error_message),
- code(isolate->heap()->the_hole_value()),
+ code(ReadOnlyRoots(isolate).the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(nullptr), code(code), num_registers(registers) {}
@@ -1535,7 +1530,7 @@ class RegExpEngine: public AllStatic {
Handle<String> sample_subject,
bool is_one_byte);
- static bool TooMuchRegExpCode(Handle<String> pattern);
+ static bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern);
static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
};
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index a7f461586e..77e8847d68 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -166,8 +166,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
int return_value = 0;
// Prepare for possible GC.
HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
- Handle<String> subject_handle(*subject);
+ Handle<Code> code_handle(re_code, isolate);
+ Handle<String> subject_handle(*subject, isolate);
bool is_one_byte = subject_handle->IsOneByteRepresentationUnderneath();
StackLimitCheck check(isolate);
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index ebc56650b1..c1d2c7d5cd 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -342,20 +342,15 @@ RegExpTree* RegExpParser::ParseDisjunction() {
uc32 p = Next();
Advance(2);
if (unicode()) {
- if (FLAG_harmony_regexp_property) {
- ZoneList<CharacterRange>* ranges =
- new (zone()) ZoneList<CharacterRange>(2, zone());
- if (!ParsePropertyClass(ranges, p == 'P')) {
- return ReportError(CStrVector("Invalid property name"));
- }
- RegExpCharacterClass* cc = new (zone())
- RegExpCharacterClass(zone(), ranges, builder->flags());
- builder->AddCharacterClass(cc);
- } else {
- // With /u, no identity escapes except for syntax characters
- // are allowed. Otherwise, all identity escapes are allowed.
- return ReportError(CStrVector("Invalid escape"));
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ if (!ParsePropertyClass(ranges, p == 'P')) {
+ return ReportError(CStrVector("Invalid property name"));
}
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
+ builder->AddCharacterClass(cc);
+
} else {
builder->AddCharacter(p);
}
@@ -477,18 +472,17 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter('u');
} else {
// With /u, invalid escapes are not treated as identity escapes.
- return ReportError(CStrVector("Invalid unicode escape"));
+ return ReportError(CStrVector("Invalid Unicode escape"));
}
break;
}
case 'k':
// Either an identity escape or a named back-reference. The two
// interpretations are mutually exclusive: '\k' is interpreted as
- // an identity escape for non-unicode patterns without named
+ // an identity escape for non-Unicode patterns without named
// capture groups, and as the beginning of a named back-reference
// in all other cases.
- if (FLAG_harmony_regexp_named_captures &&
- (unicode() || HasNamedCaptures())) {
+ if (unicode() || HasNamedCaptures()) {
Advance(2);
ParseNamedBackReference(builder, state CHECK_FAILED);
break;
@@ -678,13 +672,10 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
subexpr_type = NEGATIVE_LOOKAROUND;
break;
}
- if (FLAG_harmony_regexp_named_captures) {
- is_named_capture = true;
- has_named_captures_ = true;
- Advance();
- break;
- }
- V8_FALLTHROUGH;
+ is_named_capture = true;
+ has_named_captures_ = true;
+ Advance();
+ break;
default:
ReportError(CStrVector("Invalid group"));
return nullptr;
@@ -765,7 +756,6 @@ void RegExpParser::ScanForCaptures() {
// * or a named capture '(?<'.
//
// Of these, only named captures are capturing groups.
- if (!FLAG_harmony_regexp_named_captures) break;
Advance();
if (current() != '<') break;
@@ -830,8 +820,6 @@ static void push_code_unit(ZoneVector<uc16>* v, uint32_t code_unit) {
}
const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
- DCHECK(FLAG_harmony_regexp_named_captures);
-
ZoneVector<uc16>* name =
new (zone()->New(sizeof(ZoneVector<uc16>))) ZoneVector<uc16>(zone());
@@ -879,7 +867,6 @@ const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
int index) {
- DCHECK(FLAG_harmony_regexp_named_captures);
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
@@ -1304,6 +1291,9 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_EMOJI_MODIFIER_BASE:
case UCHAR_EMOJI_MODIFIER:
case UCHAR_EMOJI_PRESENTATION:
+#if U_ICU_VERSION_MAJOR_NUM >= 62
+ case UCHAR_EXTENDED_PICTOGRAPHIC:
+#endif
case UCHAR_EXTENDER:
case UCHAR_GRAPHEME_BASE:
case UCHAR_GRAPHEME_EXTEND:
@@ -1598,7 +1588,7 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
return;
case 'p':
case 'P':
- if (FLAG_harmony_regexp_property && unicode()) {
+ if (unicode()) {
bool negate = Next() == 'P';
Advance(2);
if (!ParsePropertyClass(ranges, negate)) {
@@ -1700,7 +1690,7 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
DCHECK(tree != nullptr);
DCHECK(result->error.is_null());
if (FLAG_trace_regexp_parser) {
- OFStream os(stdout);
+ StdoutStream os;
tree->Print(os, zone);
os << "\n";
}
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 0857342c0c..1f89844f10 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -7,6 +7,7 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/regexp/jsregexp.h"
namespace v8 {
@@ -29,7 +30,7 @@ Handle<String> RegExpUtils::GenericCaptureGetter(
}
if (ok != nullptr) *ok = true;
- Handle<String> last_subject(match_info->LastSubject());
+ Handle<String> last_subject(match_info->LastSubject(), isolate);
return isolate->factory()->NewSubString(last_subject, match_start, match_end);
}
@@ -50,7 +51,8 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
JSRegExp::cast(*recv)->set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
return recv;
} else {
- return Object::SetProperty(recv, isolate->factory()->lastIndex_string(),
+ return Object::SetProperty(isolate, recv,
+ isolate->factory()->lastIndex_string(),
value_as_object, LanguageMode::kStrict);
}
}
@@ -60,7 +62,8 @@ MaybeHandle<Object> RegExpUtils::GetLastIndex(Isolate* isolate,
if (HasInitialRegExpMap(isolate, recv)) {
return handle(JSRegExp::cast(*recv)->last_index(), isolate);
} else {
- return Object::GetProperty(recv, isolate->factory()->lastIndex_string());
+ return Object::GetProperty(isolate, recv,
+ isolate->factory()->lastIndex_string());
}
}
@@ -74,7 +77,8 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
if (exec->IsUndefined(isolate)) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, exec,
- Object::GetProperty(regexp, isolate->factory()->exec_string()), Object);
+ Object::GetProperty(isolate, regexp, isolate->factory()->exec_string()),
+ Object);
}
if (exec->IsCallable()) {
@@ -123,10 +127,11 @@ Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
Handle<Object> match;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, match,
- JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
+ JSObject::GetProperty(isolate, receiver,
+ isolate->factory()->match_symbol()),
Nothing<bool>());
- if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
+ if (!match->IsUndefined(isolate)) return Just(match->BooleanValue(isolate));
return Just(object->IsJSRegExp());
}
@@ -186,7 +191,8 @@ MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
Handle<Object> last_index_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, last_index_obj,
- Object::GetProperty(regexp, isolate->factory()->lastIndex_string()),
+ Object::GetProperty(isolate, regexp,
+ isolate->factory()->lastIndex_string()),
Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index f71423d26e..335de1a053 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -316,6 +316,12 @@ RegisterConfiguration::RegisterConfiguration(
}
}
+const char* RegisterConfiguration::GetGeneralOrSpecialRegisterName(
+ int code) const {
+ if (code < num_general_registers_) return GetGeneralRegisterName(code);
+ return Assembler::GetSpecialRegisterName(code);
+}
+
// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
static_cast<int>(MachineRepresentation::kFloat64) + 1);
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 1299baac69..ad413cc18a 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -102,7 +102,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
bool IsAllocatableSimd128Code(int index) const {
return ((1 << index) & allocatable_simd128_codes_mask_) != 0;
}
+ const char* GetGeneralOrSpecialRegisterName(int code) const;
const char* GetGeneralRegisterName(int code) const {
+ DCHECK_LT(code, num_general_registers_);
return general_register_names_[code];
}
const char* GetFloatRegisterName(int code) const {
diff --git a/deps/v8/src/roots-inl.h b/deps/v8/src/roots-inl.h
new file mode 100644
index 0000000000..4caa9d8f0a
--- /dev/null
+++ b/deps/v8/src/roots-inl.h
@@ -0,0 +1,96 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ROOTS_INL_H_
+#define V8_ROOTS_INL_H_
+
+#include "src/roots.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/objects/api-callbacks.h"
+
+namespace v8 {
+
+namespace internal {
+
+ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate) : heap_(isolate->heap()) {}
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ type* ReadOnlyRoots::name() { \
+ return type::cast(heap_->roots_[Heap::k##camel_name##RootIndex]); \
+ } \
+ Handle<type> ReadOnlyRoots::name##_handle() { \
+ return Handle<type>( \
+ bit_cast<type**>(&heap_->roots_[Heap::k##camel_name##RootIndex])); \
+ }
+STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+ String* ReadOnlyRoots::name() { \
+ return String::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+ } \
+ Handle<String> ReadOnlyRoots::name##_handle() { \
+ return Handle<String>( \
+ bit_cast<String**>(&heap_->roots_[Heap::k##name##RootIndex])); \
+ }
+INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+ Symbol* ReadOnlyRoots::name() { \
+ return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+ } \
+ Handle<Symbol> ReadOnlyRoots::name##_handle() { \
+ return Handle<Symbol>( \
+ bit_cast<Symbol**>(&heap_->roots_[Heap::k##name##RootIndex])); \
+ }
+PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) \
+ Symbol* ReadOnlyRoots::name() { \
+ return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
+ } \
+ Handle<Symbol> ReadOnlyRoots::name##_handle() { \
+ return Handle<Symbol>( \
+ bit_cast<Symbol**>(&heap_->roots_[Heap::k##name##RootIndex])); \
+ }
+PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ Map* ReadOnlyRoots::name##_map() { \
+ return Map::cast(heap_->roots_[Heap::k##Name##MapRootIndex]); \
+ } \
+ Handle<Map> ReadOnlyRoots::name##_map_handle() { \
+ return Handle<Map>( \
+ bit_cast<Map**>(&heap_->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Map* ReadOnlyRoots::name##_map() { \
+ return Map::cast(heap_->roots_[Heap::k##Name##Size##MapRootIndex]); \
+ } \
+ Handle<Map> ReadOnlyRoots::name##_map_handle() { \
+ return Handle<Map>( \
+ bit_cast<Map**>(&heap_->roots_[Heap::k##Name##Size##MapRootIndex])); \
+ }
+ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
+#undef ALLOCATION_SITE_MAP_ACCESSOR
+
+FixedTypedArrayBase* ReadOnlyRoots::EmptyFixedTypedArrayForMap(const Map* map) {
+  // TODO(delphick): All of these empty fixed typed arrays are in RO_SPACE so
+  // the method below can be moved into ReadOnlyRoots.
+ return heap_->EmptyFixedTypedArrayForMap(map);
+}
+
+} // namespace internal
+
+} // namespace v8
+
+#endif // V8_ROOTS_INL_H_
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots.h
new file mode 100644
index 0000000000..20e11317c5
--- /dev/null
+++ b/deps/v8/src/roots.h
@@ -0,0 +1,352 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ROOTS_H_
+#define V8_ROOTS_H_
+
+#include "src/handles.h"
+#include "src/heap-symbols.h"
+#include "src/objects-definitions.h"
+
+namespace v8 {
+
+namespace internal {
+
+// Defines all the read-only roots in Heap.
+#define STRONG_READ_ONLY_ROOT_LIST(V) \
+ /* Cluster the most popular ones in a few cache lines here at the top. */ \
+ /* The first 32 entries are most often used in the startup snapshot and */ \
+ /* can use a shorter representation in the serialization format. */ \
+ V(Map, free_space_map, FreeSpaceMap) \
+ V(Map, one_pointer_filler_map, OnePointerFillerMap) \
+ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
+ V(Oddball, uninitialized_value, UninitializedValue) \
+ V(Oddball, undefined_value, UndefinedValue) \
+ V(Oddball, the_hole_value, TheHoleValue) \
+ V(Oddball, null_value, NullValue) \
+ V(Oddball, true_value, TrueValue) \
+ V(Oddball, false_value, FalseValue) \
+ V(String, empty_string, empty_string) \
+ V(Map, meta_map, MetaMap) \
+ V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, fixed_array_map, FixedArrayMap) \
+ V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(Map, hash_table_map, HashTableMap) \
+ V(Map, symbol_map, SymbolMap) \
+ V(Map, one_byte_string_map, OneByteStringMap) \
+ V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
+ V(Map, scope_info_map, ScopeInfoMap) \
+ V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+ V(Map, code_map, CodeMap) \
+ V(Map, function_context_map, FunctionContextMap) \
+ V(Map, cell_map, CellMap) \
+ V(Map, weak_cell_map, WeakCellMap) \
+ V(Map, global_property_cell_map, GlobalPropertyCellMap) \
+ V(Map, foreign_map, ForeignMap) \
+ V(Map, heap_number_map, HeapNumberMap) \
+ V(Map, transition_array_map, TransitionArrayMap) \
+ V(Map, feedback_vector_map, FeedbackVectorMap) \
+ V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
+ V(FixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
+ /* Entries beyond the first 32 */ \
+ /* The roots above this line should be boring from a GC point of view. */ \
+ /* This means they are never in new space and never on a page that is */ \
+ /* being compacted.*/ \
+ /* Oddballs */ \
+ V(Oddball, arguments_marker, ArgumentsMarker) \
+ V(Oddball, exception, Exception) \
+ V(Oddball, termination_exception, TerminationException) \
+ V(Oddball, optimized_out, OptimizedOut) \
+ V(Oddball, stale_register, StaleRegister) \
+ /* Context maps */ \
+ V(Map, native_context_map, NativeContextMap) \
+ V(Map, module_context_map, ModuleContextMap) \
+ V(Map, eval_context_map, EvalContextMap) \
+ V(Map, script_context_map, ScriptContextMap) \
+ V(Map, block_context_map, BlockContextMap) \
+ V(Map, catch_context_map, CatchContextMap) \
+ V(Map, with_context_map, WithContextMap) \
+ V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
+ V(Map, script_context_table_map, ScriptContextTableMap) \
+ /* Maps */ \
+ V(Map, feedback_metadata_map, FeedbackMetadataArrayMap) \
+ V(Map, array_list_map, ArrayListMap) \
+ V(Map, bigint_map, BigIntMap) \
+ V(Map, object_boilerplate_description_map, ObjectBoilerplateDescriptionMap) \
+ V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(Map, code_data_container_map, CodeDataContainerMap) \
+ V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
+ V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(Map, name_dictionary_map, NameDictionaryMap) \
+ V(Map, no_closures_cell_map, NoClosuresCellMap) \
+ V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, one_closure_cell_map, OneClosureCellMap) \
+ V(Map, ordered_hash_map_map, OrderedHashMapMap) \
+ V(Map, ordered_hash_set_map, OrderedHashSetMap) \
+ V(Map, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
+ V(Map, property_array_map, PropertyArrayMap) \
+ V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap) \
+ V(Map, side_effect_free_call_handler_info_map, \
+ SideEffectFreeCallHandlerInfoMap) \
+ V(Map, next_call_side_effect_free_call_handler_info_map, \
+ NextCallSideEffectFreeCallHandlerInfoMap) \
+ V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
+ V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
+ V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
+ V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
+ V(Map, string_table_map, StringTableMap) \
+ V(Map, uncompiled_data_without_pre_parsed_scope_map, \
+ UncompiledDataWithoutPreParsedScopeMap) \
+ V(Map, uncompiled_data_with_pre_parsed_scope_map, \
+ UncompiledDataWithPreParsedScopeMap) \
+ V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
+ V(Map, weak_array_list_map, WeakArrayListMap) \
+ V(Map, ephemeron_hash_table_map, EphemeronHashTableMap) \
+ /* String maps */ \
+ V(Map, native_source_string_map, NativeSourceStringMap) \
+ V(Map, string_map, StringMap) \
+ V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
+ V(Map, cons_string_map, ConsStringMap) \
+ V(Map, thin_one_byte_string_map, ThinOneByteStringMap) \
+ V(Map, thin_string_map, ThinStringMap) \
+ V(Map, sliced_string_map, SlicedStringMap) \
+ V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
+ V(Map, external_string_map, ExternalStringMap) \
+ V(Map, external_string_with_one_byte_data_map, \
+ ExternalStringWithOneByteDataMap) \
+ V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
+ V(Map, short_external_string_map, ShortExternalStringMap) \
+ V(Map, short_external_string_with_one_byte_data_map, \
+ ShortExternalStringWithOneByteDataMap) \
+ V(Map, internalized_string_map, InternalizedStringMap) \
+ V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
+ V(Map, external_internalized_string_with_one_byte_data_map, \
+ ExternalInternalizedStringWithOneByteDataMap) \
+ V(Map, external_one_byte_internalized_string_map, \
+ ExternalOneByteInternalizedStringMap) \
+ V(Map, short_external_internalized_string_map, \
+ ShortExternalInternalizedStringMap) \
+ V(Map, short_external_internalized_string_with_one_byte_data_map, \
+ ShortExternalInternalizedStringWithOneByteDataMap) \
+ V(Map, short_external_one_byte_internalized_string_map, \
+ ShortExternalOneByteInternalizedStringMap) \
+ V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
+ /* Array element maps */ \
+ V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
+ V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
+ V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
+ V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
+ V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
+ V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
+ V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
+ V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
+ V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
+ V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
+ /* Oddball maps */ \
+ V(Map, undefined_map, UndefinedMap) \
+ V(Map, the_hole_map, TheHoleMap) \
+ V(Map, null_map, NullMap) \
+ V(Map, boolean_map, BooleanMap) \
+ V(Map, uninitialized_map, UninitializedMap) \
+ V(Map, arguments_marker_map, ArgumentsMarkerMap) \
+ V(Map, exception_map, ExceptionMap) \
+ V(Map, termination_exception_map, TerminationExceptionMap) \
+ V(Map, optimized_out_map, OptimizedOutMap) \
+ V(Map, stale_register_map, StaleRegisterMap) \
+ V(Map, self_reference_marker_map, SelfReferenceMarkerMap) \
+ /* Canonical empty values */ \
+ V(EnumCache, empty_enum_cache, EmptyEnumCache) \
+ V(PropertyArray, empty_property_array, EmptyPropertyArray) \
+ V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(ObjectBoilerplateDescription, empty_object_boilerplate_description, \
+ EmptyObjectBoilerplateDescription) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
+ V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
+ V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
+ EmptyFixedUint8ClampedArray) \
+ V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
+ EmptyFixedBigUint64Array) \
+ V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
+ V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
+ V(NumberDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
+ V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
+ V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
+ V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
+ V(WeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
+ V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
+ V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
+ /* Special numbers */ \
+ V(HeapNumber, nan_value, NanValue) \
+ V(HeapNumber, hole_nan_value, HoleNanValue) \
+ V(HeapNumber, infinity_value, InfinityValue) \
+ V(HeapNumber, minus_zero_value, MinusZeroValue) \
+ V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
+ /* Marker for self-references during code-generation */ \
+ V(HeapObject, self_reference_marker, SelfReferenceMarker)
+
+#define STRONG_MUTABLE_ROOT_LIST(V) \
+ /* Maps */ \
+ V(Map, external_map, ExternalMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
+ /* Canonical empty values */ \
+ V(Script, empty_script, EmptyScript) \
+ V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
+ V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
+ /* Protectors */ \
+ V(Cell, array_constructor_protector, ArrayConstructorProtector) \
+ V(PropertyCell, no_elements_protector, NoElementsProtector) \
+ V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
+ V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
+ V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
+ V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
+ V(Cell, string_length_protector, StringLengthProtector) \
+ V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
+ V(PropertyCell, array_buffer_neutering_protector, \
+ ArrayBufferNeuteringProtector) \
+ V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(Cell, promise_resolve_protector, PromiseResolveProtector) \
+ V(PropertyCell, promise_then_protector, PromiseThenProtector) \
+ /* Caches */ \
+ V(FixedArray, number_string_cache, NumberStringCache) \
+ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
+ V(FixedArray, string_split_cache, StringSplitCache) \
+ V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
+ /* Lists and dictionaries */ \
+ V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
+ V(NameDictionary, public_symbol_table, PublicSymbolTable) \
+ V(NameDictionary, api_symbol_table, ApiSymbolTable) \
+ V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
+ V(Object, script_list, ScriptList) \
+ V(SimpleNumberDictionary, code_stubs, CodeStubs) \
+ V(FixedArray, materialized_objects, MaterializedObjects) \
+ V(FixedArray, microtask_queue, MicrotaskQueue) \
+ V(FixedArray, detached_contexts, DetachedContexts) \
+ V(HeapObject, retaining_path_targets, RetainingPathTargets) \
+ V(WeakArrayList, retained_maps, RetainedMaps) \
+ /* Indirection lists for isolate-independent builtins */ \
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
+ /* Feedback vectors that we need for code coverage or type profile */ \
+ V(Object, feedback_vectors_for_profiling_tools, \
+ FeedbackVectorsForProfilingTools) \
+ V(Object, weak_stack_trace_list, WeakStackTraceList) \
+ V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
+ V(FixedArray, serialized_objects, SerializedObjects) \
+ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
+ V(TemplateList, message_listeners, MessageListeners) \
+ /* DeserializeLazy handlers for lazy bytecode deserialization */ \
+ V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
+ V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
+ V(Object, deserialize_lazy_handler_extra_wide, \
+ DeserializeLazyHandlerExtraWide) \
+ /* Hash seed */ \
+ V(ByteArray, hash_seed, HashSeed) \
+ /* JS Entries */ \
+ V(Code, js_entry_code, JsEntryCode) \
+ V(Code, js_construct_entry_code, JsConstructEntryCode) \
+ V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
+
+#define STRONG_ROOT_LIST(V) \
+ STRONG_READ_ONLY_ROOT_LIST(V) \
+ STRONG_MUTABLE_ROOT_LIST(V)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, last_debugging_id, LastDebuggingId) \
+ /* To distinguish the function templates, so that we can find them in the */ \
+ /* function cache of the native context. */ \
+ V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_create_deopt_pc_offset, \
+ ConstructStubCreateDeoptPCOffset) \
+ V(Smi, construct_stub_invoke_deopt_pc_offset, \
+ ConstructStubInvokeDeoptPCOffset) \
+ V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
+
+#define MUTABLE_ROOT_LIST(V) \
+ STRONG_MUTABLE_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
+ V(StringTable, string_table, StringTable)
+
+#define ROOT_LIST(V) \
+ MUTABLE_ROOT_LIST(V) \
+ STRONG_READ_ONLY_ROOT_LIST(V)
+
+class FixedTypedArrayBase;
+class Heap;
+class Isolate;
+class Map;
+class String;
+class Symbol;
+
+class ReadOnlyRoots {
+ public:
+ explicit ReadOnlyRoots(Heap* heap) : heap_(heap) {}
+ inline explicit ReadOnlyRoots(Isolate* isolate);
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline class type* name(); \
+ inline Handle<type> name##_handle();
+ STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+ inline String* name(); \
+ inline Handle<String> name##_handle();
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+ inline Symbol* name(); \
+ inline Handle<Symbol> name##_handle();
+ PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) \
+ inline Symbol* name(); \
+ inline Handle<Symbol> name##_handle();
+ PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+// Utility type maps.
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Map* name##_map(); \
+ inline class Handle<Map> name##_map_handle();
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Map* name##_map(); \
+ inline class Handle<Map> name##_map_handle();
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
+#undef ALLOCATION_SITE_MAP_ACCESSOR
+
+ inline FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
+
+ private:
+ Heap* heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ROOTS_H_
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 10ae84d05d..ae23c99910 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -13,6 +13,7 @@
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/messages.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/prototype.h"
@@ -92,7 +93,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
if (key >= limit) break;
Maybe<bool> has_element = JSReceiver::HasElement(receiver, key);
- MAYBE_RETURN(has_element, isolate->heap()->exception());
+ MAYBE_RETURN(has_element, ReadOnlyRoots(isolate).exception());
if (!has_element.FromJust()) {
continue;
}
@@ -107,7 +108,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// Find next free position to move elements to.
Maybe<uint32_t> free_position =
FindNextFreePosition(isolate, receiver, current_pos);
- MAYBE_RETURN(free_position, isolate->heap()->exception());
+ MAYBE_RETURN(free_position, ReadOnlyRoots(isolate).exception());
current_pos = free_position.FromJust();
// Do not move elements that are already in the "packed" area.
@@ -147,7 +148,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
if (key >= limit) continue;
Maybe<bool> delete_result = JSReceiver::DeleteElement(receiver, key);
- MAYBE_RETURN(delete_result, isolate->heap()->exception());
+ MAYBE_RETURN(delete_result, ReadOnlyRoots(isolate).exception());
}
return *isolate->factory()->NewNumberFromUint(result);
@@ -178,7 +179,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- Handle<NumberDictionary> dict(object->element_dictionary());
+ Handle<NumberDictionary> dict(object->element_dictionary(), isolate);
if (object->IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
return RemoveArrayHolesGeneric(isolate, receiver, limit);
@@ -187,8 +188,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Map> new_map =
JSObject::GetElementsTransitionMap(object, HOLEY_ELEMENTS);
- PretenureFlag tenure =
- isolate->heap()->InNewSpace(*object) ? NOT_TENURED : TENURED;
+ PretenureFlag tenure = Heap::InNewSpace(*object) ? NOT_TENURED : TENURED;
Handle<FixedArray> fast_elements =
isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
dict->CopyValuesTo(*fast_elements);
@@ -207,7 +207,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
- Handle<FixedArrayBase> elements_base(object->elements());
+ Handle<FixedArrayBase> elements_base(object->elements(), isolate);
uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
limit = elements_length;
@@ -217,7 +217,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
}
uint32_t result = 0;
- if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+ if (elements_base->map() == ReadOnlyRoots(isolate).fixed_double_array_map()) {
FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
// Split elements into defined and the_hole, in that order.
unsigned int holes = limit;
@@ -366,7 +366,7 @@ RUNTIME_FUNCTION(Runtime_PrepareElementsForSort) {
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
if (!isolate->debug()->PerformSideEffectCheckForObject(object)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
}
@@ -400,7 +400,7 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
JSObject::ValidateElements(*from);
JSObject::ValidateElements(*to);
- Handle<FixedArrayBase> new_elements(from->elements());
+ Handle<FixedArrayBase> new_elements(from->elements(), isolate);
ElementsKind from_kind = from->GetElementsKind();
Handle<Map> new_map = JSObject::GetElementsTransitionMap(to, from_kind);
JSObject::SetMapAndElements(to, new_map, new_elements);
@@ -422,7 +422,7 @@ RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
CONVERT_ARG_CHECKED(JSArray, array, 0);
FixedArrayBase* elements = array->elements();
SealHandleScope shs(isolate);
- if (elements->IsDictionary()) {
+ if (elements->IsNumberDictionary()) {
int result = NumberDictionary::cast(elements)->NumberOfElements();
return Smi::FromInt(result);
} else {
@@ -500,10 +500,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
j++;
}
- if (j != keys->length()) {
- isolate->heap()->RightTrimFixedArray(*keys, keys->length() - j);
- }
-
+ keys = FixedArray::ShrinkOrEmpty(isolate, keys, j);
return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -609,7 +606,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
// advice. Therefore we use AllocateJSObjectFromMap instead of passing
// the constructor.
if (to_kind != initial_map->elements_kind()) {
- initial_map = Map::AsElementsKind(initial_map, to_kind);
+ initial_map = Map::AsElementsKind(isolate, initial_map, to_kind);
}
// If we don't care to track arrays of to_kind ElementsKind, then
@@ -691,10 +688,10 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (PrototypeIterator::GetCurrent<JSReceiver>(iter)->HasComplexElements()) {
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
}
}
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
// ES6 22.1.2.2 Array.isArray
@@ -703,7 +700,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
Maybe<bool> result = Object::IsArray(object);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -747,7 +744,8 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
Handle<Object> len_;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, len_,
- Object::GetProperty(object, isolate->factory()->length_string()));
+ Object::GetProperty(isolate, object,
+ isolate->factory()->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len_,
Object::ToLength(isolate, len_));
@@ -756,7 +754,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
}
}
- if (len == 0) return isolate->heap()->false_value();
+ if (len == 0) return ReadOnlyRoots(isolate).false_value();
// Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
// produces the value 0.)
@@ -775,7 +773,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
} else {
DCHECK(from_index->IsHeapNumber());
double start_from = from_index->Number();
- if (start_from >= len) return isolate->heap()->false_value();
+ if (start_from >= len) return ReadOnlyRoots(isolate).false_value();
if (V8_LIKELY(std::isfinite(start_from))) {
if (start_from < 0) {
index = static_cast<int64_t>(std::max<double>(start_from + len, 0));
@@ -797,7 +795,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
Maybe<bool> result = elements->IncludesValue(isolate, obj, search_element,
static_cast<uint32_t>(index),
static_cast<uint32_t>(len));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -817,10 +815,10 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// If SameValueZero(searchElement, elementK) is true, return true.
if (search_element->SameValueZero(*element_k)) {
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
}
}
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
@@ -848,7 +846,8 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
Handle<Object> len_;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, len_,
- Object::GetProperty(object, isolate->factory()->length_string()));
+ Object::GetProperty(isolate, object,
+ isolate->factory()->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len_,
Object::ToLength(isolate, len_));
@@ -895,7 +894,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
Maybe<int64_t> result = elements->IndexOfValue(isolate, obj, search_element,
static_cast<uint32_t>(index),
static_cast<uint32_t>(len));
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->NewNumberFromInt64(result.FromJust());
}
@@ -910,7 +909,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
isolate, object, index_obj, &success);
DCHECK(success);
Maybe<bool> present = JSReceiver::HasProperty(&it);
- MAYBE_RETURN(present, isolate->heap()->exception());
+ MAYBE_RETURN(present, ReadOnlyRoots(isolate).exception());
if (!present.FromJust()) continue;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_k,
Object::GetProperty(&it));
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index 1f054a232e..280106751c 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -40,8 +40,9 @@ RUNTIME_FUNCTION(Runtime_BigIntCompareToString) {
CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
CONVERT_ARG_HANDLE_CHECKED(String, rhs, 2);
- bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
- BigInt::CompareToString(lhs, rhs));
+ bool result =
+ ComparisonResultToBool(static_cast<Operation>(mode->value()),
+ BigInt::CompareToString(isolate, lhs, rhs));
return *isolate->factory()->ToBoolean(result);
}
@@ -68,7 +69,7 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
- bool result = BigInt::EqualToString(lhs, rhs);
+ bool result = BigInt::EqualToString(isolate, lhs, rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -83,7 +84,7 @@ RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
- return *BigInt::ToNumber(x);
+ return *BigInt::ToNumber(isolate, x);
}
RUNTIME_FUNCTION(Runtime_ToBigInt) {
@@ -110,40 +111,40 @@ RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
MaybeHandle<BigInt> result;
switch (op) {
case Operation::kAdd:
- result = BigInt::Add(left, right);
+ result = BigInt::Add(isolate, left, right);
break;
case Operation::kSubtract:
- result = BigInt::Subtract(left, right);
+ result = BigInt::Subtract(isolate, left, right);
break;
case Operation::kMultiply:
- result = BigInt::Multiply(left, right);
+ result = BigInt::Multiply(isolate, left, right);
break;
case Operation::kDivide:
- result = BigInt::Divide(left, right);
+ result = BigInt::Divide(isolate, left, right);
break;
case Operation::kModulus:
- result = BigInt::Remainder(left, right);
+ result = BigInt::Remainder(isolate, left, right);
break;
case Operation::kExponentiate:
- result = BigInt::Exponentiate(left, right);
+ result = BigInt::Exponentiate(isolate, left, right);
break;
case Operation::kBitwiseAnd:
- result = BigInt::BitwiseAnd(left, right);
+ result = BigInt::BitwiseAnd(isolate, left, right);
break;
case Operation::kBitwiseOr:
- result = BigInt::BitwiseOr(left, right);
+ result = BigInt::BitwiseOr(isolate, left, right);
break;
case Operation::kBitwiseXor:
- result = BigInt::BitwiseXor(left, right);
+ result = BigInt::BitwiseXor(isolate, left, right);
break;
case Operation::kShiftLeft:
- result = BigInt::LeftShift(left, right);
+ result = BigInt::LeftShift(isolate, left, right);
break;
case Operation::kShiftRight:
- result = BigInt::SignedRightShift(left, right);
+ result = BigInt::SignedRightShift(isolate, left, right);
break;
case Operation::kShiftRightLogical:
- result = BigInt::UnsignedRightShift(left, right);
+ result = BigInt::UnsignedRightShift(isolate, left, right);
break;
default:
UNREACHABLE();
@@ -161,16 +162,16 @@ RUNTIME_FUNCTION(Runtime_BigIntUnaryOp) {
MaybeHandle<BigInt> result;
switch (op) {
case Operation::kBitwiseNot:
- result = BigInt::BitwiseNot(x);
+ result = BigInt::BitwiseNot(isolate, x);
break;
case Operation::kNegate:
- result = BigInt::UnaryMinus(x);
+ result = BigInt::UnaryMinus(isolate, x);
break;
case Operation::kIncrement:
- result = BigInt::Increment(x);
+ result = BigInt::Increment(isolate, x);
break;
case Operation::kDecrement:
- result = BigInt::Decrement(x);
+ result = BigInt::Decrement(isolate, x);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 2978cad72a..6a83087a53 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -103,7 +103,7 @@ RUNTIME_FUNCTION(Runtime_ThrowNotSuperConstructor) {
RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
DCHECK_EQ(0, args.length());
- return isolate->heap()->home_object_symbol();
+ return ReadOnlyRoots(isolate).home_object_symbol();
}
namespace {
@@ -128,7 +128,7 @@ inline void SetHomeObject(Isolate* isolate, JSFunction* method,
if (method->shared()->needs_home_object()) {
const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex;
CHECK_EQ(method->map()->instance_descriptors()->GetKey(kPropertyIndex),
- isolate->heap()->home_object_symbol());
+ ReadOnlyRoots(isolate).home_object_symbol());
FieldIndex field_index =
FieldIndex::ForDescriptor(method->map(), kPropertyIndex);
@@ -213,7 +213,7 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
Object* value = dictionary->ValueAt(i);
if (value->IsAccessorPair()) {
Handle<AccessorPair> pair(AccessorPair::cast(value), isolate);
- pair = AccessorPair::Copy(pair);
+ pair = AccessorPair::Copy(isolate, pair);
dictionary->ValueAtPut(i, *pair);
}
}
@@ -228,9 +228,10 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
// Replace all indices with proper methods.
int capacity = dictionary->Capacity();
+ ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
Object* maybe_key = dictionary->KeyAt(i);
- if (!Dictionary::IsKey(isolate, maybe_key)) continue;
+ if (!Dictionary::IsKey(roots, maybe_key)) continue;
if (install_name_accessor && *install_name_accessor &&
(maybe_key == *name_string)) {
*install_name_accessor = false;
@@ -287,7 +288,7 @@ bool AddDescriptorsByTemplate(
Handle<NumberDictionary> elements_dictionary =
*elements_dictionary_template ==
- isolate->heap()->empty_slow_element_dictionary()
+ ReadOnlyRoots(isolate).empty_slow_element_dictionary()
? elements_dictionary_template
: ShallowCopyDictionaryTemplate(isolate,
elements_dictionary_template);
@@ -295,10 +296,10 @@ bool AddDescriptorsByTemplate(
// Read values from |descriptors_template| and store possibly post-processed
// values into "instantiated" |descriptors| array.
for (int i = 0; i < nof_descriptors; i++) {
- Object* value = descriptors_template->GetValue(i);
+ Object* value = descriptors_template->GetStrongValue(i);
if (value->IsAccessorPair()) {
- Handle<AccessorPair> pair =
- AccessorPair::Copy(handle(AccessorPair::cast(value), isolate));
+ Handle<AccessorPair> pair = AccessorPair::Copy(
+ isolate, handle(AccessorPair::cast(value), isolate));
value = *pair;
}
DisallowHeapAllocation no_gc;
@@ -335,7 +336,7 @@ bool AddDescriptorsByTemplate(
DCHECK(!details.representation().IsDouble());
}
DCHECK(value->FitsRepresentation(details.representation()));
- descriptors->Set(i, name, value, details);
+ descriptors->Set(i, name, MaybeObject::FromObject(value), details);
}
map->InitializeDescriptors(*descriptors,
@@ -409,7 +410,7 @@ bool AddDescriptorsByTemplate(
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
PropertyDetails details(kAccessor, attribs, PropertyCellType::kNoCell);
Handle<NameDictionary> dict = NameDictionary::Add(
- properties_dictionary, isolate->factory()->name_string(),
+ isolate, properties_dictionary, isolate->factory()->name_string(),
isolate->factory()->function_name_accessor(), details);
CHECK_EQ(*dict, *properties_dictionary);
}
@@ -449,9 +450,9 @@ bool InitClassPrototype(Isolate* isolate,
Handle<Object> prototype_parent,
Handle<JSFunction> constructor, Arguments& args) {
Handle<Map> map(prototype->map(), isolate);
- map = Map::CopyDropDescriptors(map);
+ map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);
- Map::SetPrototype(map, prototype_parent);
+ Map::SetPrototype(isolate, map, prototype_parent);
constructor->set_prototype_or_initial_map(*prototype);
map->SetConstructor(*constructor);
Handle<FixedArray> computed_properties(
@@ -462,7 +463,7 @@ bool InitClassPrototype(Isolate* isolate,
Handle<Object> properties_template(
class_boilerplate->instance_properties_template(), isolate);
- if (properties_template->IsDictionary()) {
+ if (properties_template->IsNameDictionary()) {
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
@@ -496,13 +497,13 @@ bool InitClassConstructor(Isolate* isolate,
Handle<Object> constructor_parent,
Handle<JSFunction> constructor, Arguments& args) {
Handle<Map> map(constructor->map(), isolate);
- map = Map::CopyDropDescriptors(map);
+ map = Map::CopyDropDescriptors(isolate, map);
DCHECK(map->is_prototype_map());
if (!constructor_parent.is_null()) {
// Set map's prototype without enabling prototype setup mode for superclass
// because it does not make sense.
- Map::SetPrototype(map, constructor_parent, false);
+ Map::SetPrototype(isolate, map, constructor_parent, false);
}
Handle<NumberDictionary> elements_dictionary_template(
@@ -514,12 +515,12 @@ bool InitClassConstructor(Isolate* isolate,
Handle<Object> properties_template(
class_boilerplate->static_properties_template(), isolate);
- if (properties_template->IsDictionary()) {
+ if (properties_template->IsNameDictionary()) {
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
map->set_is_dictionary_map(true);
- map->InitializeDescriptors(isolate->heap()->empty_descriptor_array(),
+ map->InitializeDescriptors(ReadOnlyRoots(isolate).empty_descriptor_array(),
LayoutDescriptor::FastPointerLayout());
map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
@@ -628,7 +629,7 @@ MaybeHandle<JSReceiver> GetSuperHolder(
Isolate* isolate, Handle<Object> receiver, Handle<JSObject> home_object,
SuperMode mode, MaybeHandle<Name> maybe_name, uint32_t index) {
if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), home_object)) {
+ !isolate->MayAccess(handle(isolate->context(), isolate), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, JSReceiver);
}
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 92ee880719..30e4341be3 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -8,36 +8,25 @@
#include "src/conversions-inl.h"
#include "src/heap/factory.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-collection-inl.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsJSMapIterator) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsJSMapIterator());
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSSetIterator) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsJSSetIterator());
-}
-
RUNTIME_FUNCTION(Runtime_TheHole) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->the_hole_value();
+ return ReadOnlyRoots(isolate).the_hole_value();
}
RUNTIME_FUNCTION(Runtime_SetGrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- table = OrderedHashSet::EnsureGrowable(table);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()), isolate);
+ table = OrderedHashSet::EnsureGrowable(isolate, table);
holder->set_table(*table);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -45,10 +34,10 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- table = OrderedHashSet::Shrink(table);
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()), isolate);
+ table = OrderedHashSet::Shrink(isolate, table);
holder->set_table(*table);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
@@ -65,20 +54,20 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- table = OrderedHashMap::Shrink(table);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()), isolate);
+ table = OrderedHashMap::Shrink(isolate, table);
holder->set_table(*table);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_MapGrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- table = OrderedHashMap::EnsureGrowable(table);
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()), isolate);
+ table = OrderedHashMap::EnsureGrowable(isolate, table);
holder->set_table(*table);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
@@ -109,9 +98,9 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
#ifdef DEBUG
DCHECK(key->IsJSReceiver());
- DCHECK(ObjectHashTableShape::IsLive(isolate, *key));
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(EphemeronHashTableShape::IsLive(ReadOnlyRoots(isolate), *key));
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()), isolate);
// Should only be called when shrinking the table is necessary. See
// HashTable::Shrink().
DCHECK(table->NumberOfElements() - 1 <= (table->Capacity() >> 2) &&
@@ -122,6 +111,14 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
return isolate->heap()->ToBoolean(was_present);
}
+RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+ CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
+ CHECK_GE(max_values, 0);
+ return *JSWeakCollection::GetEntries(holder, max_values);
+}
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
@@ -133,11 +130,11 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
#ifdef DEBUG
DCHECK(key->IsJSReceiver());
- DCHECK(ObjectHashTableShape::IsLive(isolate, *key));
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(EphemeronHashTableShape::IsLive(ReadOnlyRoots(isolate), *key));
+ Handle<EphemeronHashTable> table(
+ EphemeronHashTable::cast(weak_collection->table()), isolate);
// Should only be called when rehashing or resizing the table is necessary.
- // See ObjectHashTable::Put() and HashTable::HasSufficientCapacityToAdd().
+ // See EphemeronHashTable::Put() and HashTable::HasSufficientCapacityToAdd().
DCHECK((table->NumberOfDeletedElements() << 1) > table->NumberOfElements() ||
!table->HasSufficientCapacityToAdd(1));
#endif
@@ -146,30 +143,6 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
return *weak_collection;
}
-
-RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
- CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
- CHECK_GE(max_values, 0);
- return *JSWeakCollection::GetEntries(holder, max_values);
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSMap) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSMap());
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSSet) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSSet());
-}
-
RUNTIME_FUNCTION(Runtime_IsJSWeakMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 3f894fd929..2502fba30d 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -36,7 +36,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return isolate->StackOverflow();
}
if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
return function->code();
@@ -51,7 +51,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kConcurrent)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
return function->code();
@@ -66,10 +66,10 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
DCHECK_EQ(function->feedback_vector()->optimization_marker(),
OptimizationMarker::kLogFirstExecution);
DCHECK(FLAG_log_function_events);
- Handle<SharedFunctionInfo> sfi(function->shared());
- LOG(isolate, FunctionEvent("first-execution", Script::cast(sfi->script()), -1,
- 0, sfi->StartPosition(), sfi->EndPosition(),
- sfi->DebugName()));
+ Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
+ LOG(isolate, FunctionEvent(
+ "first-execution", Script::cast(sfi->script())->id(), 0,
+ sfi->StartPosition(), sfi->EndPosition(), sfi->DebugName()));
function->feedback_vector()->ClearOptimizationMarker();
// Return the code to continue execution, we don't care at this point whether
// this is for lazy compilation or has been eagerly complied.
@@ -85,7 +85,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
return isolate->StackOverflow();
}
if (!Compiler::CompileOptimized(function, ConcurrencyMode::kNotConcurrent)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
return function->code();
@@ -121,8 +121,8 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
memory = args.at<JSArrayBuffer>(3);
}
if (function->shared()->HasAsmWasmData()) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<FixedArray> data(shared->asm_wasm_data());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<FixedArray> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
isolate, shared, data, stdlib, foreign, memory);
if (!result.is_null()) {
@@ -130,9 +130,10 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
}
}
// Remove wasm data, mark as broken for asm->wasm, replace function code with
- // CompileLazy, and return a smi 0 to indicate failure.
+ // UncompiledData, and return a smi 0 to indicate failure.
if (function->shared()->HasAsmWasmData()) {
- function->shared()->FlushCompiled();
+ SharedFunctionInfo::DiscardCompiled(isolate,
+ handle(function->shared(), isolate));
}
function->shared()->set_is_asm_wasm_broken(true);
DCHECK(function->code() ==
@@ -153,7 +154,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Handle<JSFunction> function = deoptimizer->function();
- Deoptimizer::BailoutType type = deoptimizer->bailout_type();
+ DeoptimizeKind type = deoptimizer->deopt_kind();
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
@@ -169,11 +170,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
isolate->set_context(Context::cast(top_frame->context()));
// Invalidate the underlying optimized code on non-lazy deopts.
- if (type != Deoptimizer::LAZY) {
+ if (type != DeoptimizeKind::kLazy) {
Deoptimizer::DeoptimizeFunction(*function);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -203,7 +204,7 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
// the one installed on the function (e.g. patched by debugger). This however
// is fine because we guarantee the layout to be in sync, hence any BailoutId
// representing the entry point will be valid for any copy of the bytecode.
- Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray());
+ Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray(), iframe->isolate());
DCHECK(frame->LookupCode()->is_interpreter_trampoline_builtin());
DCHECK(frame->function()->shared()->HasBytecodeArray());
@@ -294,8 +295,8 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
int eval_scope_position, int eval_position) {
- Handle<Context> context = Handle<Context>(isolate->context());
- Handle<Context> native_context = Handle<Context>(context->native_context());
+ Handle<Context> context(isolate->context(), isolate);
+ Handle<Context> native_context(context->native_context(), isolate);
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
@@ -308,7 +309,7 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
MaybeHandle<Object> maybe_error = isolate->factory()->NewEvalError(
MessageTemplate::kCodeGenFromStrings, error_message);
if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
// Deal with a normal eval call with a string argument. Compile it
@@ -320,7 +321,7 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
restriction, kNoSourcePosition,
eval_scope_position, eval_position),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
return *compiled;
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index fabb1a80da..9711ffad54 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -21,6 +21,8 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-promise-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -45,7 +47,8 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
if (isolate->debug_execution_mode() == DebugInfo::kBreakpoints) {
- isolate->debug()->Break(it.frame(), handle(it.frame()->function()));
+ isolate->debug()->Break(it.frame(),
+ handle(it.frame()->function(), isolate));
}
// Return the handler from the original bytecode array.
@@ -81,7 +84,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
operand_scale);
if (side_effect_check_failed) {
- return MakePair(isolate->heap()->exception(),
+ return MakePair(ReadOnlyRoots(isolate).exception(),
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
@@ -107,15 +110,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
DCHECK_EQ(*function, it.frame()->function());
isolate->debug()->Break(it.frame(), function);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DebugApplyInstrumentation) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- isolate->debug()->ApplyInstrumentation(handle(function->shared()));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
@@ -127,50 +122,13 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
return isolate->stack_guard()->HandleInterrupts();
}
-
RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- isolate->stack_guard()->RequestDebugBreak();
- return isolate->heap()->undefined_value();
-}
-
-static Handle<Object> DebugGetProperty(LookupIterator* it,
- bool* has_caught = nullptr) {
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::ACCESS_CHECK:
- // Ignore access checks.
- break;
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::JSPROXY:
- return it->isolate()->factory()->undefined_value();
- case LookupIterator::ACCESSOR: {
- Handle<Object> accessors = it->GetAccessors();
- if (!accessors->IsAccessorInfo()) {
- return it->isolate()->factory()->undefined_value();
- }
- MaybeHandle<Object> maybe_result =
- JSObject::GetPropertyWithAccessor(it);
- Handle<Object> result;
- if (!maybe_result.ToHandle(&result)) {
- result = handle(it->isolate()->pending_exception(), it->isolate());
- it->isolate()->clear_pending_exception();
- if (has_caught != nullptr) *has_caught = true;
- }
- return result;
- }
-
- case LookupIterator::DATA:
- return it->GetDataValue();
- }
- }
-
- return it->isolate()->factory()->undefined_value();
+ isolate->RequestInterrupt(
+ [](v8::Isolate* isolate, void*) { v8::debug::BreakRightNow(isolate); },
+ nullptr);
+ return ReadOnlyRoots(isolate).undefined_value();
}
template <class IteratorType>
@@ -288,7 +246,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(1, *status_str);
Handle<Object> value_obj(promise->status() == Promise::kPending
- ? isolate->heap()->undefined_value()
+ ? ReadOnlyRoots(isolate).undefined_value()
: promise->result(),
isolate);
Handle<String> promise_value =
@@ -328,627 +286,6 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
return factory->NewJSArray(0);
}
-
-RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- RETURN_RESULT_OR_FAILURE(isolate,
- Runtime::GetInternalProperties(isolate, obj));
-}
-
-
-// Get debugger related details for an object property, in the following format:
-// 0: Property value
-// 1: Property details
-// 2: Property value is exception
-// 3: Getter function if defined
-// 4: Setter function if defined
-// Items 2-4 are only filled if the property has either a getter or a setter.
-RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, name_obj, 1);
-
- // Convert the {name_obj} to a Name.
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, name_obj));
-
- // Make sure to set the current context to the context before the debugger was
- // entered (if the debugger is entered). The reason for switching context here
- // is that for some property lookups (accessors and interceptors) callbacks
- // into the embedding application can occur, and the embedding application
- // could have the assumption that its own native context is the current
- // context and not some internal debugger context.
- SaveContext save(isolate);
- if (isolate->debug()->in_debug_scope()) {
- isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
- }
-
- // Check if the name is trivially convertible to an index and get the element
- // if so.
- uint32_t index;
- // TODO(verwaest): Make sure DebugGetProperty can handle arrays, and remove
- // this special case.
- if (name->AsArrayIndex(&index)) {
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Handle<Object> element_or_char;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element_or_char, JSReceiver::GetElement(isolate, obj, index));
- details->set(0, *element_or_char);
- details->set(1, PropertyDetails::Empty().AsSmi());
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
-
- LookupIterator it(obj, name, LookupIterator::OWN);
- bool has_caught = false;
- Handle<Object> value = DebugGetProperty(&it, &has_caught);
- if (!it.IsFound()) return isolate->heap()->undefined_value();
-
- Handle<Object> maybe_pair;
- if (it.state() == LookupIterator::ACCESSOR) {
- maybe_pair = it.GetAccessors();
- }
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool has_js_accessors = !maybe_pair.is_null() && maybe_pair->IsAccessorPair();
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(has_js_accessors ? 6 : 3);
- details->set(0, *value);
- // TODO(verwaest): Get rid of this random way of handling interceptors.
- PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
- ? PropertyDetails::Empty()
- : it.property_details();
- details->set(1, d.AsSmi());
- details->set(
- 2, isolate->heap()->ToBoolean(it.state() == LookupIterator::INTERCEPTOR));
- if (has_js_accessors) {
- Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_pair);
- details->set(3, isolate->heap()->ToBoolean(has_caught));
- Handle<Object> getter =
- AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
- Handle<Object> setter =
- AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
- details->set(4, *getter);
- details->set(5, *setter);
- }
-
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-
- LookupIterator it(obj, name);
- return *DebugGetProperty(&it);
-}
-
-// Return the property kind calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyKindFromDetails) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(static_cast<int>(details.kind()));
-}
-
-
-// Return the property attribute calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(static_cast<int>(details.attributes()));
-}
-
-
-RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
- return isolate->heap()->true_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetFrameCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- // Count all frames which are relevant to debugging stack trace.
- int n = 0;
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack frame count is 0.
- return Smi::kZero;
- }
-
- std::vector<FrameSummary> frames;
- for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
- frames.clear();
- it.frame()->Summarize(&frames);
- for (size_t i = frames.size(); i != 0; i--) {
- // Omit functions from native and extension scripts.
- if (frames[i - 1].is_subject_to_debugging()) n++;
- }
- }
- return Smi::FromInt(n);
-}
-
-static const int kFrameDetailsFrameIdIndex = 0;
-static const int kFrameDetailsReceiverIndex = 1;
-static const int kFrameDetailsFunctionIndex = 2;
-static const int kFrameDetailsScriptIndex = 3;
-static const int kFrameDetailsArgumentCountIndex = 4;
-static const int kFrameDetailsLocalCountIndex = 5;
-static const int kFrameDetailsSourcePositionIndex = 6;
-static const int kFrameDetailsConstructCallIndex = 7;
-static const int kFrameDetailsAtReturnIndex = 8;
-static const int kFrameDetailsFlagsIndex = 9;
-static const int kFrameDetailsFirstDynamicIndex = 10;
-
-// Return an array with frame details
-// args[0]: number: break id
-// args[1]: number: frame index
-//
-// The array returned contains the following information:
-// 0: Frame id
-// 1: Receiver
-// 2: Function
-// 3: Script
-// 4: Argument count
-// 5: Local count
-// 6: Source position
-// 7: Constructor call
-// 8: Is at return
-// 9: Flags
-// Arguments name, value
-// Locals name, value
-// Return value if any
-RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- Heap* heap = isolate->heap();
-
- // Find the relevant frame with the requested index.
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there are no JavaScript stack frames return undefined.
- return heap->undefined_value();
- }
-
- StackTraceFrameIterator it(isolate, id);
- // Inlined frame index in optimized frame, starting from outer function.
- int inlined_frame_index =
- DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
- if (inlined_frame_index == -1) return heap->undefined_value();
-
- FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save =
- DebugFrameHelper::FindSavedContextForFrame(isolate, it.frame());
-
- // Get the frame id.
- Handle<Object> frame_id(DebugFrameHelper::WrapFrameId(it.frame()->id()),
- isolate);
-
- if (frame_inspector.IsWasm()) {
- // Create the details array (no dynamic information for wasm).
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(kFrameDetailsFirstDynamicIndex);
-
- // Add the frame id.
- details->set(kFrameDetailsFrameIdIndex, *frame_id);
-
- // Add the function name.
- Handle<String> func_name = frame_inspector.GetFunctionName();
- details->set(kFrameDetailsFunctionIndex, *func_name);
-
- // Add the script wrapper
- Handle<Object> script_wrapper =
- Script::GetWrapper(frame_inspector.GetScript());
- details->set(kFrameDetailsScriptIndex, *script_wrapper);
-
- // Add the arguments count.
- details->set(kFrameDetailsArgumentCountIndex, Smi::kZero);
-
- // Add the locals count
- details->set(kFrameDetailsLocalCountIndex, Smi::kZero);
-
- // Add the source position.
- int position = frame_inspector.GetSourcePosition();
- details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
-
- // Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(false));
-
- // Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(false));
-
- // Add flags to indicate information on whether this frame is
- // bit 0: invoked in the debugger context.
- // bit 1: optimized frame.
- // bit 2: inlined in optimized frame
- int flags = inlined_frame_index << 2;
- if (*save->context() == *isolate->debug()->debug_context()) {
- flags |= 1 << 0;
- }
- details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
-
- return *isolate->factory()->NewJSArrayWithElements(details);
- }
-
- // Find source position in unoptimized code.
- int position = frame_inspector.GetSourcePosition();
-
- // Handle JavaScript frames.
- bool is_optimized = it.frame()->is_optimized();
-
- // Check for constructor frame.
- bool constructor = frame_inspector.IsConstructor();
-
- // Get scope info and read from it for local variable information.
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(frame_inspector.GetFunction());
- CHECK(function->shared()->IsSubjectToDebugging());
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- DCHECK(*scope_info != ScopeInfo::Empty(isolate));
-
- // Get the locals names and values into a temporary array.
- Handle<Object> maybe_context = frame_inspector.GetContext();
- const int local_count_with_synthetic = maybe_context->IsContext()
- ? scope_info->LocalCount()
- : scope_info->StackLocalCount();
- int local_count = local_count_with_synthetic;
- for (int slot = 0; slot < local_count_with_synthetic; ++slot) {
- // Hide compiler-introduced temporary variables, whether on the stack or on
- // the context.
- if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(slot))) {
- local_count--;
- }
- }
-
- std::vector<Handle<Object>> locals;
- // Fill in the values of the locals.
- int i = 0;
- for (; i < scope_info->StackLocalCount(); ++i) {
- // Use the value from the stack.
- if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(i))) continue;
- locals.emplace_back(scope_info->LocalName(i), isolate);
- Handle<Object> value =
- frame_inspector.GetExpression(scope_info->StackLocalIndex(i));
- // TODO(yangguo): We convert optimized out values to {undefined} when they
- // are passed to the debugger. Eventually we should handle them somehow.
- if (value->IsOptimizedOut(isolate)) {
- value = isolate->factory()->undefined_value();
- }
- locals.push_back(value);
- }
- if (static_cast<int>(locals.size()) < local_count * 2) {
- // Get the context containing declarations.
- DCHECK(maybe_context->IsContext());
- Handle<Context> context(Context::cast(*maybe_context)->closure_context());
-
- for (; i < scope_info->LocalCount(); ++i) {
- Handle<String> name(scope_info->LocalName(i));
- if (ScopeInfo::VariableIsSynthetic(*name)) continue;
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- locals.push_back(name);
- int context_slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
- Object* value = context->get(context_slot_index);
- locals.emplace_back(value, isolate);
- }
- }
-
- // Check whether this frame is positioned at return. If not top
- // frame or if the frame is optimized it cannot be at a return.
- bool at_return = false;
- if (!is_optimized && index == 0) {
- at_return = isolate->debug()->IsBreakAtReturn(it.javascript_frame());
- }
-
- // If positioned just before return find the value to be returned and add it
- // to the frame information.
- Handle<Object> return_value = isolate->factory()->undefined_value();
- if (at_return) {
- return_value = handle(isolate->debug()->return_value(), isolate);
- }
-
- // Now advance to the arguments adapter frame (if any). It contains all
- // the provided parameters whereas the function frame always have the number
- // of arguments matching the functions parameters. The rest of the
- // information (except for what is collected above) is the same.
- if ((inlined_frame_index == 0) &&
- it.javascript_frame()->has_adapted_arguments()) {
- it.AdvanceOneFrame();
- DCHECK(it.frame()->is_arguments_adaptor());
- frame_inspector.SetArgumentsFrame(it.frame());
- }
-
- // Find the number of arguments to fill. At least fill the number of
- // parameters for the function and fill more if more parameters are provided.
- int argument_count = scope_info->ParameterCount();
- if (argument_count < frame_inspector.GetParametersCount()) {
- argument_count = frame_inspector.GetParametersCount();
- }
-
- // Calculate the size of the result.
- int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + local_count) + (at_return ? 1 : 0);
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Add the frame id.
- details->set(kFrameDetailsFrameIdIndex, *frame_id);
-
- // Add the function (same as in function frame).
- details->set(kFrameDetailsFunctionIndex, *(frame_inspector.GetFunction()));
-
- // Add the script wrapper
- Handle<Object> script_wrapper =
- Script::GetWrapper(frame_inspector.GetScript());
- details->set(kFrameDetailsScriptIndex, *script_wrapper);
-
- // Add the arguments count.
- details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
-
- // Add the locals count
- details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(local_count));
-
- // Add the source position.
- if (position != kNoSourcePosition) {
- details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
- } else {
- details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
- }
-
- // Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(constructor));
-
- // Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
-
- // Add flags to indicate information on whether this frame is
- // bit 0: invoked in the debugger context.
- // bit 1: optimized frame.
- // bit 2: inlined in optimized frame
- int flags = 0;
- if (*save->context() == *isolate->debug()->debug_context()) {
- flags |= 1 << 0;
- }
- if (is_optimized) {
- flags |= 1 << 1;
- flags |= inlined_frame_index << 2;
- }
- details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
-
- // Fill the dynamic part.
- int details_index = kFrameDetailsFirstDynamicIndex;
-
- // Add arguments name and value.
- for (int i = 0; i < argument_count; i++) {
- // Name of the argument.
- if (i < scope_info->ParameterCount()) {
- details->set(details_index++, scope_info->ParameterName(i));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
-
- // Parameter value.
- if (i < frame_inspector.GetParametersCount()) {
- // Get the value from the stack.
- details->set(details_index++, *(frame_inspector.GetParameter(i)));
- } else {
- details->set(details_index++, heap->undefined_value());
- }
- }
-
- // Add locals name and value from the temporary copy from the function frame.
- for (const auto& local : locals) details->set(details_index++, *local);
-
- // Add the value being returned.
- if (at_return) {
- details->set(details_index++, *return_value);
- }
-
- // Add the receiver (same as in function frame).
- Handle<Object> receiver = frame_inspector.GetReceiver();
- DCHECK(function->shared()->IsUserJavaScript());
- // Optimized frames only restore the receiver as best-effort (see
- // OptimizedFrame::Summarize).
- DCHECK_IMPLIES(!is_optimized && is_sloppy(shared->language_mode()),
- receiver->IsJSReceiver());
- details->set(kFrameDetailsReceiverIndex, *receiver);
-
- DCHECK_EQ(details_size, details_index);
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetScopeCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- StackTraceFrameIterator it(isolate, id);
- StandardFrame* frame = it.frame();
- if (it.frame()->is_wasm()) return 0;
-
- FrameInspector frame_inspector(frame, 0, isolate);
-
- // Count the visible scopes.
- int n = 0;
- for (ScopeIterator it(isolate, &frame_inspector); !it.Done(); it.Next()) {
- n++;
- }
-
- return Smi::FromInt(n);
-}
-
-
-// Return an array with scope details
-// args[0]: number: break id
-// args[1]: number: frame index
-// args[2]: number: inlined frame index
-// args[3]: number: scope index
-//
-// The array returned contains the following information:
-// 0: Scope type
-// 1: Scope object
-RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- StackTraceFrameIterator frame_it(isolate, id);
- // Wasm has no scopes, this must be javascript.
- JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
-
- // Find the requested scope.
- int n = 0;
- ScopeIterator it(isolate, &frame_inspector);
- for (; !it.Done() && n < index; it.Next()) {
- n++;
- }
- if (it.Done()) {
- return isolate->heap()->undefined_value();
- }
- RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
-}
-
-
-// Return an array of scope details
-// args[0]: number: break id
-// args[1]: number: frame index
-// args[2]: number: inlined frame index
-// args[3]: boolean: ignore nested scopes
-//
-// The array returned contains arrays with the following information:
-// 0: Scope type
-// 1: Scope object
-RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3 || args.length() == 4);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
-
- ScopeIterator::Option option = ScopeIterator::DEFAULT;
- if (args.length() == 4) {
- CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
- if (flag) option = ScopeIterator::IGNORE_NESTED_SCOPES;
- }
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- StackTraceFrameIterator frame_it(isolate, id);
- StandardFrame* frame = frame_it.frame();
-
- // Handle wasm frames specially. They provide exactly two scopes (global /
- // local).
- if (frame->is_wasm_interpreter_entry()) {
- Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame)->debug_info(), isolate);
- return *WasmDebugInfo::GetScopeDetails(debug_info, frame->fp(),
- inlined_frame_index);
- }
-
- FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
- std::vector<Handle<JSObject>> result;
- ScopeIterator it(isolate, &frame_inspector, option);
- for (; !it.Done(); it.Next()) {
- Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- it.MaterializeScopeDetails());
- result.push_back(details);
- }
-
- int result_size = static_cast<int>(result.size());
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(result_size);
- for (int i = 0; i < result_size; ++i) {
- array->set(i, *result[i]);
- }
- return *isolate->factory()->NewJSArrayWithElements(array);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
-
- // Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
-
- // Count the visible scopes.
- int n = 0;
- if (function->IsJSFunction()) {
- for (ScopeIterator it(isolate, Handle<JSFunction>::cast(function));
- !it.Done(); it.Next()) {
- n++;
- }
- }
-
- return Smi::FromInt(n);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- // Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
- // Find the requested scope.
- int n = 0;
- ScopeIterator it(isolate, fun);
- for (; !it.Done() && n < index; it.Next()) {
- n++;
- }
- if (it.Done()) {
- return isolate->heap()->undefined_value();
- }
-
- RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
-}
-
RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -977,7 +314,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
DCHECK_EQ(2, args.length());
if (!args[0]->IsJSGeneratorObject()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Check arguments.
@@ -986,7 +323,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
// Only inspect suspended generator scopes.
if (!gen->is_suspended()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Find the requested scope.
@@ -996,10 +333,10 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
n++;
}
if (it.Done()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
- RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
+ return *it.MaterializeScopeDetails();
}
static bool SetScopeVariableValue(ScopeIterator* it, int index,
@@ -1014,52 +351,22 @@ static bool SetScopeVariableValue(ScopeIterator* it, int index,
return it->SetVariableValue(variable_name, new_value);
}
-
// Change variable value in closure or local scope
// args[0]: number or JsFunction: break id or function
-// args[1]: number: frame index (when arg[0] is break id)
-// args[2]: number: inlined frame index (when arg[0] is break id)
-// args[3]: number: scope index
-// args[4]: string: variable name
-// args[5]: object: new value
+// args[1]: number: scope index
+// args[2]: string: variable name
+// args[3]: object: new value
//
// Return true if success and false otherwise
-RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
+RUNTIME_FUNCTION(Runtime_SetGeneratorScopeVariableValue) {
HandleScope scope(isolate);
- DCHECK_EQ(6, args.length());
-
- // Check arguments.
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
- CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_value, 5);
-
- bool res;
- if (args[0]->IsNumber()) {
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- StackTraceFrameIterator frame_it(isolate, id);
- // Wasm has no scopes, this must be javascript.
- JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
-
- ScopeIterator it(isolate, &frame_inspector);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- } else if (args[0]->IsJSFunction()) {
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- ScopeIterator it(isolate, fun);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- } else {
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
- ScopeIterator it(isolate, gen);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- }
-
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_value, 3);
+ ScopeIterator it(isolate, gen);
+ bool res = SetScopeVariableValue(&it, index, variable_name, new_value);
return isolate->heap()->ToBoolean(res);
}
@@ -1070,11 +377,12 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
+ Handle<SharedFunctionInfo> shared(fun->shared(), isolate);
// Find the number of break points
- Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
+ Handle<Object> break_locations =
+ Debug::GetSourceBreakLocations(isolate, shared);
if (break_locations->IsUndefined(isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Return array as JS array
return *isolate->factory()->NewJSArrayWithElements(
@@ -1082,24 +390,6 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
}
-// Change the state of break on exceptions.
-// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
-// args[1]: Boolean indicating on/off.
-RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
- CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
-
- // If the number doesn't match an enum value, the ChangeBreakOnException
- // function will default to affecting caught exceptions.
- ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
- // Update break point state.
- isolate->debug()->ChangeBreakOnException(type, enable);
- return isolate->heap()->undefined_value();
-}
-
-
// Returns the state of break on exceptions
// args[0]: boolean indicating uncaught exceptions
RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
@@ -1112,256 +402,36 @@ RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
return Smi::FromInt(result);
}
-
-// Prepare for stepping
-// args[0]: break id for checking execution state
-// args[1]: step action from the enumeration StepAction
-// args[2]: number of times to perform the step, for step out it is the number
-// of frames to step down.
-RUNTIME_FUNCTION(Runtime_PrepareStep) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- if (!args[1]->IsNumber()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- // Get the step action and check validity.
- StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
- if (step_action != StepIn && step_action != StepNext &&
- step_action != StepOut) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
-
- // Prepare step.
- isolate->debug()->PrepareStep(static_cast<StepAction>(step_action));
- return isolate->heap()->undefined_value();
-}
-
// Clear all stepping set by PrepareStep.
RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
CHECK(isolate->debug()->is_active());
isolate->debug()->ClearStepping();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- DCHECK_EQ(5, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(throw_on_side_effect, 4);
-
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
- throw_on_side_effect));
+ return ReadOnlyRoots(isolate).undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
- HandleScope scope(isolate);
-
- // Check the execution state and decode arguments frame and source to be
- // evaluated.
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-
- RETURN_RESULT_OR_FAILURE(isolate,
- DebugEvaluate::Global(isolate, source, false));
-}
-
-
-RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
+RUNTIME_FUNCTION(Runtime_DebugGetLoadedScriptIds) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
Handle<FixedArray> instances;
{
DebugScope debug_scope(isolate->debug());
- if (debug_scope.failed()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
// Fill the script objects.
instances = isolate->debug()->GetLoadedScripts();
}
// Convert the script objects to proper JS objects.
for (int i = 0; i < instances->length(); i++) {
- Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
- // Get the script wrapper in a local handle before calling GetScriptWrapper,
- // because using
- // instances->set(i, *GetScriptWrapper(script))
- // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
- // already have dereferenced the instances handle.
- Handle<JSObject> wrapper = Script::GetWrapper(script);
- instances->set(i, *wrapper);
+ Handle<Script> script(Script::cast(instances->get(i)), isolate);
+ instances->set(i, Smi::FromInt(script->id()));
}
// Return result as a JS array.
return *isolate->factory()->NewJSArrayWithElements(instances);
}
-static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
- JSObject* object,
- Object* proto) {
- PrototypeIterator iter(isolate, object, kStartAtReceiver);
- while (true) {
- iter.AdvanceIgnoringProxies();
- if (iter.IsAtEnd()) return false;
- if (iter.GetCurrent() == proto) return true;
- }
-}
-
-
-// Scan the heap for objects with direct references to an object
-// args[0]: the object to find references to
-// args[1]: constructor function for instances to exclude (Mirror)
-// args[2]: the the maximum number of objects to return
-RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
- CHECK(filter->IsUndefined(isolate) || filter->IsJSObject());
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
- CHECK_GE(max_references, 0);
-
- std::vector<Handle<JSObject>> instances;
- Heap* heap = isolate->heap();
- {
- HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
- // Get the constructor function for context extension and arguments array.
- Object* arguments_fun = isolate->sloppy_arguments_map()->GetConstructor();
- HeapObject* heap_obj;
- while ((heap_obj = iterator.next()) != nullptr) {
- if (!heap_obj->IsJSObject()) continue;
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject()) continue;
- if (obj->map()->GetConstructor() == arguments_fun) continue;
- if (!obj->ReferencesObject(*target)) continue;
- // Check filter if supplied. This is normally used to avoid
- // references from mirror objects.
- if (!filter->IsUndefined(isolate) &&
- HasInPrototypeChainIgnoringProxies(isolate, obj, *filter)) {
- continue;
- }
- if (obj->IsJSGlobalObject()) {
- obj = JSGlobalObject::cast(obj)->global_proxy();
- }
- instances.emplace_back(obj);
- if (static_cast<int32_t>(instances.size()) == max_references) break;
- }
- // Iterate the rest of the heap to satisfy HeapIterator constraints.
- while (iterator.next()) {
- }
- }
-
- Handle<FixedArray> result;
- if (instances.size() == 1 && instances.back().is_identical_to(target)) {
- // Check for circular reference only. This can happen when the object is
- // only referenced from mirrors and has a circular reference in which case
- // the object is not really alive and would have been garbage collected if
- // not referenced from the mirror.
- result = isolate->factory()->empty_fixed_array();
- } else {
- int instances_size = static_cast<int>(instances.size());
- result = isolate->factory()->NewFixedArray(instances_size);
- for (int i = 0; i < instances_size; ++i) result->set(i, *instances[i]);
- }
- return *isolate->factory()->NewJSArrayWithElements(result);
-}
-
-
-// Scan the heap for objects constructed by a specific function.
-// args[0]: the constructor to find instances of
-// args[1]: the the maximum number of objects to return
-RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
- CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
- CHECK_GE(max_references, 0);
-
- std::vector<Handle<JSObject>> instances;
- Heap* heap = isolate->heap();
- {
- HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
- HeapObject* heap_obj;
- while ((heap_obj = iterator.next()) != nullptr) {
- if (!heap_obj->IsJSObject()) continue;
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->map()->GetConstructor() != *constructor) continue;
- instances.emplace_back(obj);
- if (static_cast<int32_t>(instances.size()) == max_references) break;
- }
- // Iterate the rest of the heap to satisfy HeapIterator constraints.
- while (iterator.next()) {
- }
- }
-
- int instances_size = static_cast<int>(instances.size());
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(instances_size);
- for (int i = 0; i < instances_size; ++i) result->set(i, *instances[i]);
- return *isolate->factory()->NewJSArrayWithElements(result);
-}
-
-
-// Find the effective prototype object as returned by __proto__.
-// args[0]: the object to find the prototype for.
-RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- // TODO(1543): Come up with a solution for clients to handle potential errors
- // thrown by an intermediate proxy.
- RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
-}
-
-
-// Patches script source (should be called upon BeforeCompile event).
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-
- CHECK(script_wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(script_wrapper->value()));
-
- // The following condition is not guaranteed to hold and a failure is also
- // propagated to callers. Hence we fail gracefully here and don't crash.
- if (script->compilation_state() != Script::COMPILATION_STATE_INITIAL) {
- return isolate->ThrowIllegalOperation();
- }
-
- script->set_source(*source);
-
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
SealHandleScope shs(isolate);
@@ -1371,41 +441,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
if (f->IsJSFunction()) {
return JSFunction::cast(f)->shared()->inferred_name();
}
- return isolate->heap()->empty_string();
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
-
- if (function->IsJSBoundFunction()) {
- RETURN_RESULT_OR_FAILURE(
- isolate, JSBoundFunction::GetName(
- isolate, Handle<JSBoundFunction>::cast(function)));
- } else {
- return *JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetDebugContext) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Handle<Context> context;
- {
- DebugScope debug_scope(isolate->debug());
- if (debug_scope.failed()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
- context = isolate->debug()->GetDebugContext();
- }
- if (context.is_null()) return isolate->heap()->undefined_value();
- context->set_security_token(isolate->native_context()->security_token());
- return context->global_proxy();
+ return ReadOnlyRoots(isolate).empty_string();
}
@@ -1416,7 +452,7 @@ RUNTIME_FUNCTION(Runtime_CollectGarbage) {
DCHECK_EQ(1, args.length());
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask,
GarbageCollectionReason::kRuntime);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1431,56 +467,6 @@ RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
return Smi::FromInt(usage);
}
-
-// Finds the script object from the script data. NOTE: This operation uses
-// heap traversal to find the function generated for the source position
-// for the requested break point. For lazily compiled functions several heap
-// traversals might be required rendering this operation as a rather slow
-// operation. However for setting break points which is normally done through
-// some kind of user interaction the performance is not crucial.
-RUNTIME_FUNCTION(Runtime_GetScript) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, script_name, 0);
-
- Handle<Script> found;
- {
- Script::Iterator iterator(isolate);
- Script* script = nullptr;
- while ((script = iterator.Next()) != nullptr) {
- if (!script->name()->IsString()) continue;
- String* name = String::cast(script->name());
- if (name->Equals(*script_name)) {
- found = Handle<Script>(script, isolate);
- break;
- }
- }
- }
-
- if (found.is_null()) return isolate->heap()->undefined_value();
- return *Script::GetWrapper(found);
-}
-
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- if (script_handle->type() == Script::TYPE_WASM) {
- // Return 0 for now; this function will disappear soon anyway.
- return Smi::FromInt(0);
- }
-
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- return Smi::FromInt(line_ends_array->length());
-}
-
namespace {
int ScriptLinePosition(Handle<Script> script, int line) {
@@ -1488,7 +474,6 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (script->type() == Script::TYPE_WASM) {
return WasmModuleObject::cast(script->wasm_module_object())
- ->shared()
->GetFunctionOffset(line);
}
@@ -1504,11 +489,24 @@ int ScriptLinePosition(Handle<Script> script, int line) {
return Smi::ToInt(line_ends_array->get(line - 1)) + 1;
}
-} // namespace
+int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
+ if (line < 0 || offset < 0) return -1;
+
+ if (line == 0 || offset == 0)
+ return ScriptLinePosition(script, line) + offset;
+
+ Script::PositionInfo info;
+ if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET)) {
+ return -1;
+ }
-static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
- Script::OffsetFlag offset_flag,
- Isolate* isolate) {
+ const int total_line = info.line + line;
+ return ScriptLinePosition(script, total_line);
+}
+
+Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
+ Script::OffsetFlag offset_flag,
+ Isolate* isolate) {
Script::PositionInfo info;
if (!Script::GetPositionInfo(script, position, &info, offset_flag)) {
return isolate->factory()->null_value();
@@ -1523,37 +521,21 @@ static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
Handle<JSObject> jsinfo =
isolate->factory()->NewJSObject(isolate->object_function());
- JSObject::AddProperty(jsinfo, isolate->factory()->script_string(), script,
- NONE);
- JSObject::AddProperty(jsinfo, isolate->factory()->position_string(),
+ JSObject::AddProperty(isolate, jsinfo, isolate->factory()->script_string(),
+ script, NONE);
+ JSObject::AddProperty(isolate, jsinfo, isolate->factory()->position_string(),
handle(Smi::FromInt(position), isolate), NONE);
- JSObject::AddProperty(jsinfo, isolate->factory()->line_string(),
+ JSObject::AddProperty(isolate, jsinfo, isolate->factory()->line_string(),
handle(Smi::FromInt(info.line), isolate), NONE);
- JSObject::AddProperty(jsinfo, isolate->factory()->column_string(),
+ JSObject::AddProperty(isolate, jsinfo, isolate->factory()->column_string(),
handle(Smi::FromInt(info.column), isolate), NONE);
- JSObject::AddProperty(jsinfo, isolate->factory()->sourceText_string(),
- sourceText, NONE);
+ JSObject::AddProperty(isolate, jsinfo,
+ isolate->factory()->sourceText_string(), sourceText,
+ NONE);
return jsinfo;
}
-namespace {
-
-int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
- if (line < 0 || offset < 0) return -1;
-
- if (line == 0 || offset == 0)
- return ScriptLinePosition(script, line) + offset;
-
- Script::PositionInfo info;
- if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET)) {
- return -1;
- }
-
- const int total_line = info.line + line;
- return ScriptLinePosition(script, total_line);
-}
-
Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
Handle<Object> opt_line,
Handle<Object> opt_column,
@@ -1587,7 +569,7 @@ bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
Script* script = nullptr;
while ((script = iterator.Next()) != nullptr) {
if (script->id() == needle) {
- *result = handle(script);
+ *result = handle(script, isolate);
return true;
}
}
@@ -1597,30 +579,6 @@ bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
} // namespace
-// Get information on a specific source line and column possibly offset by a
-// fixed source position. This function is used to find a source position from
-// a line and column position. The fixed source position offset is typically
-// used to find a source position in a function based on a line and column in
-// the source for the function alone. The offset passed will then be the
-// start position of the source for the function within the full script source.
-// Note that incoming line and column parameters may be undefined, and are
-// assumed to be passed *with* offsets.
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSValue, script, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
- CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- return *ScriptLocationFromLine(isolate, script_handle, opt_line, opt_column,
- offset);
-}
-
// TODO(5530): Rename once conflicting function has been deleted.
RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
HandleScope scope(isolate);
@@ -1636,38 +594,6 @@ RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
return *ScriptLocationFromLine(isolate, script, opt_line, opt_column, offset);
}
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
- CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- const Script::OffsetFlag offset_flag =
- with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
- return *GetJSPositionInfo(script_handle, position, offset_flag, isolate);
-}
-
-// TODO(5530): Rename once conflicting function has been deleted.
-RUNTIME_FUNCTION(Runtime_ScriptPositionInfo2) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
- CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
-
- Handle<Script> script;
- CHECK(GetScriptById(isolate, scriptid, &script));
-
- const Script::OffsetFlag offset_flag =
- with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
- return *GetJSPositionInfo(script, position, offset_flag, isolate);
-}
-
// On function call, depending on circumstances, prepare for stepping in,
// or perform a side effect check.
RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
@@ -1678,16 +604,17 @@ RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
if (isolate->debug()->needs_check_on_function_call()) {
// Ensure that the callee will perform debug check on function call too.
Deoptimizer::DeoptimizeFunction(*fun);
- if (isolate->debug()->last_step_action() >= StepIn) {
+ if (isolate->debug()->last_step_action() >= StepIn ||
+ isolate->debug()->break_on_next_function_call()) {
DCHECK_EQ(isolate->debug_execution_mode(), DebugInfo::kBreakpoints);
isolate->debug()->PrepareStepIn(fun);
}
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheck(fun, receiver)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Set one shot breakpoints for the suspended generator object.
@@ -1695,7 +622,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
isolate->debug()->PrepareStepInSuspendedGenerator();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
@@ -1703,7 +630,7 @@ RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
isolate->PushPromise(promise);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1711,22 +638,7 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
DCHECK_EQ(0, args.length());
SealHandleScope shs(isolate);
isolate->PopPromise();
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- isolate->PushPromise(promise);
- int id = isolate->debug()->NextAsyncTaskId(promise);
- Handle<Symbol> async_stack_id_symbol =
- isolate->factory()->promise_async_stack_id_symbol();
- JSObject::SetProperty(promise, async_stack_id_symbol,
- handle(Smi::FromInt(id), isolate),
- LanguageMode::kStrict)
- .Assert();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugIsActive) {
@@ -1743,11 +655,11 @@ Handle<JSObject> MakeRangeObject(Isolate* isolate, const CoverageBlock& range) {
Handle<String> count_string = factory->InternalizeUtf8String("count");
Handle<JSObject> range_obj = factory->NewJSObjectWithNullProto();
- JSObject::AddProperty(range_obj, start_string,
+ JSObject::AddProperty(isolate, range_obj, start_string,
factory->NewNumberFromInt(range.start), NONE);
- JSObject::AddProperty(range_obj, end_string,
+ JSObject::AddProperty(isolate, range_obj, end_string,
factory->NewNumberFromInt(range.end), NONE);
- JSObject::AddProperty(range_obj, count_string,
+ JSObject::AddProperty(isolate, range_obj, count_string,
factory->NewNumberFromUint(range.count), NONE);
return range_obj;
@@ -1796,8 +708,8 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
Handle<JSArray> script_obj =
factory->NewJSArrayWithElements(ranges_array, PACKED_ELEMENTS);
- Handle<JSObject> wrapper = Script::GetWrapper(script_data.script);
- JSObject::AddProperty(script_obj, script_string, wrapper, NONE);
+ JSObject::AddProperty(isolate, script_obj, script_string,
+ handle(script_data.script->source(), isolate), NONE);
scripts_array->set(i, *script_obj);
}
return *factory->NewJSArrayWithElements(scripts_array, PACKED_ELEMENTS);
@@ -1808,7 +720,7 @@ RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
Coverage::SelectMode(isolate, enable ? debug::Coverage::kPreciseCount
: debug::Coverage::kBestEffort);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
@@ -1816,7 +728,7 @@ RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
Coverage::SelectMode(isolate, enable ? debug::Coverage::kBlockCount
: debug::Coverage::kBestEffort);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
@@ -1836,8 +748,67 @@ RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
coverage_info->IncrementBlockCount(coverage_array_slot_index);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
+ DCHECK_EQ(2, args.length());
+ HandleScope scope(isolate);
+ CONVERT_BOOLEAN_ARG_CHECKED(has_suspend, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
+ isolate->PopPromise();
+ if (has_suspend) {
+ isolate->OnAsyncFunctionStateChanged(promise,
+ debug::kAsyncFunctionFinished);
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, script_function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
+
+ Handle<Script> script(Script::cast(script_function->shared()->script()),
+ isolate);
+ v8::debug::LiveEditResult result;
+ LiveEdit::PatchScript(isolate, script, new_source, false, &result);
+ switch (result.status) {
+ case v8::debug::LiveEditResult::COMPILE_ERROR:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: COMPILE_ERROR"));
+ case v8::debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_RUNNING_GENERATOR"));
+ case v8::debug::LiveEditResult::BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME"));
+ case v8::debug::LiveEditResult::
+ BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME"));
+ case v8::debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_ACTIVE_FUNCTION"));
+ case v8::debug::LiveEditResult::BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME"));
+ case v8::debug::LiveEditResult::FRAME_RESTART_IS_NOT_SUPPORTED:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: FRAME_RESTART_IS_NOT_SUPPORTED"));
+ case v8::debug::LiveEditResult::OK:
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-error.cc b/deps/v8/src/runtime/runtime-error.cc
deleted file mode 100644
index 7cd98f223b..0000000000
--- a/deps/v8/src/runtime/runtime-error.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/base/platform/time.h"
-#include "src/conversions-inl.h"
-#include "src/futex-emulation.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_ErrorToString) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, recv, 0);
- RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSError) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsJSError());
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 58e47f621e..5df16faf46 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -10,6 +10,7 @@
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/objects-inl.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -20,12 +21,13 @@ namespace {
// that contains all enumerable properties of the {receiver} and its prototypes
// have none, the map of the {receiver}. This is used to speed up the check for
// deletions during a for-in.
-MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
- Isolate* const isolate = receiver->GetIsolate();
+MaybeHandle<HeapObject> Enumerate(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
JSObject::MakePrototypesFast(receiver, kStartAtReceiver, isolate);
FastKeyAccumulator accumulator(isolate, receiver,
KeyCollectionMode::kIncludePrototypes,
- ENUMERABLE_STRINGS, true);
+ ENUMERABLE_STRINGS);
+ accumulator.set_is_for_in(true);
// Test if we have an enum cache for {receiver}.
if (!accumulator.is_receiver_simple_enum()) {
Handle<FixedArray> keys;
@@ -114,7 +116,7 @@ RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Enumerate(receiver));
+ RETURN_RESULT_OR_FAILURE(isolate, Enumerate(isolate, receiver));
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 9f1cbc00e9..1057dfa177 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -28,7 +28,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetName) {
}
// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
+RUNTIME_FUNCTION(Runtime_FunctionGetScriptSource) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
@@ -36,11 +36,9 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
if (function->IsJSFunction()) {
Handle<Object> script(
Handle<JSFunction>::cast(function)->shared()->script(), isolate);
- if (script->IsScript()) {
- return *Script::GetWrapper(Handle<Script>::cast(script));
- }
+ if (script->IsScript()) return Handle<Script>::cast(script)->source();
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
@@ -64,10 +62,10 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(
- Handle<JSFunction>::cast(function)->shared());
+ Handle<JSFunction>::cast(function)->shared(), isolate);
return *SharedFunctionInfo::GetSourceCode(shared);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -80,13 +78,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
return Smi::FromInt(pos);
}
-RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- return fun->native_context()->debug_context_id();
-}
RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
@@ -104,12 +95,12 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
- Handle<SharedFunctionInfo> target_shared(target->shared());
- Handle<SharedFunctionInfo> source_shared(source->shared());
+ Handle<SharedFunctionInfo> target_shared(target->shared(), isolate);
+ Handle<SharedFunctionInfo> source_shared(source->shared(), isolate);
if (!source->is_compiled() &&
!Compiler::Compile(source, Compiler::KEEP_EXCEPTION)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
// Set the function data, scope info, formal parameter count, and the length
@@ -120,26 +111,24 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
source_shared->raw_outer_scope_info_or_feedback_metadata());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
- target_shared->set_raw_start_position_and_type(
- source_shared->raw_start_position_and_type());
- target_shared->set_raw_end_position(source_shared->raw_end_position());
bool was_native = target_shared->native();
target_shared->set_flags(source_shared->flags());
target_shared->set_native(was_native);
- target_shared->set_function_literal_id(source_shared->function_literal_id());
-
target_shared->set_scope_info(source_shared->scope_info());
Handle<Object> source_script(source_shared->script(), isolate);
+ int function_literal_id = source_shared->FunctionLiteralId(isolate);
if (source_script->IsScript()) {
SharedFunctionInfo::SetScript(source_shared,
- isolate->factory()->undefined_value());
+ isolate->factory()->undefined_value(),
+ function_literal_id);
}
- SharedFunctionInfo::SetScript(target_shared, source_script);
+ SharedFunctionInfo::SetScript(target_shared, source_script,
+ function_literal_id);
// Set the code of the target function.
target->set_code(source_shared->GetCode());
- Handle<Context> context(source->context());
+ Handle<Context> context(source->context(), isolate);
target->set_context(*context);
// Make sure we get a fresh copy of the feedback vector to avoid cross
@@ -150,7 +139,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
if (isolate->logger()->is_listening_to_code_events() ||
isolate->is_profiling()) {
isolate->logger()->LogExistingFunction(
- source_shared, Handle<AbstractCode>(source_shared->abstract_code()));
+ source_shared, handle(source_shared->abstract_code(), isolate));
}
return *target;
@@ -170,7 +159,7 @@ RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
JSFunction* func = JSFunction::cast(object);
func->shared()->set_native(true);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -205,15 +194,5 @@ RUNTIME_FUNCTION(Runtime_IsFunction) {
}
-RUNTIME_FUNCTION(Runtime_FunctionToString) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- return function->IsJSBoundFunction()
- ? *JSBoundFunction::ToString(
- Handle<JSBoundFunction>::cast(function))
- : *JSFunction::ToString(Handle<JSFunction>::cast(function));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index 5c2106e685..96f538e4f3 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -39,7 +39,7 @@ RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
CONVERT_BOOLEAN_ARG_CHECKED(set, 0);
isolate->set_allow_atomics_wait(set);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 3c7c808c30..02068ec7a9 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -11,12 +11,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsJSGeneratorObject) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsJSGeneratorObject());
-}
-
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -26,15 +20,17 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared()->HasBytecodeArray());
- int size = function->shared()->GetBytecodeArray()->register_count();
- Handle<FixedArray> register_file = isolate->factory()->NewFixedArray(size);
+ int size = function->shared()->internal_formal_parameter_count() +
+ function->shared()->GetBytecodeArray()->register_count();
+ Handle<FixedArray> parameters_and_registers =
+ isolate->factory()->NewFixedArray(size);
Handle<JSGeneratorObject> generator =
isolate->factory()->NewJSGeneratorObject(function);
generator->set_function(*function);
generator->set_context(isolate->context());
generator->set_receiver(*receiver);
- generator->set_register_file(*register_file);
+ generator->set_parameters_and_registers(*parameters_and_registers);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
if (generator->IsJSAsyncGeneratorObject()) {
Handle<JSAsyncGeneratorObject>::cast(generator)->set_is_awaiting(0);
@@ -56,14 +52,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
return generator->function();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
- return generator->receiver();
-}
-
RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
@@ -94,23 +82,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
- return Smi::FromInt(generator->continuation());
-}
-
-RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
- if (!generator->is_suspended()) return isolate->heap()->undefined_value();
- return Smi::FromInt(generator->source_position());
-}
-
// Return true if {generator}'s PC has a catch handler. This allows
// catch prediction to happen from the AsyncGeneratorResumeNext stub.
RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
@@ -125,7 +96,7 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
// If state is 0 ("suspendedStart"), there is guaranteed to be no catch
// handler. Otherwise, if state is below 0, the generator is closed and will
// not reach a catch handler.
- if (state < 1) return isolate->heap()->false_value();
+ if (state < 1) return ReadOnlyRoots(isolate).false_value();
SharedFunctionInfo* shared = generator->function()->shared();
DCHECK(shared->HasBytecodeArray());
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 0c4ddc3c0b..310c20b102 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -27,13 +27,7 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
CHECK(isolate->bootstrapper()->IsActive());
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_IsScriptWrapper) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsScriptWrapper());
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
@@ -55,13 +49,13 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
CHECK(array->HasFastElements());
CHECK(isolate->bootstrapper()->IsActive());
Handle<Context> native_context = isolate->native_context();
- Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()), isolate);
int length = Smi::ToInt(array->length());
for (int i = 0; i < length; i += 2) {
CHECK(fixed_array->get(i)->IsString());
- Handle<String> name(String::cast(fixed_array->get(i)));
+ Handle<String> name(String::cast(fixed_array->get(i)), isolate);
CHECK(fixed_array->get(i + 1)->IsJSObject());
- Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)));
+ Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)), isolate);
int index = Context::ImportedFieldIndexForName(name);
if (index == Context::kNotFound) {
index = Context::IntrinsicIndexForName(name);
@@ -69,7 +63,7 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
CHECK_NE(index, Context::kNotFound);
native_context->set(index, *object);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_Throw) {
@@ -303,7 +297,7 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(length, 0);
- if (length == 0) return isolate->heap()->empty_string();
+ if (length == 0) return ReadOnlyRoots(isolate).empty_string();
Handle<SeqOneByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawOneByteString(length));
@@ -314,7 +308,7 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(length, 0);
- if (length == 0) return isolate->heap()->empty_string();
+ if (length == 0) return ReadOnlyRoots(isolate).empty_string();
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawTwoByteString(length));
@@ -336,7 +330,7 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
std::vector<FrameSummary> frames;
it.frame()->Summarize(&frames);
auto& summary = frames.back().AsJavaScript();
- Handle<SharedFunctionInfo> shared(summary.function()->shared());
+ Handle<SharedFunctionInfo> shared(summary.function()->shared(), isolate);
Handle<Object> script(shared->script(), isolate);
int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
if (script->IsScript() &&
@@ -353,7 +347,7 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
CallPrinter::ErrorHint* hint) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
- ParseInfo info(location.shared());
+ ParseInfo info(isolate, location.shared());
if (parsing::ParseAny(&info, location.shared(), isolate)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
@@ -477,7 +471,7 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(counter, 0);
isolate->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(counter));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
@@ -522,7 +516,7 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
std::fclose(f);
else
std::fflush(f);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
}
@@ -565,7 +559,8 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
Handle<Object> next;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, next,
- Object::GetProperty(sync_iterator, isolate->factory()->next_string()));
+ Object::GetProperty(isolate, sync_iterator,
+ isolate->factory()->next_string()));
return *isolate->factory()->NewJSAsyncFromSyncIterator(
Handle<JSReceiver>::cast(sync_iterator), next);
@@ -576,7 +571,7 @@ RUNTIME_FUNCTION(Runtime_CreateTemplateObject) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
- return *TemplateObjectDescription::CreateTemplateObject(description);
+ return *TemplateObjectDescription::CreateTemplateObject(isolate, description);
}
RUNTIME_FUNCTION(Runtime_ReportMessage) {
@@ -593,7 +588,7 @@ RUNTIME_FUNCTION(Runtime_ReportMessage) {
isolate->set_pending_exception(*message_obj);
isolate->ReportPendingMessagesFromJavaScript();
isolate->clear_pending_exception();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 30458c0acc..bc48bb4ab7 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -58,7 +58,7 @@ void AdvanceToOffsetForTracing(
interpreter::OperandScale::kSingle));
}
-void PrintRegisters(std::ostream& os, bool is_input,
+void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
interpreter::BytecodeArrayIterator& bytecode_iterator,
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
@@ -82,8 +82,7 @@ void PrintRegisters(std::ostream& os, bool is_input,
}
// Print the registers.
- JavaScriptFrameIterator frame_iterator(
- bytecode_iterator.bytecode_array()->GetIsolate());
+ JavaScriptFrameIterator frame_iterator(isolate);
InterpretedFrame* frame =
reinterpret_cast<InterpretedFrame*>(frame_iterator.frame());
int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
@@ -119,7 +118,7 @@ void PrintRegisters(std::ostream& os, bool is_input,
RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
if (!FLAG_trace_ignition) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
SealHandleScope shs(isolate);
@@ -132,7 +131,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
AdvanceToOffsetForTracing(bytecode_iterator, offset);
if (offset == bytecode_iterator.current_offset()) {
- OFStream os(stdout);
+ StdoutStream os;
// Print bytecode.
const uint8_t* base_address = reinterpret_cast<const uint8_t*>(
@@ -144,16 +143,16 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
bytecode_array->parameter_count());
os << std::endl;
// Print all input registers and accumulator.
- PrintRegisters(os, true, bytecode_iterator, accumulator);
+ PrintRegisters(isolate, os, true, bytecode_iterator, accumulator);
os << std::flush;
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
if (!FLAG_trace_ignition) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
SealHandleScope shs(isolate);
@@ -171,12 +170,12 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
if (bytecode_iterator.current_operand_scale() ==
interpreter::OperandScale::kSingle ||
offset > bytecode_iterator.current_offset()) {
- OFStream os(stdout);
+ StdoutStream os;
// Print all output registers and accumulator.
- PrintRegisters(os, false, bytecode_iterator, accumulator);
+ PrintRegisters(isolate, os, false, bytecode_iterator, accumulator);
os << std::flush;
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
#endif
@@ -185,7 +184,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
if (!FLAG_trace_feedback_updates) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
SealHandleScope shs(isolate);
@@ -196,7 +195,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
int slot_count = function->feedback_vector()->metadata()->slot_count();
- OFStream os(stdout);
+ StdoutStream os;
os << "[Feedback slot " << slot << "/" << slot_count << " in ";
function->shared()->ShortPrint(os);
os << " updated to ";
@@ -211,7 +210,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
os << "]" << std::endl;
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
#endif
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index fced753c26..5d39074984 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -20,6 +20,7 @@
#include "src/intl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/intl-objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/utils.h"
@@ -119,6 +120,11 @@ RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
// so we should filter from all locales, but it's not clear how; see
// https://ssl.icu-project.org/trac/ticket/12756
available_locales = icu::Locale::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("relativetimeformat"))) {
+ // TODO(ftang): for now just use
+ // icu::NumberFormat::getAvailableLocales(count) until we migrate to
+ // Intl::GetAvailableLocales()
+ available_locales = icu::NumberFormat::getAvailableLocales(count);
} else {
UNREACHABLE();
}
@@ -172,36 +178,18 @@ RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
return *factory->NewStringFromStaticChars("und");
}
-RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
-
- if (!input->IsJSObject()) return isolate->heap()->false_value();
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
- return isolate->heap()->ToBoolean(!tag->IsUndefined(isolate));
-}
-
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
+ CONVERT_SMI_ARG_CHECKED(expected_type_int, 1);
- if (!input->IsJSObject()) return isolate->heap()->false_value();
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
+ Intl::Type expected_type = Intl::TypeFromInt(expected_type_int);
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
- return isolate->heap()->ToBoolean(tag->IsString() &&
- String::cast(*tag)->Equals(*expected_type));
+ return isolate->heap()->ToBoolean(
+ Intl::IsObjectOfType(isolate, input, expected_type));
}
RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
@@ -210,12 +198,19 @@ RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
+
+#ifdef DEBUG
+ // TypeFromSmi does correctness checks.
+ Intl::Type type_intl = Intl::TypeFromSmi(*type);
+ USE(type_intl);
+#endif
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- JSObject::SetProperty(input, marker, type, LanguageMode::kStrict).Assert();
+ JSObject::SetProperty(isolate, input, marker, type, LanguageMode::kStrict)
+ .Assert();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
@@ -228,7 +223,7 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<JSFunction> constructor(
- isolate->native_context()->intl_date_time_format_function());
+ isolate->native_context()->intl_date_time_format_function(), isolate);
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
@@ -287,7 +282,7 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<JSFunction> constructor(
- isolate->native_context()->intl_number_format_function());
+ isolate->native_context()->intl_number_format_function(), isolate);
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
@@ -299,7 +294,8 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
if (!number_format) return isolate->ThrowIllegalOperation();
- local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(number_format));
+ local_object->SetEmbedderField(NumberFormat::kDecimalFormatIndex,
+ reinterpret_cast<Smi*>(number_format));
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
@@ -314,22 +310,15 @@ RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(number));
+ Handle<Object> number_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_obj,
+ Object::ToNumber(isolate, value));
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
- CHECK_NOT_NULL(number_format);
-
- icu::UnicodeString result;
- number_format->format(value->Number(), result);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
+ double number = number_obj->Number();
+ RETURN_RESULT_OR_FAILURE(isolate, NumberFormat::FormatNumber(
+ isolate, number_format_holder, number));
}
RUNTIME_FUNCTION(Runtime_CurrencyDigits) {
@@ -359,7 +348,7 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<JSFunction> constructor(
- isolate->native_context()->intl_collator_function());
+ isolate->native_context()->intl_collator_function(), isolate);
Handle<JSObject> collator_holder;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_holder,
@@ -385,8 +374,8 @@ RUNTIME_FUNCTION(Runtime_InternalCompare) {
icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
CHECK_NOT_NULL(collator);
- string1 = String::Flatten(string1);
- string2 = String::Flatten(string2);
+ string1 = String::Flatten(isolate, string1);
+ string2 = String::Flatten(isolate, string2);
UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
@@ -419,7 +408,7 @@ RUNTIME_FUNCTION(Runtime_CreatePluralRules) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<JSFunction> constructor(
- isolate->native_context()->intl_plural_rules_function());
+ isolate->native_context()->intl_plural_rules_function(), isolate);
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
@@ -495,7 +484,7 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
Handle<JSFunction> constructor(
- isolate->native_context()->intl_v8_break_iterator_function());
+ isolate->native_context()->intl_v8_break_iterator_function(), isolate);
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
@@ -537,7 +526,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
delete u_text;
int length = text->length();
- text = String::Flatten(text);
+ text = String::Flatten(isolate, text);
DisallowHeapAllocation no_gc;
String::FlatContent flat = text->GetFlatContent();
std::unique_ptr<uc16[]> sap;
@@ -547,7 +536,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
break_iterator->setText(*u_text);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
@@ -611,7 +600,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
return *isolate->factory()->NewStringFromStaticChars("none");
} else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return isolate->heap()->number_string();
+ return ReadOnlyRoots(isolate).number_string();
} else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
return *isolate->factory()->NewStringFromStaticChars("letter");
} else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
@@ -627,7 +616,7 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- s = String::Flatten(s);
+ s = String::Flatten(isolate, s);
return ConvertToLower(s, isolate);
}
@@ -635,7 +624,7 @@ RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- s = String::Flatten(s);
+ s = String::Flatten(isolate, s);
return ConvertToUpper(s, isolate);
}
@@ -649,8 +638,8 @@ RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
// Primary language tag can be up to 8 characters long in theory.
// https://tools.ietf.org/html/bcp47#section-2.2.1
DCHECK_LE(lang_arg->length(), 8);
- lang_arg = String::Flatten(lang_arg);
- s = String::Flatten(s);
+ lang_arg = String::Flatten(isolate, lang_arg);
+ s = String::Flatten(isolate, s);
// All the languages requiring special-handling have two-letter codes.
// Note that we have to check for '!= 2' here because private-use language
@@ -685,7 +674,8 @@ RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
+ if (isolate->serializer_enabled())
+ return ReadOnlyRoots(isolate).undefined_value();
if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
Handle<FixedArray> date_cache_version =
isolate->factory()->NewFixedArray(1, TENURED);
@@ -699,5 +689,20 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
return date_cache_version->get(0);
}
+RUNTIME_FUNCTION(Runtime_IntlUnwrapReceiver) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_SMI_ARG_CHECKED(type_int, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, method, 3);
+ CONVERT_BOOLEAN_ARG_CHECKED(check_legacy_constructor, 4);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::UnwrapReceiver(isolate, receiver, constructor,
+ Intl::TypeFromInt(type_int), method,
+ check_legacy_constructor));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 71e91bab35..6e17ba85d4 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -7,9 +7,10 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
#include "src/ast/ast.h"
-#include "src/ast/compile-time-value.h"
#include "src/isolate-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -31,7 +32,7 @@ void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
}
Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<FixedArray> compile_time_value,
+ Handle<Object> description,
PretenureFlag pretenure_flag);
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
@@ -48,6 +49,11 @@ class JSObjectWalkVisitor {
protected:
V8_WARN_UNUSED_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
Handle<JSObject> object, Handle<JSObject> value) {
+ // Dont create allocation sites for nested object literals
+ if (!value->IsJSArray()) {
+ return StructureWalk(value);
+ }
+
Handle<AllocationSite> current_site = site_context()->EnterNewScope();
MaybeHandle<JSObject> copy_of_value = StructureWalk(value);
site_context()->ExitScope(current_site, value);
@@ -105,7 +111,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
// Deep copy own properties. Arrays only have 1 property "length".
if (!copy->IsJSArray()) {
if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors(),
+ isolate);
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
DCHECK_EQ(kField, descriptors->GetDetails(i).location());
@@ -120,14 +127,14 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (copying) copy->FastPropertyAtPut(index, *value);
} else if (copying && raw->IsMutableHeapNumber()) {
DCHECK(descriptors->GetDetails(i).representation().IsDouble());
- uint64_t double_value = HeapNumber::cast(raw)->value_as_bits();
- Handle<HeapNumber> value = isolate->factory()->NewHeapNumber(MUTABLE);
- value->set_value_as_bits(double_value);
+ uint64_t double_value = MutableHeapNumber::cast(raw)->value_as_bits();
+ auto value =
+ isolate->factory()->NewMutableHeapNumberFromBits(double_value);
copy->FastPropertyAtPut(index, *value);
}
}
} else {
- Handle<NameDictionary> dict(copy->property_dictionary());
+ Handle<NameDictionary> dict(copy->property_dictionary(), isolate);
for (int i = 0; i < dict->Capacity(); i++) {
Object* raw = dict->ValueAt(i);
if (!raw->IsJSObject()) continue;
@@ -147,8 +154,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
switch (copy->GetElementsKind()) {
case PACKED_ELEMENTS:
case HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
- if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()), isolate);
+ if (elements->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()) {
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
DCHECK(!elements->get(i)->IsJSObject());
@@ -167,7 +174,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> element_dictionary(copy->element_dictionary());
+ Handle<NumberDictionary> element_dictionary(copy->element_dictionary(),
+ isolate);
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw = element_dictionary->ValueAt(i);
@@ -239,19 +247,22 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
if (top().is_null()) {
// We are creating the top level AllocationSite as opposed to a nested
// AllocationSite.
- InitializeTraversal(isolate()->factory()->NewAllocationSite());
+ InitializeTraversal(isolate()->factory()->NewAllocationSite(true));
scope_site = Handle<AllocationSite>(*top(), isolate());
if (FLAG_trace_creation_allocation_sites) {
- PrintF("*** Creating top level AllocationSite %p\n",
+ PrintF("*** Creating top level %s AllocationSite %p\n", "Fat",
static_cast<void*>(*scope_site));
}
} else {
DCHECK(!current().is_null());
- scope_site = isolate()->factory()->NewAllocationSite();
+ scope_site = isolate()->factory()->NewAllocationSite(false);
if (FLAG_trace_creation_allocation_sites) {
- PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
- static_cast<void*>(*top()), static_cast<void*>(*current()),
- static_cast<void*>(*scope_site));
+ PrintF(
+ "*** Creating nested %s AllocationSite (top, current, new) (%p, "
+ "%p, "
+ "%p)\n",
+ "Slim", static_cast<void*>(*top()), static_cast<void*>(*current()),
+ static_cast<void*>(*scope_site));
}
current()->set_nested_site(*scope_site);
update_current_site(*scope_site);
@@ -269,7 +280,7 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
PrintF("*** Setting AllocationSite %p transition_info %p\n",
static_cast<void*>(*scope_site), static_cast<void*>(*object));
} else {
- PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
+ PrintF("*** Setting AllocationSite (%p, %p) transition_info %p\n",
static_cast<void*>(*top()), static_cast<void*>(*scope_site),
static_cast<void*>(*object));
}
@@ -311,8 +322,8 @@ struct ObjectBoilerplate {
Handle<HeapObject> description, int flags,
PretenureFlag pretenure_flag) {
Handle<Context> native_context = isolate->native_context();
- Handle<BoilerplateDescription> boilerplate_description =
- Handle<BoilerplateDescription>::cast(description);
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description =
+ Handle<ObjectBoilerplateDescription>::cast(description);
bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
@@ -320,7 +331,8 @@ struct ObjectBoilerplate {
// slow properties mode for now. We don't go in the map cache because
// maps with constant functions can't be shared if the functions are
// not the same (which is the common case).
- int number_of_properties = boilerplate_description->backing_store_size();
+ int number_of_properties =
+ object_boilerplate_description->backing_store_size();
// Ignoring number_of_properties for force dictionary map with
// __proto__:null.
@@ -341,17 +353,16 @@ struct ObjectBoilerplate {
if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
// Add the constant properties to the boilerplate.
- int length = boilerplate_description->size();
+ int length = object_boilerplate_description->size();
// TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index++) {
- Handle<Object> key(boilerplate_description->name(index), isolate);
- Handle<Object> value(boilerplate_description->value(index), isolate);
- if (value->IsFixedArray()) {
- // The value contains the CompileTimeValue with the boilerplate
- // properties of a simple object or array literal.
- Handle<FixedArray> compile_time_value = Handle<FixedArray>::cast(value);
- value =
- InnerCreateBoilerplate(isolate, compile_time_value, pretenure_flag);
+ Handle<Object> key(object_boilerplate_description->name(index), isolate);
+ Handle<Object> value(object_boilerplate_description->value(index),
+ isolate);
+
+ if (value->IsObjectBoilerplateDescription() ||
+ value->IsArrayBoilerplateDescription()) {
+ value = InnerCreateBoilerplate(isolate, value, pretenure_flag);
}
uint32_t element_index = 0;
if (key->ToArrayIndex(&element_index)) {
@@ -385,14 +396,16 @@ struct ArrayBoilerplate {
static Handle<JSObject> Create(Isolate* isolate,
Handle<HeapObject> description, int flags,
PretenureFlag pretenure_flag) {
- Handle<ConstantElementsPair> elements =
- Handle<ConstantElementsPair>::cast(description);
- // Create the JSArray.
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+ Handle<ArrayBoilerplateDescription>::cast(description);
+
ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(elements->elements_kind());
+ array_boilerplate_description->elements_kind();
Handle<FixedArrayBase> constant_elements_values(
- elements->constant_values());
+ array_boilerplate_description->constant_elements(), isolate);
+
+ // Create the JSArray.
Handle<FixedArrayBase> copied_elements_values;
if (IsDoubleElementsKind(constant_elements_kind)) {
copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
@@ -400,7 +413,7 @@ struct ArrayBoilerplate {
} else {
DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
const bool is_cow = (constant_elements_values->map() ==
- isolate->heap()->fixed_cow_array_map());
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
if (is_cow) {
copied_elements_values = constant_elements_values;
#if DEBUG
@@ -418,14 +431,12 @@ struct ArrayBoilerplate {
copied_elements_values = fixed_array_values_copy;
FOR_WITH_HANDLE_SCOPE(
isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- if (fixed_array_values->get(i)->IsFixedArray()) {
- // The value contains the CompileTimeValue with the
- // boilerplate description of a simple object or
- // array literal.
- Handle<FixedArray> compile_time_value(
- FixedArray::cast(fixed_array_values->get(i)));
- Handle<Object> result = InnerCreateBoilerplate(
- isolate, compile_time_value, pretenure_flag);
+ Handle<Object> value(fixed_array_values->get(i), isolate);
+
+ if (value->IsArrayBoilerplateDescription() ||
+ value->IsObjectBoilerplateDescription()) {
+ Handle<Object> result =
+ InnerCreateBoilerplate(isolate, value, pretenure_flag);
fixed_array_values_copy->set(i, *result);
}
});
@@ -439,15 +450,22 @@ struct ArrayBoilerplate {
};
Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<FixedArray> compile_time_value,
+ Handle<Object> description,
PretenureFlag pretenure_flag) {
- Handle<HeapObject> elements =
- CompileTimeValue::GetElements(compile_time_value);
- int flags = CompileTimeValue::GetLiteralTypeFlags(compile_time_value);
- if (flags == CompileTimeValue::kArrayLiteralFlag) {
- return ArrayBoilerplate::Create(isolate, elements, flags, pretenure_flag);
+ if (description->IsObjectBoilerplateDescription()) {
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description =
+ Handle<ObjectBoilerplateDescription>::cast(description);
+ return ObjectBoilerplate::Create(isolate, object_boilerplate_description,
+ object_boilerplate_description->flags(),
+ pretenure_flag);
+ } else {
+ DCHECK(description->IsArrayBoilerplateDescription());
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+ Handle<ArrayBoilerplateDescription>::cast(description);
+ return ArrayBoilerplate::Create(
+ isolate, array_boilerplate_description,
+ array_boilerplate_description->elements_kind(), pretenure_flag);
}
- return ObjectBoilerplate::Create(isolate, elements, flags, pretenure_flag);
}
template <typename Boilerplate>
@@ -490,7 +508,7 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
return boilerplate;
} else {
PretenureFlag pretenure_flag =
- isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
+ Heap::InNewSpace(*vector) ? NOT_TENURED : TENURED;
boilerplate =
Boilerplate::Create(isolate, description, flags, pretenure_flag);
}
@@ -523,7 +541,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, description, 2);
+ CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ObjectBoilerplate>(isolate, vector, literals_index,
@@ -535,7 +553,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
+ CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ArrayBoilerplate>(isolate, vector, literals_index,
@@ -557,7 +575,8 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
Handle<Object> boilerplate;
if (!HasBoilerplate(isolate, literal_site)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
+ isolate, boilerplate,
+ JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
if (IsUninitializedLiteralSite(*literal_site)) {
PreInitializeLiteralSite(vector, literal_slot);
return *boilerplate;
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
deleted file mode 100644
index ee1602b712..0000000000
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/debug/debug.h"
-#include "src/debug/debug-frames.h"
-#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/isolate-inl.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-// For a script finds all SharedFunctionInfo's in the heap that points
-// to this script. Returns JSArray of SharedFunctionInfo wrapped
-// in OpaqueReferences.
-RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSValue, script_value, 0);
-
- CHECK(script_value->value()->IsScript());
- Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
-
- std::vector<Handle<SharedFunctionInfo>> found;
- Heap* heap = isolate->heap();
- {
- HeapIterator iterator(heap);
- HeapObject* heap_obj;
- while ((heap_obj = iterator.next()) != nullptr) {
- if (!heap_obj->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(heap_obj);
- if (shared->script() != *script) continue;
- found.push_back(Handle<SharedFunctionInfo>(shared));
- }
- }
-
- int found_size = static_cast<int>(found.size());
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(found_size);
- for (int i = 0; i < found_size; ++i) {
- Handle<SharedFunctionInfo> shared = found[i];
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
- Handle<String> name(shared->Name(), isolate);
- info_wrapper.SetProperties(name, shared->StartPosition(),
- shared->EndPosition(), shared);
- result->set(i, *info_wrapper.GetJSArray());
- }
- return *isolate->factory()->NewJSArrayWithElements(result);
-}
-
-
-// For a script calculates compilation information about all its functions.
-// The script source is explicitly specified by the second argument.
-// The source of the actual script is not used, however it is important that
-// all generated code keeps references to this particular instance of script.
-// Returns a JSArray of compilation infos. The array is ordered so that
-// each function with all its descendant is always stored in a continues range
-// with the function itself going first. The root function is a script function.
-RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- RETURN_RESULT_OR_FAILURE(isolate,
- LiveEdit::GatherCompileInfo(script_handle, source));
-}
-
-
-// Changes the source of the script to a new_source.
-// If old_script_name is provided (i.e. is a String), also creates a copy of
-// the script with its original source and sends notification to debugger.
-RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
-
- CHECK(original_script_value->value()->IsScript());
- Handle<Script> original_script(Script::cast(original_script_value->value()));
-
- Handle<Object> old_script = LiveEdit::ChangeScriptSource(
- original_script, new_source, old_script_name);
-
- if (old_script->IsScript()) {
- Handle<Script> script_handle = Handle<Script>::cast(old_script);
- return *Script::GetWrapper(script_handle);
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-// Recreate the shared function infos array after changing the IDs of all
-// SharedFunctionInfos.
-RUNTIME_FUNCTION(Runtime_LiveEditFixupScript) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(args.length(), 2);
- CONVERT_ARG_CHECKED(JSValue, script_value, 0);
- CONVERT_INT32_ARG_CHECKED(max_function_literal_id, 1);
-
- CHECK(script_value->value()->IsScript());
- Handle<Script> script(Script::cast(script_value->value()));
-
- LiveEdit::FixupScript(script, max_function_literal_id);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(args.length(), 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
- CONVERT_INT32_ARG_CHECKED(new_function_literal_id, 1);
- CHECK(SharedInfoWrapper::IsInstance(shared_info));
-
- LiveEdit::FunctionSourceUpdated(shared_info, new_function_literal_id);
- return isolate->heap()->undefined_value();
-}
-
-
-// Replaces code of SharedFunctionInfo with a new one.
-RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
- CHECK(SharedInfoWrapper::IsInstance(shared_info));
-
- LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
- return isolate->heap()->undefined_value();
-}
-
-
-// Connects SharedFunctionInfo to another script.
-RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1);
-
- if (function_object->IsJSValue()) {
- Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
- if (script_object->IsJSValue()) {
- CHECK(JSValue::cast(*script_object)->value()->IsScript());
- Script* script = Script::cast(JSValue::cast(*script_object)->value());
- script_object = Handle<Object>(script, isolate);
- }
- CHECK(function_wrapper->value()->IsSharedFunctionInfo());
- LiveEdit::SetFunctionScript(function_wrapper, script_object);
- } else {
- // Just ignore this. We may not have a SharedFunctionInfo for some functions
- // and we check it in this function.
- }
-
- return isolate->heap()->undefined_value();
-}
-
-
-// In a code of a parent function replaces original function as embedded object
-// with a substitution one.
-RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
- CHECK(parent_wrapper->value()->IsSharedFunctionInfo());
- CHECK(orig_wrapper->value()->IsSharedFunctionInfo());
- CHECK(subst_wrapper->value()->IsSharedFunctionInfo());
-
- LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
- subst_wrapper);
- return isolate->heap()->undefined_value();
-}
-
-
-// Updates positions of a shared function info (first parameter) according
-// to script source change. Text change is described in second parameter as
-// array of groups of 3 numbers:
-// (change_begin, change_end, change_end_new_position).
-// Each group describes a change in text; groups are sorted by change_begin.
-RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
- CHECK(SharedInfoWrapper::IsInstance(shared_array));
-
- LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
- return isolate->heap()->undefined_value();
-}
-
-
-// For array of SharedFunctionInfo's (each wrapped in JSValue)
-// checks that none of them have activations on stacks (of any thread).
-// Returns array of the same length with corresponding results of
-// LiveEdit::FunctionPatchabilityStatus type.
-RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, old_shared_array, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
- USE(new_shared_array);
- CHECK(old_shared_array->length()->IsSmi());
- CHECK(new_shared_array->length() == old_shared_array->length());
- CHECK(old_shared_array->HasFastElements());
- CHECK(new_shared_array->HasFastElements());
- int array_length = Smi::ToInt(old_shared_array->length());
- for (int i = 0; i < array_length; i++) {
- Handle<Object> old_element;
- Handle<Object> new_element;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, old_element,
- JSReceiver::GetElement(isolate, old_shared_array, i));
- CHECK(old_element->IsJSValue() &&
- Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_element,
- JSReceiver::GetElement(isolate, new_shared_array, i));
- CHECK(
- new_element->IsUndefined(isolate) ||
- (new_element->IsJSValue() &&
- Handle<JSValue>::cast(new_element)->value()->IsSharedFunctionInfo()));
- }
-
- return *LiveEdit::CheckAndDropActivations(old_shared_array, new_shared_array,
- do_drop);
-}
-
-
-// Compares 2 strings line-by-line, then token-wise and returns diff in form
-// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
-// of diff chunks.
-RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
-
- Handle<JSArray> result = LiveEdit::CompareStrings(s1, s2);
- uint32_t array_length = 0;
- CHECK(result->length()->ToArrayLength(&array_length));
- if (array_length > 0) {
- isolate->debug()->feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
- }
-
- return *result;
-}
-
-
-// Restarts a call frame and completely drops all frames above.
-// Returns true if successful. Otherwise returns undefined or an error message.
-RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK_EQ(2, args.length());
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- CHECK(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
- Heap* heap = isolate->heap();
-
- // Find the relevant frame with the requested index.
- StackFrame::Id id = isolate->debug()->break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there are no JavaScript stack frames return undefined.
- return heap->undefined_value();
- }
-
- StackTraceFrameIterator it(isolate, id);
- int inlined_jsframe_index =
- DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
- // Liveedit is not supported on Wasm.
- if (inlined_jsframe_index == -1 || it.is_wasm()) {
- return heap->undefined_value();
- }
- // We don't really care what the inlined frame index is, since we are
- // throwing away the entire frame anyways.
- const char* error_message = LiveEdit::RestartFrame(it.javascript_frame());
- if (error_message) {
- return *(isolate->factory()->InternalizeUtf8String(error_message));
- }
- return heap->true_value();
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index a758050306..5c59a2f997 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -7,6 +7,8 @@
#include "src/arguments.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/objects/js-promise.h"
+#include "src/objects/module.h"
namespace v8 {
namespace internal {
@@ -17,7 +19,7 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, specifier, 1);
- Handle<Script> script(Script::cast(function->shared()->script()));
+ Handle<Script> script(Script::cast(function->shared()->script()), isolate);
while (script->has_eval_from_shared()) {
script =
@@ -33,14 +35,14 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(module_request, 0);
- Handle<Module> module(isolate->context()->module());
- return *Module::GetModuleNamespace(module, module_request);
+ Handle<Module> module(isolate->context()->module(), isolate);
+ return *Module::GetModuleNamespace(isolate, module, module_request);
}
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<Module> module(isolate->context()->module());
+ Handle<Module> module(isolate->context()->module(), isolate);
return *isolate->RunHostInitializeImportMetaObjectCallback(module);
}
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 643e1b7edb..2eac95923f 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -25,7 +25,7 @@ RUNTIME_FUNCTION(Runtime_StringToNumber) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- return *String::ToNumber(subject);
+ return *String::ToNumber(isolate, subject);
}
@@ -40,15 +40,16 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
Handle<String> subject;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
Object::ToString(isolate, string));
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
// Convert {radix} to Int32.
if (!radix->IsNumber()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix,
+ Object::ToNumber(isolate, radix));
}
int radix32 = DoubleToInt32(radix->Number());
if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
- return isolate->heap()->nan_value();
+ return ReadOnlyRoots(isolate).nan_value();
}
double result = StringToInt(isolate, subject, radix32);
@@ -62,9 +63,9 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- double value =
- StringToDouble(isolate->unicode_cache(), subject, ALLOW_TRAILING_JUNK,
- std::numeric_limits<double>::quiet_NaN());
+ double value = StringToDouble(isolate, isolate->unicode_cache(), subject,
+ ALLOW_TRAILING_JUNK,
+ std::numeric_limits<double>::quiet_NaN());
return *isolate->factory()->NewNumber(value);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 1e1eb77b74..b3a8b18906 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -22,7 +22,7 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> key,
bool* is_found_out) {
if (object->IsNullOrUndefined(isolate)) {
- if (*key == isolate->heap()->iterator_symbol()) {
+ if (*key == ReadOnlyRoots(isolate).iterator_symbol()) {
return Runtime::ThrowIteratorError(isolate, object);
}
THROW_NEW_ERROR(
@@ -82,7 +82,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
// Attempt dictionary lookup.
GlobalDictionary* dictionary =
JSGlobalObject::cast(*receiver)->global_dictionary();
- int entry = dictionary->FindEntry(key);
+ int entry = dictionary->FindEntry(isolate, key);
if (entry != GlobalDictionary::kNotFound) {
PropertyCell* cell = dictionary->CellAt(entry);
if (cell->property_details().kind() == kData) {
@@ -96,7 +96,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
} else if (!receiver->HasFastProperties()) {
// Attempt dictionary lookup.
NameDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
+ int entry = dictionary->FindEntry(isolate, key);
if ((entry != NameDictionary::kNotFound) &&
(dictionary->DetailsAt(entry).kind() == kData)) {
Object* value = dictionary->ValueAt(entry);
@@ -130,7 +130,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
if (index >= 0 && index < str->length()) {
Factory* factory = isolate->factory();
return factory->LookupSingleCharacterStringFromCode(
- String::Flatten(str)->Get(index));
+ String::Flatten(isolate, str)->Get(index));
}
}
@@ -180,9 +180,9 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
if (!index.is_inobject() && index.outobject_array_index() == 0) {
DCHECK(!Map::cast(backpointer)->HasOutOfObjectProperties());
// Clear out the properties backing store.
- receiver->SetProperties(isolate->heap()->empty_fixed_array());
+ receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
- Object* filler = isolate->heap()->one_pointer_filler_map();
+ Object* filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
@@ -198,12 +198,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// that depends on the assumption that no object that reached this map
// transitions away from it without triggering the "deoptimize dependent
// code" mechanism.
- map->NotifyLeafMapLayoutChange();
+ map->NotifyLeafMapLayoutChange(isolate);
// Finally, perform the map rollback.
receiver->synchronized_set_map(Map::cast(backpointer));
#if VERIFY_HEAP
- receiver->HeapObjectVerify();
- receiver->property_array()->PropertyArrayVerify();
+ receiver->HeapObjectVerify(isolate);
+ receiver->property_array()->PropertyArrayVerify(isolate);
#endif
return true;
}
@@ -244,6 +244,56 @@ RUNTIME_FUNCTION(Runtime_ObjectKeys) {
return *keys;
}
+// ES #sec-object.getOwnPropertyNames
+RUNTIME_FUNCTION(Runtime_ObjectGetOwnPropertyNames) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at(0);
+
+ // Convert the {object} to a proper {receiver}.
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ // Collect the own keys for the {receiver}.
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+ SKIP_SYMBOLS,
+ GetKeysConversion::kConvertToString));
+ return *keys;
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectGetOwnPropertyNamesTryFast) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at(0);
+
+ // Convert the {object} to a proper {receiver}.
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ Handle<Map> map(receiver->map(), isolate);
+
+ int nod = map->NumberOfOwnDescriptors();
+ Handle<FixedArray> keys;
+ if (nod != 0 && map->NumberOfEnumerableProperties() == nod) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+ SKIP_SYMBOLS,
+ GetKeysConversion::kConvertToString));
+ }
+
+ return *keys;
+}
+
// ES6 19.1.3.2
RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
HandleScope scope(isolate);
@@ -265,12 +315,12 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
if (key.is_null()) {
DCHECK(key_is_array_index);
// Namespace objects can't have indexed properties.
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
Maybe<bool> result =
JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), key);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
} else if (object->IsJSObject()) {
@@ -286,16 +336,16 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
key_is_array_index ? LookupIterator(isolate, js_obj, index, js_obj, c)
: LookupIterator(js_obj, key, js_obj, c);
Maybe<bool> maybe = JSReceiver::HasProperty(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
DCHECK(!isolate->has_pending_exception());
- if (maybe.FromJust()) return isolate->heap()->true_value();
+ if (maybe.FromJust()) return ReadOnlyRoots(isolate).true_value();
}
Map* map = js_obj->map();
if (!map->has_hidden_prototype() &&
(key_is_array_index ? !map->has_indexed_interceptor()
: !map->has_named_interceptor())) {
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
// Slow case.
@@ -305,7 +355,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
: LookupIterator(js_obj, key, js_obj, c);
Maybe<bool> maybe = JSReceiver::HasProperty(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
DCHECK(!isolate->has_pending_exception());
return isolate->heap()->ToBoolean(maybe.FromJust());
@@ -317,20 +367,20 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
Maybe<bool> result =
JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
- if (result.IsNothing()) return isolate->heap()->exception();
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
} else if (object->IsString()) {
return isolate->heap()->ToBoolean(
key_is_array_index
? index < static_cast<uint32_t>(String::cast(*object)->length())
- : key->Equals(isolate->heap()->length_string()));
+ : key->Equals(ReadOnlyRoots(isolate).length_string()));
} else if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
}
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
@@ -343,7 +393,8 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
PropertyDetails property_details(kData, NONE, PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, name, value, property_details);
+ dictionary =
+ NameDictionary::Add(isolate, dictionary, name, value, property_details);
receiver->SetProperties(*dictionary);
return *value;
}
@@ -364,7 +415,7 @@ RUNTIME_FUNCTION(Runtime_ObjectCreate) {
// TODO(bmeurer): Use a dedicated cache for Object.create; think about
// slack tracking for Object.create.
Handle<Map> map =
- Map::GetObjectCreateMap(Handle<HeapObject>::cast(prototype));
+ Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
// Actually allocate the object.
Handle<JSObject> object;
@@ -435,13 +486,13 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
Handle<Map> function_map(function->map(), isolate);
if (!JSFunction::SetName(function, isolate->factory()->proto_string(),
isolate->factory()->empty_string())) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
CHECK_EQ(*function_map, function->map());
}
}
MAYBE_RETURN(JSReceiver::SetPrototype(obj, prototype, false, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
return *obj;
}
@@ -552,7 +603,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
DCHECK(!name->ToArrayIndex(&index));
LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
DCHECK(!it.IsFound());
#endif
@@ -578,7 +629,7 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
LookupIterator it(isolate, object, index, object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
DCHECK(!it.IsFound());
if (object->IsJSArray()) {
@@ -617,7 +668,7 @@ Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
Object::ToObject(isolate, object));
Maybe<bool> result =
Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -638,7 +689,8 @@ RUNTIME_FUNCTION(Runtime_ShrinkPropertyDictionary) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
- Handle<NameDictionary> new_properties = NameDictionary::Shrink(dictionary);
+ Handle<NameDictionary> new_properties =
+ NameDictionary::Shrink(isolate, dictionary);
receiver->SetProperties(*new_properties);
return Smi::kZero;
}
@@ -665,7 +717,7 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
// Lookup the {name} on {receiver}.
Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(maybe.FromJust());
}
@@ -687,24 +739,6 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
}
-// Return information on whether an object has a named or indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSObject()) {
- return Smi::kZero;
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- int result = 0;
- if (obj->HasNamedInterceptor()) result |= 2;
- if (obj->HasIndexedInterceptor()) result |= 1;
-
- return Smi::FromInt(result);
-}
-
-
RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -738,9 +772,9 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
- initial_map->CompleteInobjectSlackTracking();
+ initial_map->CompleteInobjectSlackTracking(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -786,7 +820,7 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::DefineAccessor(obj, name, getter, setter, attrs));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -803,7 +837,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
if (nexus.ic_state() == UNINITIALIZED) {
if (name->IsUniqueName()) {
- nexus.ConfigureMonomorphic(name, handle(object->map()),
+ nexus.ConfigureMonomorphic(name, handle(object->map(), isolate),
MaybeObjectHandle());
} else {
nexus.ConfigureMegamorphic(PROPERTY);
@@ -829,7 +863,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
Handle<Map> function_map(function->map(), isolate);
if (!JSFunction::SetName(function, name,
isolate->factory()->empty_string())) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
// Class constructors do not reserve in-object space for name field.
CHECK_IMPLIES(!IsClassConstructor(function->shared()->kind()),
@@ -860,14 +894,14 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
} else if (value->IsNull(isolate)) {
// typeof(null) is object. But it's more user-friendly to annotate
// null as type "null".
- type = Handle<String>(isolate->heap()->null_string());
+ type = Handle<String>(ReadOnlyRoots(isolate).null_string(), isolate);
}
DCHECK(vector->metadata()->HasTypeProfileSlot());
FeedbackNexus nexus(vector, vector->GetTypeProfileSlot());
nexus.Collect(type, position->value());
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
@@ -900,7 +934,7 @@ RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
+ if (!obj->IsJSReceiver()) return ReadOnlyRoots(isolate).null_value();
return JSReceiver::cast(obj)->class_name();
}
@@ -922,7 +956,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
if (String::cast(getter->shared()->Name())->length() == 0) {
Handle<Map> getter_map(getter->map(), isolate);
if (!JSFunction::SetName(getter, name, isolate->factory()->get_string())) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
CHECK_EQ(*getter_map, getter->map());
}
@@ -931,7 +965,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
isolate,
JSObject::DefineAccessor(object, name, getter,
isolate->factory()->null_value(), attrs));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_SetDataProperties) {
@@ -942,12 +976,12 @@ RUNTIME_FUNCTION(Runtime_SetDataProperties) {
// 2. If source is undefined or null, let keys be an empty List.
if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source),
- isolate->heap()->exception());
- return isolate->heap()->undefined_value();
+ ReadOnlyRoots(isolate).exception());
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
@@ -958,13 +992,13 @@ RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
// 2. If source is undefined or null, let keys be an empty List.
if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
nullptr, false),
- isolate->heap()->exception());
- return isolate->heap()->undefined_value();
+ ReadOnlyRoots(isolate).exception());
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
@@ -974,7 +1008,7 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
// 2. If source is undefined or null, let keys be an empty List.
if (source->IsUndefined(isolate) || source->IsNull(isolate)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
@@ -997,7 +1031,7 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
isolate->factory()->NewJSObject(isolate->object_function());
MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
&excluded_properties, false),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
return *target;
}
@@ -1039,7 +1073,7 @@ RUNTIME_FUNCTION(Runtime_DefineMethodsInternal) {
for (int i = 0; i < keys->length(); ++i) {
Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
- if (*key == isolate->heap()->constructor_string()) continue;
+ if (*key == ReadOnlyRoots(isolate).constructor_string()) continue;
PropertyDescriptor descriptor;
Maybe<bool> did_get_descriptor =
@@ -1056,7 +1090,7 @@ RUNTIME_FUNCTION(Runtime_DefineMethodsInternal) {
isolate, target, key, &descriptor, kDontThrow);
CHECK(success.FromJust());
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
@@ -1070,7 +1104,7 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
if (String::cast(setter->shared()->Name())->length() == 0) {
Handle<Map> setter_map(setter->map(), isolate);
if (!JSFunction::SetName(setter, name, isolate->factory()->set_string())) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
CHECK_EQ(*setter_map, setter->map());
}
@@ -1079,7 +1113,7 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
isolate,
JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
setter, attrs));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_ToObject) {
@@ -1108,14 +1142,14 @@ RUNTIME_FUNCTION(Runtime_ToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(input));
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToNumeric) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(input));
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(isolate, input));
}
RUNTIME_FUNCTION(Runtime_ToInteger) {
@@ -1172,10 +1206,10 @@ RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (!object->IsJSReceiver()) return isolate->heap()->false_value();
+ if (!object->IsJSReceiver()) return ReadOnlyRoots(isolate).false_value();
Maybe<bool> result = JSReceiver::HasInPrototypeChain(
isolate, Handle<JSReceiver>::cast(object), prototype);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1186,7 +1220,8 @@ RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
- return *isolate->factory()->NewJSIteratorResult(value, done->BooleanValue());
+ return *isolate->factory()->NewJSIteratorResult(value,
+ done->BooleanValue(isolate));
}
RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
@@ -1198,9 +1233,9 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, o, key, &success, LookupIterator::OWN);
- if (!success) return isolate->heap()->exception();
+ if (!success) return ReadOnlyRoots(isolate).exception();
MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, value, kThrowOnError),
- isolate->heap()->exception());
+ ReadOnlyRoots(isolate).exception());
return *value;
}
@@ -1235,9 +1270,9 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
PropertyDescriptor desc;
Maybe<bool> found =
JSReceiver::GetOwnPropertyDescriptor(isolate, object, name, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
+ MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
- if (!found.FromJust()) return isolate->heap()->undefined_value();
+ if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value();
return *desc.ToPropertyDescriptorObject(isolate);
}
@@ -1260,7 +1295,7 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
CHECK(Object::AddDataProperty(&it, value, NONE, kDontThrow,
Object::MAY_BE_STORE_FROM_KEYED)
.FromJust());
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index d01d115892..d2d55ed753 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -23,8 +23,8 @@ RUNTIME_FUNCTION(Runtime_Equal) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::Equals(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::Equals(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -33,8 +33,8 @@ RUNTIME_FUNCTION(Runtime_NotEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::Equals(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::Equals(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(!result.FromJust());
}
@@ -59,8 +59,8 @@ RUNTIME_FUNCTION(Runtime_LessThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::LessThan(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::LessThan(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -69,8 +69,8 @@ RUNTIME_FUNCTION(Runtime_GreaterThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::GreaterThan(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::GreaterThan(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -79,8 +79,8 @@ RUNTIME_FUNCTION(Runtime_LessThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::LessThanOrEqual(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::LessThanOrEqual(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -89,8 +89,8 @@ RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- Maybe<bool> result = Object::GreaterThanOrEqual(x, y);
- if (result.IsNothing()) return isolate->heap()->exception();
+ Maybe<bool> result = Object::GreaterThanOrEqual(isolate, x, y);
+ if (result.IsNothing()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 6c4f7d69eb..ec78904710 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -10,6 +10,7 @@
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/objects-inl.h"
+#include "src/objects/js-promise-inl.h"
namespace v8 {
namespace internal {
@@ -35,7 +36,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseRejectAfterResolved) {
@@ -45,7 +46,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectAfterResolved) {
CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
isolate->ReportPromiseReject(promise, reason,
v8::kPromiseRejectAfterResolved);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseResolveAfterResolved) {
@@ -55,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_PromiseResolveAfterResolved) {
CONVERT_ARG_HANDLE_CHECKED(Object, resolution, 1);
isolate->ReportPromiseReject(promise, resolution,
v8::kPromiseResolveAfterResolved);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
@@ -66,7 +67,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
CHECK(!promise->has_handler());
isolate->ReportPromiseReject(promise, Handle<Object>(),
v8::kPromiseHandlerAddedAfterReject);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
@@ -76,14 +77,14 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
Handle<CallableTask> microtask =
isolate->factory()->NewCallableTask(function, isolate->native_context());
isolate->EnqueueMicrotask(microtask);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
isolate->RunMicrotasks();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
@@ -95,7 +96,7 @@ RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
void* data = ToCData<void*>(microtask_data);
callback(data);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseStatus) {
@@ -119,7 +120,7 @@ RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
CONVERT_ARG_CHECKED(JSPromise, promise, 0);
promise->set_has_handler(true);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
@@ -128,35 +129,54 @@ RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, parent, 1);
isolate->RunPromiseHook(PromiseHookType::kInit, promise, parent);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_AwaitPromisesInit) {
+ DCHECK_EQ(3, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, wrapped_value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, throwaway, 2);
+ isolate->RunPromiseHook(PromiseHookType::kInit, wrapped_value, outer_promise);
+ isolate->RunPromiseHook(PromiseHookType::kInit, throwaway, wrapped_value);
+ // On inspector side we capture async stack trace and store it by
+ // outer_promise->async_task_id when async function is suspended first time.
+ // To use captured stack trace later throwaway promise should have the same
+ // async_task_id as outer_promise since we generate WillHandle and DidHandle
+ // events using throwaway promise.
+ throwaway->set_async_task_id(outer_promise->async_task_id());
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, maybe_promise, 0);
- if (!maybe_promise->IsJSPromise()) return isolate->heap()->undefined_value();
+ if (!maybe_promise->IsJSPromise())
+ return ReadOnlyRoots(isolate).undefined_value();
Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
if (isolate->debug()->is_active()) isolate->PushPromise(promise);
if (promise->IsJSPromise()) {
isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
isolate->factory()->undefined_value());
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, maybe_promise, 0);
- if (!maybe_promise->IsJSPromise()) return isolate->heap()->undefined_value();
+ if (!maybe_promise->IsJSPromise())
+ return ReadOnlyRoots(isolate).undefined_value();
Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
if (isolate->debug()->is_active()) isolate->PopPromise();
if (promise->IsJSPromise()) {
isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
isolate->factory()->undefined_value());
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_RejectPromise) {
@@ -165,7 +185,8 @@ RUNTIME_FUNCTION(Runtime_RejectPromise) {
CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
CONVERT_ARG_HANDLE_CHECKED(Oddball, debug_event, 2);
- return *JSPromise::Reject(promise, reason, debug_event->BooleanValue());
+ return *JSPromise::Reject(promise, reason,
+ debug_event->BooleanValue(isolate));
}
RUNTIME_FUNCTION(Runtime_ResolvePromise) {
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 5340d31843..8101ea6d29 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -41,19 +41,23 @@ RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, on_non_existent, 3);
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(isolate, receiver, key,
&success, holder);
if (!success) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
- RETURN_RESULT_OR_FAILURE(isolate, Object::GetProperty(&it));
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::GetProperty(
+ &it, static_cast<OnNonExistent>(on_non_existent->value())));
}
RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
@@ -71,11 +75,11 @@ RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
&success, holder);
if (!success) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
Maybe<bool> result = Object::SetSuperProperty(
&it, value, language_mode, Object::MAY_BE_STORE_FROM_KEYED);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -101,7 +105,7 @@ RUNTIME_FUNCTION(Runtime_CheckProxyHasTrap) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
Maybe<bool> result = JSProxy::CheckHasTrap(isolate, name, target);
- if (!result.IsJust()) return isolate->heap()->exception();
+ if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
return isolate->heap()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index dd65c1dee6..f58a3dd6f7 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -68,8 +68,9 @@ class CompiledReplacement {
: parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
// Return whether the replacement is simple.
- bool Compile(Handle<JSRegExp> regexp, Handle<String> replacement,
- int capture_count, int subject_length);
+ bool Compile(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> replacement, int capture_count,
+ int subject_length);
// Use Apply only if Compile returned false.
void Apply(ReplacementStringBuilder* builder, int match_from, int match_to,
@@ -148,9 +149,6 @@ class CompiledReplacement {
// Equivalent to String::GetSubstitution, except that this method converts
// the replacement string into an internal representation that avoids
// repeated parsing when used repeatedly.
- DCHECK_IMPLIES(capture_name_map != nullptr,
- FLAG_harmony_regexp_named_captures);
-
int length = characters.length();
int last = 0;
for (int i = 0; i < length; i++) {
@@ -315,7 +313,7 @@ class CompiledReplacement {
Zone* zone_;
};
-bool CompiledReplacement::Compile(Handle<JSRegExp> regexp,
+bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> replacement, int capture_count,
int subject_length) {
{
@@ -328,7 +326,6 @@ bool CompiledReplacement::Compile(Handle<JSRegExp> regexp,
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
Object* maybe_capture_name_map = regexp->CaptureNameMap();
if (maybe_capture_name_map->IsFixedArray()) {
- DCHECK(FLAG_harmony_regexp_named_captures);
capture_name_map = FixedArray::cast(maybe_capture_name_map);
}
}
@@ -347,7 +344,6 @@ bool CompiledReplacement::Compile(Handle<JSRegExp> regexp,
if (simple) return true;
}
- Isolate* isolate = replacement->GetIsolate();
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
for (int i = 0, n = parts_.length(); i < n; i++) {
@@ -561,7 +557,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
result_len = static_cast<int>(result_len_64);
}
if (result_len == 0) {
- return isolate->heap()->empty_string();
+ return ReadOnlyRoots(isolate).empty_string();
}
int subject_pos = 0;
@@ -601,7 +597,8 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
- RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
+ RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, 0,
+ match_indices);
TruncateRegexpIndicesList(isolate);
@@ -620,9 +617,9 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
JSRegExp::Type typeTag = regexp->TypeTag();
if (typeTag == JSRegExp::IRREGEXP) {
// Ensure the RegExp is compiled so we can access the capture-name map.
- if (RegExpImpl::IrregexpPrepare(regexp, subject) == -1) {
+ if (RegExpImpl::IrregexpPrepare(isolate, regexp, subject) == -1) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
}
@@ -630,7 +627,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
Zone zone(isolate->allocator(), ZONE_NAME);
CompiledReplacement compiled_replacement(&zone);
const bool simple_replace = compiled_replacement.Compile(
- regexp, replacement, capture_count, subject_length);
+ isolate, regexp, replacement, capture_count, subject_length);
// Shortcut for simple non-regexp global replacements
if (typeTag == JSRegExp::ATOM && simple_replace) {
@@ -644,11 +641,11 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
}
RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
if (current_match == nullptr) {
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
return *subject;
}
@@ -685,14 +682,14 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
current_match = global_cache.FetchNext();
} while (current_match != nullptr);
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
if (prev < subject_length) {
builder.EnsureCapacity(2);
builder.AddSubjectSlice(prev, subject_length);
}
- RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
+ RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
global_cache.LastSuccessfulMatch());
RETURN_RESULT_OR_FAILURE(isolate, builder.ToString());
@@ -717,11 +714,11 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
}
RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
if (current_match == nullptr) {
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
return *subject;
}
@@ -731,7 +728,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
int subject_length = subject->length();
int new_length = subject_length - (end - start);
- if (new_length == 0) return isolate->heap()->empty_string();
+ if (new_length == 0) return ReadOnlyRoots(isolate).empty_string();
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasOneByteEncoding) {
@@ -758,9 +755,9 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
current_match = global_cache.FetchNext();
} while (current_match != nullptr);
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
- RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
+ RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
global_cache.LastSuccessfulMatch());
if (prev < subject_length) {
@@ -770,7 +767,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
position += subject_length - prev;
}
- if (position == 0) return isolate->heap()->empty_string();
+ if (position == 0) return ReadOnlyRoots(isolate).empty_string();
// Shorten string and fill
int string_size = ResultSeqString::SizeFor(position);
@@ -802,7 +799,7 @@ Object* StringReplaceGlobalRegExpWithStringHelper(
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
if (replacement->length() == 0) {
if (subject->HasOnlyOneByteChars()) {
@@ -814,7 +811,7 @@ Object* StringReplaceGlobalRegExpWithStringHelper(
}
}
- replacement = String::Flatten(replacement);
+ replacement = String::Flatten(isolate, replacement);
return StringReplaceGlobalRegExpWithString(isolate, subject, regexp,
replacement, last_match_info);
@@ -853,8 +850,8 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
// isn't empty, we can never create more parts than ~half the length
// of the subject.
- subject = String::Flatten(subject);
- pattern = String::Flatten(pattern);
+ subject = String::Flatten(isolate, subject);
+ pattern = String::Flatten(isolate, pattern);
std::vector<int>* indices = GetRewoundRegexpIndicesList(isolate);
@@ -875,7 +872,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
DCHECK(result->HasObjectElements());
- Handle<FixedArray> elements(FixedArray::cast(result->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(result->elements()), isolate);
if (part_count == 1 && indices->at(0) == subject_length) {
elements->set(0, *subject);
@@ -915,8 +912,8 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CHECK_LE(0, index);
CHECK_GE(subject->length(), index);
isolate->counters()->regexp_entry_runtime()->Increment();
- RETURN_RESULT_OR_FAILURE(
- isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
+ RETURN_RESULT_OR_FAILURE(isolate, RegExpImpl::Exec(isolate, regexp, subject,
+ index, last_match_info));
}
RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
@@ -941,14 +938,13 @@ class MatchInfoBackedMatch : public String::Match {
Handle<String> subject,
Handle<RegExpMatchInfo> match_info)
: isolate_(isolate), match_info_(match_info) {
- subject_ = String::Flatten(subject);
+ subject_ = String::Flatten(isolate, subject);
if (regexp->TypeTag() == JSRegExp::IRREGEXP) {
Object* o = regexp->CaptureNameMap();
has_named_captures_ = o->IsFixedArray();
if (has_named_captures_) {
- DCHECK(FLAG_harmony_regexp_named_captures);
- capture_name_map_ = handle(FixedArray::cast(o));
+ capture_name_map_ = handle(FixedArray::cast(o), isolate);
}
} else {
has_named_captures_ = false;
@@ -1031,7 +1027,7 @@ class VectorBackedMatch : public String::Match {
match_(match),
match_position_(match_position),
captures_(captures) {
- subject_ = String::Flatten(subject);
+ subject_ = String::Flatten(isolate, subject);
DCHECK(groups_obj->IsUndefined(isolate) || groups_obj->IsJSReceiver());
has_named_captures_ = !groups_obj->IsUndefined(isolate);
@@ -1079,7 +1075,8 @@ class VectorBackedMatch : public String::Match {
Handle<Object> capture_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, capture_obj,
- Object::GetProperty(groups_obj_, name), String);
+ Object::GetProperty(isolate_, groups_obj_, name),
+ String);
if (capture_obj->IsUndefined(isolate_)) {
*state = UNMATCHED;
return isolate_->factory()->empty_string();
@@ -1105,7 +1102,6 @@ class VectorBackedMatch : public String::Match {
Handle<JSObject> ConstructNamedCaptureGroupsObject(
Isolate* isolate, Handle<FixedArray> capture_map,
std::function<Object*(int)> f_get_capture) {
- DCHECK(FLAG_harmony_regexp_named_captures);
Handle<JSObject> groups = isolate->factory()->NewJSObjectWithNullProto();
const int capture_count = capture_map->length() >> 1;
@@ -1113,14 +1109,15 @@ Handle<JSObject> ConstructNamedCaptureGroupsObject(
const int name_ix = i * 2;
const int index_ix = i * 2 + 1;
- Handle<String> capture_name(String::cast(capture_map->get(name_ix)));
+ Handle<String> capture_name(String::cast(capture_map->get(name_ix)),
+ isolate);
const int capture_ix = Smi::ToInt(capture_map->get(index_ix));
DCHECK(1 <= capture_ix && capture_ix <= capture_count);
Handle<Object> capture_value(f_get_capture(capture_ix), isolate);
DCHECK(capture_value->IsUndefined(isolate) || capture_value->IsString());
- JSObject::AddProperty(groups, capture_name, capture_value, NONE);
+ JSObject::AddProperty(isolate, groups, capture_name, capture_value, NONE);
}
return groups;
@@ -1154,26 +1151,26 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
last_match[i] = Smi::ToInt(last_match_cache->get(i));
}
Handle<FixedArray> cached_fixed_array =
- Handle<FixedArray>(FixedArray::cast(cached_answer));
+ Handle<FixedArray>(FixedArray::cast(cached_answer), isolate);
// The cache FixedArray is a COW-array and we need to return a copy.
Handle<FixedArray> copied_fixed_array =
isolate->factory()->CopyFixedArrayWithMap(
cached_fixed_array, isolate->factory()->fixed_array_map());
JSArray::SetContent(result_array, copied_fixed_array);
- RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
- last_match);
+ RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
+ capture_count, last_match);
DeleteArray(last_match);
return *result_array;
}
}
RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
// Ensured in Runtime_RegExpExecMultiple.
DCHECK(result_array->HasObjectElements());
- Handle<FixedArray> result_elements(
- FixedArray::cast(result_array->elements()));
+ Handle<FixedArray> result_elements(FixedArray::cast(result_array->elements()),
+ isolate);
if (result_elements->length() < 16) {
result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
}
@@ -1192,7 +1189,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
int32_t* current_match = global_cache.FetchNext();
if (current_match == nullptr) break;
match_start = current_match[0];
- builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ builder.EnsureCapacity(isolate, kMaxBuilderEntriesPerRegExpMatch);
if (match_end < match_start) {
ReplacementStringBuilder::AddSubjectSlice(&builder, match_end,
match_start);
@@ -1218,7 +1215,6 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<Object> maybe_capture_map(regexp->CaptureNameMap(), isolate);
const bool has_named_captures = maybe_capture_map->IsFixedArray();
- DCHECK_IMPLIES(has_named_captures, FLAG_harmony_regexp_named_captures);
const int argc =
has_named_captures ? 4 + capture_count : 3 + capture_count;
@@ -1237,7 +1233,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
elements->set(cursor++, *substring);
} else {
DCHECK_GT(0, current_match[i * 2 + 1]);
- elements->set(cursor++, isolate->heap()->undefined_value());
+ elements->set(cursor++, ReadOnlyRoots(isolate).undefined_value());
}
}
@@ -1260,7 +1256,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
}
- if (global_cache.HasException()) return isolate->heap()->exception();
+ if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
if (match_start >= 0) {
// Finished matching, with at least one match.
@@ -1269,7 +1265,8 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
subject_length);
}
- RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
+ RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
+ capture_count,
global_cache.LastSuccessfulMatch());
if (subject_length > kMinLengthToCache) {
@@ -1282,8 +1279,8 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
for (int i = 0; i < capture_registers; i++) {
last_match_cache->set(i, Smi::FromInt(last_match[i]));
}
- Handle<FixedArray> result_fixed_array = builder.array();
- result_fixed_array->Shrink(builder.length());
+ Handle<FixedArray> result_fixed_array =
+ FixedArray::ShrinkOrEmpty(isolate, builder.array(), builder.length());
// Cache the result and copy the FixedArray into a COW array.
Handle<FixedArray> copied_fixed_array =
isolate->factory()->CopyFixedArrayWithMap(
@@ -1294,7 +1291,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
return *builder.ToJSArray(result_array);
} else {
- return isolate->heap()->null_value(); // No matches at all.
+ return ReadOnlyRoots(isolate).null_value(); // No matches at all.
}
}
@@ -1316,7 +1313,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Handle<String> replace;
ASSIGN_RETURN_ON_EXCEPTION(isolate, replace,
Object::ToString(isolate, replace_obj), String);
- replace = String::Flatten(replace);
+ replace = String::Flatten(isolate, replace);
Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
@@ -1337,7 +1334,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Handle<Object> match_indices_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, string, last_index, last_match_info), String);
+ RegExpImpl::Exec(isolate, regexp, string, last_index, last_match_info),
+ String);
if (match_indices_obj->IsNull(isolate)) {
if (sticky) regexp->set_last_index(Smi::kZero, SKIP_WRITE_BARRIER);
@@ -1412,7 +1410,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
CHECK(result_array->HasObjectElements());
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
if (regexp->CaptureCount() == 0) {
@@ -1456,7 +1454,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
Handle<Object> match_indices_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, subject, last_index, last_match_info));
+ RegExpImpl::Exec(isolate, regexp, subject, last_index, last_match_info));
if (match_indices_obj->IsNull(isolate)) {
if (sticky) regexp->set_last_index(Smi::kZero, SKIP_WRITE_BARRIER);
@@ -1491,11 +1489,10 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
Object* maybe_capture_map = regexp->CaptureNameMap();
if (maybe_capture_map->IsFixedArray()) {
has_named_captures = true;
- capture_map = handle(FixedArray::cast(maybe_capture_map));
+ capture_map = handle(FixedArray::cast(maybe_capture_map), isolate);
}
}
- DCHECK_IMPLIES(has_named_captures, FLAG_harmony_regexp_named_captures);
const uint32_t argc = GetArgcForReplaceCallable(m, has_named_captures);
if (argc == static_cast<uint32_t>(-1)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -1553,7 +1550,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
}
Handle<Object> number;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(isolate, object),
+ Object);
*out = NumberToUint32(*number);
return object;
}
@@ -1561,8 +1559,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
Handle<FixedArray> elems,
int num_elems) {
- elems->Shrink(num_elems);
- return isolate->factory()->NewJSArrayWithElements(elems);
+ return isolate->factory()->NewJSArrayWithElements(
+ FixedArray::ShrinkOrEmpty(isolate, elems, num_elems));
}
} // namespace
@@ -1589,7 +1587,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
Handle<Object> flags_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+ isolate, flags_obj,
+ JSObject::GetProperty(isolate, recv, factory->flags_string()));
Handle<String> flags;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
@@ -1682,7 +1681,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
{
Handle<String> substr =
factory->NewSubString(string, prev_string_index, string_index);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ elems = FixedArray::SetAndGrow(isolate, elems, num_elems++, substr);
if (num_elems == limit) {
return *NewJSArrayWithElements(isolate, elems, num_elems);
}
@@ -1693,7 +1692,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
Handle<Object> num_captures_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, num_captures_obj,
- Object::GetProperty(result, isolate->factory()->length_string()));
+ Object::GetProperty(isolate, result,
+ isolate->factory()->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
@@ -1703,7 +1703,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
Handle<Object> capture;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, capture, Object::GetElement(isolate, result, i));
- elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
+ elems = FixedArray::SetAndGrow(isolate, elems, num_elems++, capture);
if (num_elems == limit) {
return *NewJSArrayWithElements(isolate, elems, num_elems);
}
@@ -1715,7 +1715,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
{
Handle<String> substr =
factory->NewSubString(string, prev_string_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ elems = FixedArray::SetAndGrow(isolate, elems, num_elems++, substr);
}
return *NewJSArrayWithElements(isolate, elems, num_elems);
@@ -1734,7 +1734,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
Factory* factory = isolate->factory();
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
// Fast-path for unmodified JSRegExps.
if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
@@ -1755,16 +1755,16 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
Handle<Object> global_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, global_obj,
- JSReceiver::GetProperty(recv, factory->global_string()));
- const bool global = global_obj->BooleanValue();
+ JSReceiver::GetProperty(isolate, recv, factory->global_string()));
+ const bool global = global_obj->BooleanValue(isolate);
bool unicode = false;
if (global) {
Handle<Object> unicode_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, unicode_obj,
- JSReceiver::GetProperty(recv, factory->unicode_string()));
- unicode = unicode_obj->BooleanValue();
+ JSReceiver::GetProperty(isolate, recv, factory->unicode_string()));
+ unicode = unicode_obj->BooleanValue(isolate);
RETURN_FAILURE_ON_EXCEPTION(isolate,
RegExpUtils::SetLastIndex(isolate, recv, 0));
@@ -1807,7 +1807,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
Handle<Object> captures_length_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
- Object::GetProperty(result, factory->length_string()));
+ Object::GetProperty(isolate, result, factory->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
@@ -1828,7 +1828,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
Handle<Object> position_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, position_obj,
- Object::GetProperty(result, factory->index_string()));
+ Object::GetProperty(isolate, result, factory->index_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, position_obj, Object::ToInteger(isolate, position_obj));
@@ -1851,14 +1851,11 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
}
Handle<Object> groups_obj = isolate->factory()->undefined_value();
- if (FLAG_harmony_regexp_named_captures) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, groups_obj,
- Object::GetProperty(result, factory->groups_string()));
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, groups_obj,
+ Object::GetProperty(isolate, result, factory->groups_string()));
const bool has_named_captures = !groups_obj->IsUndefined(isolate);
- DCHECK_IMPLIES(has_named_captures, FLAG_harmony_regexp_named_captures);
Handle<String> replacement;
if (functional_replace) {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index b13d52bceb..93a0629783 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -14,6 +14,7 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
@@ -49,9 +50,9 @@ Object* DeclareGlobal(
Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>(),
FeedbackSlot slot = FeedbackSlot::Invalid()) {
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
+ global->native_context()->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup) &&
+ if (ScriptContextTable::Lookup(isolate, script_contexts, name, &lookup) &&
IsLexicalVariableMode(lookup.mode)) {
// ES#sec-globaldeclarationinstantiation 6.a:
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
@@ -70,14 +71,14 @@ Object* DeclareGlobal(
}
LookupIterator it(global, name, global, lookup_config);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
if (it.IsFound()) {
PropertyAttributes old_attributes = maybe.FromJust();
// The name was declared before; check for conflicting re-declarations.
// Skip var re-declarations.
- if (is_var) return isolate->heap()->undefined_value();
+ if (is_var) return ReadOnlyRoots(isolate).undefined_value();
DCHECK(is_function_declaration);
if ((old_attributes & DONT_DELETE) != 0) {
@@ -127,14 +128,14 @@ Object* DeclareGlobal(
nexus.ConfigurePropertyCellMode(it.GetPropertyCell());
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
int flags, Handle<FeedbackVector> feedback_vector) {
HandleScope scope(isolate);
Handle<JSGlobalObject> global(isolate->global_object());
- Handle<Context> context(isolate->context());
+ Handle<Context> context(isolate->context(), isolate);
// Traverse the name/value pairs and set the properties.
int length = declarations->length();
@@ -186,7 +187,7 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
if (isolate->has_pending_exception()) return result;
});
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace
@@ -274,13 +275,13 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
DCHECK_EQ(NONE, attributes);
// Skip var re-declarations.
- if (is_var) return isolate->heap()->undefined_value();
+ if (is_var) return ReadOnlyRoots(isolate).undefined_value();
DCHECK(is_function);
if (index != Context::kNotFound) {
DCHECK(holder.is_identical_to(context));
context->set(index, *value);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
object = Handle<JSObject>::cast(holder);
@@ -304,7 +305,7 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
object, name, value, NONE));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace
@@ -406,13 +407,14 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
int mapped_count = Min(argument_count, parameter_count);
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
- parameter_map->set_map(isolate->heap()->sloppy_arguments_elements_map());
+ parameter_map->set_map(
+ ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
result->set_map(isolate->native_context()->fast_aliased_arguments_map());
result->set_elements(*parameter_map);
// Store the context and the arguments array at the beginning of the
// parameter map.
- Handle<Context> context(isolate->context());
+ Handle<Context> context(isolate->context(), isolate);
Handle<FixedArray> arguments =
isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
parameter_map->set(0, *context);
@@ -427,43 +429,24 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
--index;
}
- Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
- while (index >= 0) {
- // Detect duplicate names to the right in the parameter list.
- Handle<String> name(scope_info->ParameterName(index));
- int context_local_count = scope_info->ContextLocalCount();
- bool duplicate = false;
- for (int j = index + 1; j < parameter_count; ++j) {
- if (scope_info->ParameterName(j) == *name) {
- duplicate = true;
- break;
- }
- }
-
- if (duplicate) {
- // This goes directly in the arguments array with a hole in the
- // parameter map.
- arguments->set(index, parameters[index]);
- parameter_map->set_the_hole(index + 2);
- } else {
- // The context index goes in the parameter map with a hole in the
- // arguments array.
- int context_index = -1;
- for (int j = 0; j < context_local_count; ++j) {
- if (scope_info->ContextLocalName(j) == *name) {
- context_index = j;
- break;
- }
- }
-
- DCHECK_GE(context_index, 0);
- arguments->set_the_hole(index);
- parameter_map->set(
- index + 2,
- Smi::FromInt(Context::MIN_CONTEXT_SLOTS + context_index));
- }
+ Handle<ScopeInfo> scope_info(callee->shared()->scope_info(), isolate);
- --index;
+ // First mark all mappable slots as unmapped and copy the values into the
+ // arguments object.
+ for (int i = 0; i < mapped_count; i++) {
+ arguments->set(i, parameters[i]);
+ parameter_map->set_the_hole(i + 2);
+ }
+
+ // Walk all context slots to find context allocated parameters. Mark each
+ // found parameter as mapped.
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ if (!scope_info->ContextLocalIsParameter(i)) continue;
+ int parameter = scope_info->ContextLocalParameterNumber(i);
+ if (parameter >= mapped_count) continue;
+ arguments->set_the_hole(parameter);
+ Smi* slot = Smi::FromInt(Context::MIN_CONTEXT_SLOTS + i);
+ parameter_map->set(parameter + 2, slot);
}
} else {
// If there is no aliasing, the arguments object elements are not
@@ -644,15 +627,14 @@ RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
return *function;
}
-static Object* FindNameClash(Handle<ScopeInfo> scope_info,
+static Object* FindNameClash(Isolate* isolate, Handle<ScopeInfo> scope_info,
Handle<JSGlobalObject> global_object,
Handle<ScriptContextTable> script_context) {
- Isolate* isolate = scope_info->GetIsolate();
for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
- Handle<String> name(scope_info->ContextLocalName(var));
+ Handle<String> name(scope_info->ContextLocalName(var), isolate);
VariableMode mode = scope_info->ContextLocalMode(var);
ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_context, name, &lookup)) {
+ if (ScriptContextTable::Lookup(isolate, script_context, name, &lookup)) {
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
// ES#sec-globaldeclarationinstantiation 5.b:
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
@@ -666,7 +648,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
LookupIterator it(global_object, name, global_object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return isolate->heap()->exception();
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
if ((maybe.FromJust() & DONT_DELETE) != 0) {
// ES#sec-globaldeclarationinstantiation 5.a:
// If envRec.HasVarDeclaration(name) is true, throw a SyntaxError
@@ -680,7 +662,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
JSGlobalObject::InvalidatePropertyCell(global_object, name);
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -691,12 +673,13 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
Handle<Context> native_context(isolate->context(), isolate);
DCHECK(native_context->IsNativeContext());
- Handle<JSGlobalObject> global_object(native_context->global_object());
+ Handle<JSGlobalObject> global_object(native_context->global_object(),
+ isolate);
Handle<ScriptContextTable> script_context_table(
- native_context->script_context_table());
+ native_context->script_context_table(), isolate);
Object* name_clash_result =
- FindNameClash(scope_info, global_object, script_context_table);
+ FindNameClash(isolate, scope_info, global_object, script_context_table);
if (isolate->has_pending_exception()) return name_clash_result;
// We do not need script contexts here during bootstrap.
@@ -726,7 +709,7 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
- Handle<Context> current(isolate->context());
+ Handle<Context> current(isolate->context(), isolate);
Handle<Context> context =
isolate->factory()->NewWithContext(current, scope_info, extension_object);
isolate->set_context(*context);
@@ -786,14 +769,15 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// If the slot was not found the result is true.
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- return isolate->heap()->true_value();
+ if (isolate->has_pending_exception())
+ return ReadOnlyRoots(isolate).exception();
+ return ReadOnlyRoots(isolate).true_value();
}
// If the slot was found in a context or in module imports and exports it
// should be DONT_DELETE.
if (holder->IsContext() || holder->IsModule()) {
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
// The slot was found in a JSReceiver, either a context extension object,
@@ -801,18 +785,16 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// (respecting DONT_DELETE).
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
Maybe<bool> result = JSReceiver::DeleteProperty(object, name);
- MAYBE_RETURN(result, isolate->heap()->exception());
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
namespace {
-MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
+MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
ShouldThrow should_throw,
Handle<Object>* receiver_return = nullptr) {
- Isolate* const isolate = name->GetIsolate();
-
int index;
PropertyAttributes attributes;
InitializationFlag flag;
@@ -822,7 +804,7 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
if (!holder.is_null() && holder->IsModule()) {
- return Module::LoadVariable(Handle<Module>::cast(holder), index);
+ return Module::LoadVariable(isolate, Handle<Module>::cast(holder), index);
}
if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
@@ -849,8 +831,7 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
// GetProperty function.
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Object::GetProperty(holder, name),
- Object);
+ isolate, value, Object::GetProperty(isolate, holder, name), Object);
if (receiver_return) {
*receiver_return =
(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject())
@@ -878,7 +859,8 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, kThrowOnError));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ LoadLookupSlot(isolate, name, kThrowOnError));
}
@@ -886,7 +868,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, kDontThrow));
+ RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(isolate, name, kDontThrow));
}
@@ -898,8 +880,8 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
Handle<Object> value;
Handle<Object> receiver;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, LoadLookupSlot(name, kThrowOnError, &receiver),
- MakePair(isolate->heap()->exception(), nullptr));
+ isolate, value, LoadLookupSlot(isolate, name, kThrowOnError, &receiver),
+ MakePair(ReadOnlyRoots(isolate).exception(), nullptr));
return MakePair(*value, *receiver);
}
@@ -907,9 +889,9 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
namespace {
MaybeHandle<Object> StoreLookupSlot(
- Handle<String> name, Handle<Object> value, LanguageMode language_mode,
+ Isolate* isolate, Handle<String> name, Handle<Object> value,
+ LanguageMode language_mode,
ContextLookupFlags context_lookup_flags = FOLLOW_CHAINS) {
- Isolate* const isolate = name->GetIsolate();
Handle<Context> context(isolate->context(), isolate);
int index;
@@ -962,12 +944,12 @@ MaybeHandle<Object> StoreLookupSlot(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
} else {
// If absent in sloppy mode: add the property to the global object.
- object = Handle<JSReceiver>(context->global_object());
+ object = handle(context->global_object(), isolate);
}
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Object::SetProperty(object, name, value, language_mode),
- Object);
+ isolate, value,
+ Object::SetProperty(isolate, object, name, value, language_mode), Object);
return value;
}
@@ -979,8 +961,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- StoreLookupSlot(name, value, LanguageMode::kSloppy));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kSloppy));
}
// Store into a dynamic context for sloppy-mode block-scoped function hoisting
@@ -994,8 +976,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
RETURN_RESULT_OR_FAILURE(
- isolate,
- StoreLookupSlot(name, value, LanguageMode::kSloppy, lookup_flags));
+ isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kSloppy,
+ lookup_flags));
}
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
@@ -1003,8 +985,8 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- StoreLookupSlot(name, value, LanguageMode::kStrict));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kStrict));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 6f203b3d01..0c3c82deda 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -74,8 +74,8 @@ MaybeHandle<String> StringReplaceOneCharWithString(
recursion_limit--;
if (subject->IsConsString()) {
ConsString* cons = ConsString::cast(*subject);
- Handle<String> first = Handle<String>(cons->first());
- Handle<String> second = Handle<String>(cons->second());
+ Handle<String> first = handle(cons->first(), isolate);
+ Handle<String> second = handle(cons->second(), isolate);
Handle<String> new_first;
if (!StringReplaceOneCharWithString(isolate, first, search, replace, found,
recursion_limit).ToHandle(&new_first)) {
@@ -123,14 +123,16 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
kRecursionLimit).ToHandle(&result)) {
return *result;
}
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (isolate->has_pending_exception())
+ return ReadOnlyRoots(isolate).exception();
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
if (StringReplaceOneCharWithString(isolate, subject, search, replace, &found,
kRecursionLimit).ToHandle(&result)) {
return *result;
}
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (isolate->has_pending_exception())
+ return ReadOnlyRoots(isolate).exception();
// In case of empty handle and no pending exception we have stack overflow.
return isolate->StackOverflow();
}
@@ -141,7 +143,7 @@ RUNTIME_FUNCTION(Runtime_StringTrim) {
Handle<String> string = args.at<String>(0);
CONVERT_SMI_ARG_CHECKED(mode, 1);
String::TrimMode trim_mode = static_cast<String::TrimMode>(mode);
- return *String::Trim(string, trim_mode);
+ return *String::Trim(isolate, string, trim_mode);
}
// ES6 #sec-string.prototype.includes
@@ -166,7 +168,7 @@ RUNTIME_FUNCTION(Runtime_StringIncludes) {
Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
if (is_reg_exp.IsNothing()) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
if (is_reg_exp.FromJust()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -257,10 +259,10 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
- subject = String::Flatten(subject);
+ subject = String::Flatten(isolate, subject);
if (i >= static_cast<uint32_t>(subject->length())) {
- return isolate->heap()->nan_value();
+ return ReadOnlyRoots(isolate).nan_value();
}
return Smi::FromInt(subject->Get(i));
@@ -289,7 +291,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
int special_length = special->length();
if (!array->HasObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ return isolate->Throw(ReadOnlyRoots(isolate).illegal_argument_string());
}
int length;
@@ -303,7 +305,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
}
if (array_length == 0) {
- return isolate->heap()->empty_string();
+ return ReadOnlyRoots(isolate).empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
if (first->IsString()) return first;
@@ -313,10 +315,10 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
}
if (length == -1) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
+ return isolate->Throw(ReadOnlyRoots(isolate).illegal_argument_string());
}
if (length == 0) {
- return isolate->heap()->empty_string();
+ return ReadOnlyRoots(isolate).empty_string();
}
if (one_byte) {
@@ -350,13 +352,13 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
CHECK(array->HasObjectElements());
CHECK_GE(array_length, 0);
- Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()), isolate);
if (fixed_array->length() < array_length) {
array_length = fixed_array->length();
}
if (array_length == 0) {
- return isolate->heap()->empty_string();
+ return ReadOnlyRoots(isolate).empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
CHECK(first->IsString());
@@ -570,7 +572,7 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
FixedArray* elements, int length) {
DisallowHeapAllocation no_gc;
FixedArray* one_byte_cache = heap->single_character_string_cache();
- Object* undefined = heap->undefined_value();
+ Object* undefined = ReadOnlyRoots(heap).undefined_value();
int i;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (i = 0; i < length; ++i) {
@@ -600,7 +602,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- s = String::Flatten(s);
+ s = String::Flatten(isolate, s);
const int length = static_cast<int>(Min<uint32_t>(s->length(), limit));
Handle<FixedArray> elements;
@@ -618,8 +620,8 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.start(),
*elements, length);
} else {
- MemsetPointer(elements->data_start(), isolate->heap()->undefined_value(),
- length);
+ MemsetPointer(elements->data_start(),
+ ReadOnlyRoots(isolate).undefined_value(), length);
}
} else {
elements = isolate->factory()->NewFixedArray(length);
@@ -644,7 +646,7 @@ RUNTIME_FUNCTION(Runtime_StringLessThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- ComparisonResult result = String::Compare(x, y);
+ ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
ComparisonResultToBool(Operation::kLessThan, result));
@@ -655,7 +657,7 @@ RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- ComparisonResult result = String::Compare(x, y);
+ ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
ComparisonResultToBool(Operation::kLessThanOrEqual, result));
@@ -666,7 +668,7 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- ComparisonResult result = String::Compare(x, y);
+ ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
ComparisonResultToBool(Operation::kGreaterThan, result));
@@ -677,7 +679,7 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- ComparisonResult result = String::Compare(x, y);
+ ComparisonResult result = String::Compare(isolate, x, y);
DCHECK_NE(result, ComparisonResult::kUndefined);
return isolate->heap()->ToBoolean(
ComparisonResultToBool(Operation::kGreaterThanOrEqual, result));
@@ -688,7 +690,7 @@ RUNTIME_FUNCTION(Runtime_StringEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- return isolate->heap()->ToBoolean(String::Equals(x, y));
+ return isolate->heap()->ToBoolean(String::Equals(isolate, x, y));
}
RUNTIME_FUNCTION(Runtime_StringNotEqual) {
@@ -696,14 +698,14 @@ RUNTIME_FUNCTION(Runtime_StringNotEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- return isolate->heap()->ToBoolean(!String::Equals(x, y));
+ return isolate->heap()->ToBoolean(!String::Equals(isolate, x, y));
}
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
- return *String::Flatten(str);
+ return *String::Flatten(isolate, str);
}
RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
@@ -714,7 +716,7 @@ RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
code &= 0xFFFF;
return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
}
- return isolate->heap()->empty_string();
+ return ReadOnlyRoots(isolate).empty_string();
}
RUNTIME_FUNCTION(Runtime_StringMaxLength) {
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 488aa756c6..1472b4e2be 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -31,14 +31,6 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateFieldSymbol) {
return *symbol;
}
-RUNTIME_FUNCTION(Runtime_SymbolDescription) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Symbol, symbol, 0);
- return symbol->name();
-}
-
-
RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 8cec33c23d..03251b67e1 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include <memory>
+#include <sstream>
#include "src/api.h"
#include "src/arguments.h"
@@ -59,7 +60,7 @@ bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
}
v8::Local<v8::WasmCompiledModule> module =
v8::Local<v8::WasmCompiledModule>::Cast(module_or_bytes);
- return static_cast<uint32_t>(module->GetWasmWireBytes()->Length()) <=
+ return static_cast<uint32_t>(module->GetWasmWireBytesRef().size) <=
ctrls.MaxWasmBufferSize;
}
@@ -139,16 +140,16 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
// Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
// If the function is not optimized, just return.
- if (!function->IsOptimized()) return isolate->heap()->undefined_value();
+ if (!function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
Deoptimizer::DeoptimizeFunction(*function);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -160,15 +161,15 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
// Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
- if (!it.done()) function = Handle<JSFunction>(it.frame()->function());
- if (function.is_null()) return isolate->heap()->undefined_value();
+ if (!it.done()) function = handle(it.frame()->function(), isolate);
+ if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
// If the function is not optimized, just return.
- if (!function->IsOptimized()) return isolate->heap()->undefined_value();
+ if (!function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
Deoptimizer::DeoptimizeFunction(*function);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -176,9 +177,9 @@ RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
#if defined(USE_SIMULATOR)
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
#else
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
#endif
}
@@ -195,14 +196,14 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// This function is used by fuzzers, ignore calls with bogus arguments count.
if (args.length() != 1 && args.length() != 2) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// This function is used by fuzzers to get coverage for optimizations
// in compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
@@ -210,24 +211,24 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// JSFunction::MarkForOptimization().
if (!function->shared()->allows_lazy_compilation()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// If function isn't compiled, compile it now.
if (!function->shared()->is_compiled() &&
!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// If the function is already optimized, just return.
if (function->IsOptimized() || function->shared()->HasAsmWasmData()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// If the function has optimized code, ensure that we check for it and return.
if (function->HasOptimizedCode()) {
DCHECK(function->ChecksOptimizationMarker());
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
@@ -256,7 +257,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
JSFunction::EnsureFeedbackVector(function);
function->MarkForOptimization(concurrency_mode);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
@@ -271,11 +272,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
while (!it.done() && stack_depth--) it.Advance();
- if (!it.done()) function = Handle<JSFunction>(it.frame()->function());
- if (function.is_null()) return isolate->heap()->undefined_value();
+ if (!it.done()) function = handle(it.frame()->function(), isolate);
+ if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
// If the function is already optimized, just return.
- if (function->IsOptimized()) return isolate->heap()->undefined_value();
+ if (function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
@@ -294,7 +295,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
it.frame(), AbstractCode::kMaxLoopNestingMarker);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -305,12 +306,12 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
// in compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
function->shared()->DisableOptimization(
BailoutReason::kOptimizationDisabledForTest);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
@@ -339,7 +340,8 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
bool sync_with_compiler_thread = true;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString()) return isolate->heap()->undefined_value();
+ if (!sync_object->IsString())
+ return ReadOnlyRoots(isolate).undefined_value();
Handle<String> sync = Handle<String>::cast(sync_object);
if (sync->IsOneByteEqualTo(STATIC_CHAR_VECTOR("no sync"))) {
sync_with_compiler_thread = false;
@@ -401,7 +403,7 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
isolate->concurrent_recompilation_enabled()) {
isolate->optimizing_compile_dispatcher()->Unblock();
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_GetDeoptCount) {
@@ -466,7 +468,7 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->ClearTypeFeedbackInfo();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
@@ -479,7 +481,7 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
ctrl.AllowAnySizeForAsync = allow_async;
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
@@ -487,14 +489,14 @@ RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
CHECK_EQ(args.length(), 0);
v8_isolate->SetWasmInstanceCallback(WasmInstanceOverride);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
isolate->heap()->NotifyContextDisposed(true);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -518,7 +520,7 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
}
}
#endif
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -530,7 +532,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
// MaybeObject*.
MaybeObject* maybe_object = reinterpret_cast<MaybeObject*>(args[0]);
- OFStream os(stdout);
+ StdoutStream os;
if (maybe_object->IsClearedWeakHeapObject()) {
os << "[weak cleared]";
} else {
@@ -595,14 +597,14 @@ RUNTIME_FUNCTION(Runtime_PrintWithNameForAssert) {
args[1]->ShortPrint();
PrintF("\n");
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugTrace) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
isolate->PrintStack(stdout);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
@@ -616,18 +618,18 @@ RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
RetainingPathOption option = RetainingPathOption::kDefault;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
- const char track_ephemeral_path[] = "track-ephemeral-path";
- if (str->IsOneByteEqualTo(STATIC_CHAR_VECTOR(track_ephemeral_path))) {
- option = RetainingPathOption::kTrackEphemeralPath;
+ const char track_ephemeron_path[] = "track-ephemeron-path";
+ if (str->IsOneByteEqualTo(STATIC_CHAR_VECTOR(track_ephemeron_path))) {
+ option = RetainingPathOption::kTrackEphemeronPath;
} else if (str->length() != 0) {
PrintF("Unexpected second argument of DebugTrackRetainingPath.\n");
PrintF("Expected an empty string or '%s', got '%s'.\n",
- track_ephemeral_path, str->ToCString().get());
+ track_ephemeron_path, str->ToCString().get());
}
}
isolate->heap()->AddRetainingPathTarget(object, option);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// This will not allocate (flatten the string), but it may run
@@ -652,7 +654,7 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
base::OS::DebugBreak();
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -664,7 +666,7 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
std::unique_ptr<char[]> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
@@ -677,7 +679,7 @@ RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
DCHECK(arg->IsFalse(isolate));
isolate->set_force_slow_path(false);
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_Abort) {
@@ -707,12 +709,6 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
}
-RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
- DCHECK_EQ(0, args.length());
- return Smi::FromInt(Natives::GetBuiltinsCount());
-}
-
-
RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
HandleScope scope(isolate);
#ifdef DEBUG
@@ -721,13 +717,13 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
if (!func->is_compiled() &&
!Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
- OFStream os(stdout);
+ StdoutStream os;
func->code()->Print(os);
os << std::endl;
#endif // DEBUG
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
namespace {
@@ -756,7 +752,7 @@ RUNTIME_FUNCTION(Runtime_TraceEnter) {
PrintIndentation(isolate);
JavaScriptFrame::PrintTop(isolate, stdout, true, false);
PrintF(" {\n");
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
@@ -784,7 +780,7 @@ RUNTIME_FUNCTION(Runtime_InNewSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(isolate->heap()->InNewSpace(obj));
+ return isolate->heap()->ToBoolean(Heap::InNewSpace(obj));
}
RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
@@ -793,14 +789,14 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
CONVERT_ARG_CHECKED(JSFunction, function, 0);
if (!function->shared()->HasAsmWasmData()) {
// Doesn't have wasm data.
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
if (function->shared()->HasBuiltinId() &&
function->shared()->builtin_id() == Builtins::kInstantiateAsmJs) {
// Hasn't been compiled yet.
- return isolate->heap()->false_value();
+ return ReadOnlyRoots(isolate).false_value();
}
- return isolate->heap()->true_value();
+ return ReadOnlyRoots(isolate).true_value();
}
namespace {
@@ -817,7 +813,7 @@ RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8_isolate->SetAllowCodeGenerationFromStringsCallback(
flag ? DisallowCodegenFromStringsCallback : nullptr);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
@@ -827,7 +823,7 @@ RUNTIME_FUNCTION(Runtime_DisallowWasmCodegen) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8_isolate->SetAllowWasmCodeGenerationCallback(
flag ? DisallowCodegenFromStringsCallback : nullptr);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_IsWasmCode) {
@@ -908,18 +904,17 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
- Handle<WasmCompiledModule> compiled_module(module_obj->compiled_module(),
- isolate);
+ wasm::NativeModule* native_module = module_obj->native_module();
size_t compiled_size =
- wasm::GetSerializedNativeModuleSize(isolate, compiled_module);
+ wasm::GetSerializedNativeModuleSize(isolate, native_module);
void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size);
Handle<JSArrayBuffer> array_buffer = isolate->factory()->NewJSArrayBuffer();
JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size);
if (!array_data ||
!wasm::SerializeNativeModule(
- isolate, compiled_module,
+ isolate, native_module,
{reinterpret_cast<uint8_t*>(array_data), compiled_size})) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
return *array_buffer;
}
@@ -954,35 +949,17 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
}
Handle<WasmModuleObject> module_object;
if (!maybe_module_object.ToHandle(&module_object)) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
return *module_object;
}
-RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
- HandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
- WasmInstanceObject::ValidateInstancesChainForTesting(isolate, module_obj,
- instance_count->value());
- return isolate->heap()->ToBoolean(true);
-}
-
-RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
- WasmModuleObject::ValidateStateForTesting(isolate, module_obj);
- return isolate->heap()->ToBoolean(true);
-}
-
RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
#ifdef VERIFY_HEAP
- object->ObjectVerify();
+ object->ObjectVerify(isolate);
#else
CHECK(object->IsObject());
if (object->IsHeapObject()) {
@@ -994,6 +971,18 @@ RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
return isolate->heap()->ToBoolean(true);
}
+RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
+ int instance_count = 0;
+ WeakArrayList* weak_instance_list = module_obj->weak_instance_list();
+ for (int i = 0; i < weak_instance_list->length(); ++i) {
+ if (weak_instance_list->Get(i)->IsWeakHeapObject()) instance_count++;
+ }
+ return Smi::FromInt(instance_count);
+}
+
RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
@@ -1012,7 +1001,7 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
WasmInstanceObject::GetOrCreateDebugInfo(instance);
WasmDebugInfo::RedirectToInterpreter(debug_info,
Vector<int>(&function_index, 1));
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
@@ -1041,7 +1030,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
: wasm::ExecutionEngine::kTurbofan;
wasm::TraceMemoryOperation(eng, info, func_index, pos - func_start,
mem_start);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
@@ -1049,9 +1038,14 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
- wasm::WasmCode* wasm_code =
- WasmExportedFunction::cast(*function)->GetWasmCode();
- return isolate->heap()->ToBoolean(wasm_code->is_liftoff());
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ wasm::NativeModule* native_module =
+ exp_fun->instance()->module_object()->native_module();
+ uint32_t func_index = exp_fun->function_index();
+ return isolate->heap()->ToBoolean(
+ native_module->has_code(func_index) &&
+ native_module->code(func_index)->is_liftoff());
}
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
@@ -1059,9 +1053,9 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- object->map()->CompleteInobjectSlackTracking();
+ object->map()->CompleteInobjectSlackTracking(isolate);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
@@ -1069,8 +1063,16 @@ RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
DisallowHeapAllocation no_gc;
CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
- instance->compiled_module()->GetNativeModule()->set_lazy_compile_frozen(true);
- return isolate->heap()->undefined_value();
+ instance->module_object()->native_module()->set_lazy_compile_frozen(true);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmMemoryHasFullGuardRegion) {
+ DCHECK_EQ(1, args.length());
+ DisallowHeapAllocation no_gc;
+ CONVERT_ARG_CHECKED(WasmMemoryObject, memory, 0);
+
+ return isolate->heap()->ToBoolean(memory->has_full_guard_region(isolate));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 66d88f9860..d68bb06e82 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -26,11 +26,11 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument);
if (!array_buffer->is_neuterable()) {
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
if (array_buffer->backing_store() == nullptr) {
CHECK_EQ(Smi::kZero, array_buffer->byte_length());
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Shared array buffers should never be neutered.
CHECK(!array_buffer->is_shared());
@@ -41,7 +41,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
isolate->heap()->UnregisterArrayBuffer(*array_buffer);
array_buffer->Neuter();
isolate->array_buffer_allocator()->Free(backing_store, byte_length);
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
@@ -121,7 +121,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
if (length <= 1) return *array;
Handle<FixedTypedArrayBase> elements(
- FixedTypedArrayBase::cast(array->elements()));
+ FixedTypedArrayBase::cast(array->elements()), isolate);
switch (array->type()) {
#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: { \
@@ -175,7 +175,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
Handle<Object> len;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, len,
- Object::GetProperty(obj, isolate->factory()->length_string()));
+ Object::GetProperty(isolate, obj, isolate->factory()->length_string()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len,
Object::ToLength(isolate, len));
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index a6f55f32ab..55c549a6cf 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -67,10 +67,11 @@ class ClearThreadInWasmScope {
RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
- Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
- isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ // {delta_pages} is checked to be a positive smi in the WasmGrowMemory builtin
+ // which calls this runtime function.
+ CONVERT_UINT32_ARG_CHECKED(delta_pages, 1);
// This runtime function is always being called from wasm code.
ClearThreadInWasmScope flag_scope(true);
@@ -79,8 +80,11 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
DCHECK_NULL(isolate->context());
isolate->set_context(instance->native_context());
- return *isolate->factory()->NewNumberFromInt(WasmMemoryObject::Grow(
- isolate, handle(instance->memory_object(), isolate), delta_pages));
+ int ret = WasmMemoryObject::Grow(
+ isolate, handle(instance->memory_object(), isolate), delta_pages);
+ // The WasmGrowMemory builtin which calls this runtime function expects us to
+ // always return a Smi.
+ return Smi::FromInt(ret);
}
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
@@ -122,7 +126,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
MessageTemplate::kWasmExceptionError));
isolate->set_wasm_caught_exception(*exception);
CONVERT_ARG_HANDLE_CHECKED(Smi, id, 0);
- CHECK(!JSReceiver::SetProperty(exception,
+ CHECK(!JSReceiver::SetProperty(isolate, exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeIdStr),
id, LanguageMode::kStrict)
@@ -130,12 +134,12 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
CONVERT_SMI_ARG_CHECKED(size, 1);
Handle<JSTypedArray> values =
isolate->factory()->NewJSTypedArray(ElementsKind::UINT16_ELEMENTS, size);
- CHECK(!JSReceiver::SetProperty(exception,
+ CHECK(!JSReceiver::SetProperty(isolate, exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeValuesStr),
values, LanguageMode::kStrict)
.is_null());
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmThrow) {
@@ -157,9 +161,9 @@ RUNTIME_FUNCTION(Runtime_WasmGetExceptionRuntimeId) {
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> tag;
- if (JSReceiver::GetProperty(exception,
+ if (JSReceiver::GetProperty(isolate, exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeIdStr))
.ToHandle(&tag)) {
@@ -179,9 +183,9 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
DCHECK_EQ(1, args.length());
Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> values_obj;
- if (JSReceiver::GetProperty(exception,
+ if (JSReceiver::GetProperty(isolate, exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeValuesStr))
.ToHandle(&values_obj)) {
@@ -207,9 +211,9 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj));
+ Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> values_obj;
- if (JSReceiver::GetProperty(exception,
+ if (JSReceiver::GetProperty(isolate, exception,
isolate->factory()->InternalizeUtf8String(
wasm::WasmException::kRuntimeValuesStr))
.ToHandle(&values_obj)) {
@@ -225,7 +229,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
}
}
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
@@ -233,7 +237,8 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
HandleScope scope(isolate);
CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[0]);
CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 1);
- Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate));
+ Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
+ isolate);
// The arg buffer is the raw pointer to the caller's stack. It looks like a
// Smi (lowest bit not set, as checked by IsSmi), but is no valid Smi. We just
@@ -265,9 +270,9 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
if (!success) {
DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
+ return ReadOnlyRoots(isolate).exception();
}
- return isolate->heap()->undefined_value();
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
@@ -289,19 +294,27 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
return isolate->stack_guard()->HandleInterrupts();
}
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_WasmCompileLazy) {
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance_on_stack, 0);
- // TODO(titzer): The location on the stack is not visited by the
- // roots visitor because the type of the frame is a special
- // WASM builtin. Reopen the handle in a handle scope as a workaround.
+RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
HandleScope scope(isolate);
- Handle<WasmInstanceObject> instance(*instance_on_stack, isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_SMI_ARG_CHECKED(func_index, 1);
ClearThreadInWasmScope wasm_flag(true);
- Address entrypoint = wasm::CompileLazy(isolate, instance);
- return MakePair(reinterpret_cast<Object*>(entrypoint), *instance);
+#ifdef DEBUG
+ StackFrameIterator it(isolate, isolate->thread_local_top());
+ // On top: C entry stub.
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+ // Next: the wasm lazy compile frame.
+ DCHECK_EQ(StackFrame::WASM_COMPILE_LAZY, it.frame()->type());
+ DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
+#endif
+
+ Address entrypoint = wasm::CompileLazy(
+ isolate, instance->module_object()->native_module(), func_index);
+ return reinterpret_cast<Object*>(entrypoint);
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 382bac7d2a..accb97d0e6 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -97,8 +97,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(GetWeakMapEntries, 2, 1) \
F(GetWeakSetValues, 2, 1) \
- F(IsJSMap, 1, 1) \
- F(IsJSSet, 1, 1) \
F(IsJSWeakMap, 1, 1) \
F(IsJSWeakSet, 1, 1) \
F(MapGrow, 1, 1) \
@@ -127,61 +125,32 @@ namespace internal {
F(IsDate, 1, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
- F(ChangeBreakOnException, 2, 1) \
- F(CheckExecutionState, 1, 1) \
F(ClearStepping, 0, 1) \
F(CollectGarbage, 1, 1) \
- F(DebugApplyInstrumentation, 1, 1) \
- F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugBreakAtEntry, 1, 1) \
F(DebugCollectCoverage, 0, 1) \
- F(DebugConstructedBy, 2, 1) \
- F(DebugEvaluate, 5, 1) \
- F(DebugEvaluateGlobal, 2, 1) \
- F(DebugGetInternalProperties, 1, 1) \
- F(DebugGetLoadedScripts, 0, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetPrototype, 1, 1) \
+ F(DebugGetLoadedScriptIds, 0, 1) \
F(DebugIsActive, 0, 1) \
F(DebugOnFunctionCall, 2, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(DebugPropertyKindFromDetails, 1, 1) \
F(DebugPushPromise, 1, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugSetScriptSource, 2, 1) \
+ F(DebugAsyncFunctionSuspended, 1, 1) \
+ F(DebugAsyncFunctionFinished, 2, 1) \
F(DebugToggleBlockCoverage, 1, 1) \
F(DebugTogglePreciseCoverage, 1, 1) \
- F(FunctionGetDebugName, 1, 1) \
F(FunctionGetInferredName, 1, 1) \
- F(GetAllScopesDetails, 4, 1) \
F(GetBreakLocations, 1, 1) \
- F(GetDebugContext, 0, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetFunctionScopeCount, 1, 1) \
- F(GetFunctionScopeDetails, 2, 1) \
F(GetGeneratorScopeCount, 1, 1) \
F(GetGeneratorScopeDetails, 2, 1) \
F(GetHeapUsage, 0, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 4, 1) \
- F(GetScript, 1, 1) \
F(HandleDebuggerStatement, 0, 1) \
F(IncBlockCounter, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 2, 1) \
F(ScheduleBreak, 0, 1) \
- F(ScriptLineCount, 1, 1) \
F(ScriptLocationFromLine2, 4, 1) \
- F(ScriptLocationFromLine, 4, 1) \
- F(ScriptPositionInfo2, 3, 1) \
- F(ScriptPositionInfo, 3, 1) \
- F(SetScopeVariableValue, 6, 1)
-
-#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
+ F(SetGeneratorScopeVariableValue, 4, 1) \
+ F(LiveEditPatchScript, 2, 1)
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInEnumerate, 1, 1) \
@@ -209,14 +178,12 @@ namespace internal {
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(Call, -1 /* >= 2 */, 1) \
- F(FunctionGetContextData, 1, 1) \
F(FunctionGetName, 1, 1) \
- F(FunctionGetScript, 1, 1) \
+ F(FunctionGetScriptSource, 1, 1) \
F(FunctionGetScriptId, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionToString, 1, 1) \
F(IsConstructor, 1, 1) \
F(IsFunction, 1, 1) \
F(SetCode, 2, 1) \
@@ -229,12 +196,9 @@ namespace internal {
F(AsyncGeneratorYield, 3, 1) \
F(CreateJSGeneratorObject, 2, 1) \
F(GeneratorClose, 1, 1) \
- F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetInputOrDebugPos, 1, 1) \
- F(GeneratorGetReceiver, 1, 1) \
- F(GeneratorGetResumeMode, 1, 1) \
- F(GeneratorGetSourcePosition, 1, 1)
+ F(GeneratorGetResumeMode, 1, 1)
#ifdef V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTL(F) \
@@ -256,7 +220,7 @@ namespace internal {
F(InternalCompare, 3, 1) \
F(InternalDateFormat, 2, 1) \
F(InternalNumberFormat, 2, 1) \
- F(IsInitializedIntlObject, 1, 1) \
+ F(IntlUnwrapReceiver, 5, 1) \
F(IsInitializedIntlObjectOfType, 2, 1) \
F(MarkAsInitializedIntlObjectOfType, 2, 1) \
F(PluralRulesSelect, 2, 1) \
@@ -318,20 +282,6 @@ namespace internal {
F(CreateObjectLiteral, 4, 1) \
F(CreateRegExpLiteral, 4, 1)
-#define FOR_EACH_INTRINSIC_LIVEEDIT(F) \
- F(LiveEditCheckAndDropActivations, 3, 1) \
- F(LiveEditCompareStrings, 2, 1) \
- F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
- F(LiveEditFixupScript, 2, 1) \
- F(LiveEditFunctionSetScript, 2, 1) \
- F(LiveEditFunctionSourceUpdated, 2, 1) \
- F(LiveEditGatherCompileInfo, 2, 1) \
- F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditReplaceFunctionCode, 2, 1) \
- F(LiveEditReplaceRefToNestedFunction, 3, 1) \
- F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditRestartFrame, 2, 1)
-
#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
#define FOR_EACH_INTRINSIC_MODULE(F) \
@@ -371,7 +321,6 @@ namespace internal {
F(DefineSetterPropertyUnchecked, 4, 1) \
F(DeleteProperty, 3, 1) \
F(GetFunctionName, 1, 1) \
- F(GetInterceptorInfo, 1, 1) \
F(GetOwnPropertyDescriptor, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetProperty, 2, 1) \
@@ -389,6 +338,8 @@ namespace internal {
F(ObjectEntriesSkipFastPath, 1, 1) \
F(ObjectHasOwnProperty, 2, 1) \
F(ObjectKeys, 1, 1) \
+ F(ObjectGetOwnPropertyNames, 1, 1) \
+ F(ObjectGetOwnPropertyNamesTryFast, 1, 1) \
F(ObjectValues, 1, 1) \
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
@@ -426,6 +377,7 @@ namespace internal {
F(PromiseHookAfter, 1, 1) \
F(PromiseHookBefore, 1, 1) \
F(PromiseHookInit, 2, 1) \
+ F(AwaitPromisesInit, 3, 1) \
F(PromiseMarkAsHandled, 1, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
F(PromiseResult, 1, 1) \
@@ -439,7 +391,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(CheckProxyGetSetTrapResult, 2, 1) \
F(CheckProxyHasTrap, 2, 1) \
- F(GetPropertyWithReceiver, 2, 1) \
+ F(GetPropertyWithReceiver, 3, 1) \
F(IsJSProxy, 1, 1) \
F(JSProxyGetHandler, 1, 1) \
F(JSProxyGetTarget, 1, 1) \
@@ -511,7 +463,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
F(CreatePrivateFieldSymbol, 0, 1) \
F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
- F(SymbolDescription, 1, 1) \
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -564,15 +515,9 @@ namespace internal {
F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
- F(IsJSError, 1, 1) \
- F(IsJSGeneratorObject, 1, 1) \
- F(IsJSMapIterator, 1, 1) \
- F(IsJSSetIterator, 1, 1) \
F(IsLiftoffFunction, 1, 1) \
- F(IsScriptWrapper, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(NativeScriptsCount, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
@@ -593,10 +538,10 @@ namespace internal {
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
- F(ValidateWasmInstancesChain, 2, 1) \
- F(ValidateWasmModuleState, 1, 1) \
+ F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
- F(WasmTraceMemory, 1, 1)
+ F(WasmTraceMemory, 1, 1) \
+ F(WasmMemoryHasFullGuardRegion, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferNeuter, 1, 1) \
@@ -614,15 +559,15 @@ namespace internal {
F(WasmExceptionGetElement, 1, 1) \
F(WasmExceptionSetElement, 2, 1) \
F(WasmGetExceptionRuntimeId, 0, 1) \
- F(WasmGrowMemory, 1, 1) \
+ F(WasmGrowMemory, 2, 1) \
F(WasmRunInterpreter, 2, 1) \
F(WasmStackGuard, 0, 1) \
F(WasmThrow, 0, 1) \
F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1)
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmCompileLazy, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(WasmCompileLazy, 1, 2) \
F(DebugBreakOnBytecode, 1, 2) \
F(LoadLookupSlotForCall, 1, 2)
@@ -654,7 +599,6 @@ namespace internal {
FOR_EACH_INTRINSIC_COMPILER(F) \
FOR_EACH_INTRINSIC_DATE(F) \
FOR_EACH_INTRINSIC_DEBUG(F) \
- FOR_EACH_INTRINSIC_ERROR(F) \
FOR_EACH_INTRINSIC_FORIN(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
@@ -663,7 +607,6 @@ namespace internal {
FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTL(F) \
FOR_EACH_INTRINSIC_LITERALS(F) \
- FOR_EACH_INTRINSIC_LIVEEDIT(F) \
FOR_EACH_INTRINSIC_MATHS(F) \
FOR_EACH_INTRINSIC_MODULE(F) \
FOR_EACH_INTRINSIC_NUMBERS(F) \
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 9a4a0aa25f..b9a7a1f8eb 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -137,7 +137,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
- return code_targets_[index];
+ return GetCodeTarget(index);
}
HeapObject* RelocInfo::target_object() {
@@ -156,7 +156,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -164,9 +164,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -182,13 +181,6 @@ void RelocInfo::set_target_external_reference(
icache_flush_mode);
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
-}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -229,7 +221,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
@@ -245,23 +237,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
// Operand constructors
Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
-int32_t Assembler::emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode) {
- DCHECK(RelocInfo::IsCodeTarget(rmode));
- RecordRelocInfo(rmode);
-
- size_t current = code_targets_.size();
- if (current > 0 && !target.is_null() &&
- code_targets_.back().address() == target.address()) {
- // Optimization if we keep jumping to the same code target.
- current--;
- } else {
- code_targets_.push_back(target);
- }
- return current;
-}
-
-
// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
// S390 Instruction!
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 130b7647d7..4b34bcb070 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -48,6 +48,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
@@ -258,7 +259,8 @@ Register ToRegister(int num) {
// Implementation of RelocInfo
const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::INTERNAL_REFERENCE;
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially
@@ -270,34 +272,27 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() { return false; }
-Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- Assembler::target_address_at(pc_, constant_pool_));
-}
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
-}
-
-void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(pc_, constant_pool_,
- static_cast<Address>(size), flush_mode);
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@@ -332,8 +327,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_ + request.offset());
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- IMMUTABLE, TENURED);
+ object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
set_target_address_at(pc, kNullAddress,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
@@ -343,7 +338,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
- code_targets_[index] = request.code_stub()->GetCode();
+ UpdateCodeTarget(index, request.code_stub()->GetCode());
break;
}
}
@@ -352,11 +347,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size) {
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- code_targets_.reserve(100);
-
+ ReserveCodeTargetSpace(100);
last_bound_pos_ = 0;
relocations_.reserve(128);
}
@@ -433,9 +428,9 @@ int Assembler::target_at(int pos) {
// check which type of branch this is 16 or 26 bit offset
Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
- if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
- imm16 <<= 1; // BRC immediate is in # of halfwords
+ imm16 <<= 1; // immediate is in # of halfwords
if (imm16 == 0) return kEndOfChain;
return pos + imm16;
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
@@ -446,6 +441,13 @@ int Assembler::target_at(int pos) {
imm32 <<= 1; // BR* + LARL treat immediate in # of halfwords
if (imm32 == 0) return kEndOfChain;
return pos + imm32;
+ } else if (BRXHG == opcode) {
+ // offset is in bits 16-31 of 48 bit instruction
+ instr = instr >> 16;
+ int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
+ imm16 <<= 1; // immediate is in # of halfwords
+ if (imm16 == 0) return kEndOfChain;
+ return pos + imm16;
}
// Unknown condition
@@ -460,10 +462,11 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
if (is_branch != nullptr) {
*is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
- opcode == BRCL || opcode == BRASL);
+ opcode == BRCL || opcode == BRASL || opcode == BRXH ||
+ opcode == BRXHG);
}
- if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
int16_t imm16 = target_pos - pos;
instr &= (~0xFFFF);
DCHECK(is_int16(imm16));
@@ -483,6 +486,15 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | imm32);
return;
+ } else if (BRXHG == opcode) {
+ // Immediate is in bits 16-31 of 48 bit instruction
+ int32_t imm16 = target_pos - pos;
+ instr &= (0xFFFF0000FFFF); // clear bits 16-31
+ imm16 &= 0xFFFF; // clear high halfword
+ imm16 <<= 16;
+ // Immediate is in # of halfwords
+ instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
+ return;
}
DCHECK(false);
}
@@ -490,10 +502,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Returns the maximum number of bits given instruction can address.
int Assembler::max_reach_from(int pos) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
-
// Check which type of instr. In theory, we can return
// the values below + 1, given offset is # of halfwords
- if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode|| BRXH == opcode ||
+ BRXHG == opcode) {
return 16;
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
BRASL == opcode) {
@@ -605,11 +617,6 @@ void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
}
}
-// 32-bit Store Multiple - short displacement (12-bits unsigned)
-void Assembler::stm(Register r1, Register r2, const MemOperand& src) {
- rs_form(STM, r1, r2, src.rb(), src.offset());
-}
-
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-s390.h .
@@ -645,617 +652,6 @@ void Assembler::nop(int type) {
}
}
-
-
-// RI1 format: <insn> R1,I2
-// +--------+----+----+------------------+
-// | OpCode | R1 |OpCd| I2 |
-// +--------+----+----+------------------+
-// 0 8 12 16 31
-#define RI1_FORM_EMIT(name, op) \
- void Assembler::name(Register r, const Operand& i2) { ri_form(op, r, i2); }
-
-void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
- DCHECK(is_uint12(op));
- DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate()));
- emit4bytes((op & 0xFF0) * B20 | r1.code() * B20 | (op & 0xF) * B16 |
- (i2.immediate() & 0xFFFF));
-}
-
-// RI2 format: <insn> M1,I2
-// +--------+----+----+------------------+
-// | OpCode | M1 |OpCd| I2 |
-// +--------+----+----+------------------+
-// 0 8 12 16 31
-#define RI2_FORM_EMIT(name, op) \
- void Assembler::name(Condition m, const Operand& i2) { ri_form(op, m, i2); }
-
-void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
- DCHECK(is_uint12(op));
- DCHECK(is_uint4(m1));
- DCHECK(op == BRC ? is_int16(i2.immediate()) : is_uint16(i2.immediate()));
- emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
- (i2.immediate() & 0xFFFF));
-}
-
-// RIE-f format: <insn> R1,R2,I3,I4,I5
-// +--------+----+----+------------------+--------+--------+
-// | OpCode | R1 | R2 | I3 | I4 | I5 | OpCode |
-// +--------+----+----+------------------+--------+--------+
-// 0 8 12 16 24 32 40 47
-void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
- const Operand& i3, const Operand& i4,
- const Operand& i5) {
- DCHECK(is_uint16(op));
- DCHECK(is_uint8(i3.immediate()));
- DCHECK(is_uint8(i4.immediate()));
- DCHECK(is_uint8(i5.immediate()));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(r2.code())) * B32 |
- (static_cast<uint64_t>(i3.immediate())) * B24 |
- (static_cast<uint64_t>(i4.immediate())) * B16 |
- (static_cast<uint64_t>(i5.immediate())) * B8 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// RIE format: <insn> R1,R3,I2
-// +--------+----+----+------------------+--------+--------+
-// | OpCode | R1 | R3 | I2 |////////| OpCode |
-// +--------+----+----+------------------+--------+--------+
-// 0 8 12 16 32 40 47
-#define RIE_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, const Operand& i2) { \
- rie_form(op, r1, r3, i2); \
- }
-
-void Assembler::rie_form(Opcode op, Register r1, Register r3,
- const Operand& i2) {
- DCHECK(is_uint16(op));
- DCHECK(is_int16(i2.immediate()));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(r3.code())) * B32 |
- (static_cast<uint64_t>(i2.immediate() & 0xFFFF)) * B16 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// RS1 format: <insn> R1,R3,D2(B2)
-// +--------+----+----+----+-------------+
-// | OpCode | R1 | R3 | B2 | D2 |
-// +--------+----+----+----+-------------+
-// 0 8 12 16 20 31
-#define RS1_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, Register b2, Disp d2) { \
- rs_form(op, r1, r3, b2, d2); \
- } \
- void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
- name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement()); \
- }
-
-void Assembler::rs_form(Opcode op, Register r1, Register r3, Register b2,
- const Disp d2) {
- DCHECK(is_uint12(d2));
- emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | b2.code() * B12 |
- d2);
-}
-
-// RS2 format: <insn> R1,M3,D2(B2)
-// +--------+----+----+----+-------------+
-// | OpCode | R1 | M3 | B2 | D2 |
-// +--------+----+----+----+-------------+
-// 0 8 12 16 20 31
-#define RS2_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Condition m3, Register b2, Disp d2) { \
- rs_form(op, r1, m3, b2, d2); \
- } \
- void Assembler::name(Register r1, Condition m3, const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement()); \
- }
-
-void Assembler::rs_form(Opcode op, Register r1, Condition m3, Register b2,
- const Disp d2) {
- DCHECK(is_uint12(d2));
- emit4bytes(op * B24 | r1.code() * B20 | m3 * B16 | b2.code() * B12 | d2);
-}
-
-// RSI format: <insn> R1,R3,I2
-// +--------+----+----+------------------+
-// | OpCode | R1 | R3 | RI2 |
-// +--------+----+----+------------------+
-// 0 8 12 16 31
-#define RSI_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, const Operand& i2) { \
- rsi_form(op, r1, r3, i2); \
- }
-
-void Assembler::rsi_form(Opcode op, Register r1, Register r3,
- const Operand& i2) {
- DCHECK(is_uint8(op));
- DCHECK(is_uint16(i2.immediate()));
- emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 |
- (i2.immediate() & 0xFFFF));
-}
-
-// RSL format: <insn> R1,R3,D2(B2)
-// +--------+----+----+----+-------------+--------+--------+
-// | OpCode | L1 | | B2 | D2 | | OpCode |
-// +--------+----+----+----+-------------+--------+--------+
-// 0 8 12 16 20 32 40 47
-#define RSL_FORM_EMIT(name, op) \
- void Assembler::name(Length l1, Register b2, Disp d2) { \
- rsl_form(op, l1, b2, d2); \
- }
-
-void Assembler::rsl_form(Opcode op, Length l1, Register b2, Disp d2) {
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(l1)) * B36 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2)) * B16 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// RXE format: <insn> R1,D2(X2,B2)
-// +--------+----+----+----+-------------+--------+--------+
-// | OpCode | R1 | X2 | B2 | D2 |////////| OpCode |
-// +--------+----+----+----+-------------+--------+--------+
-// 0 8 12 16 20 32 40 47
-#define RXE_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
- rxe_form(op, r1, x2, b2, d2); \
- } \
- void Assembler::name(Register r1, const MemOperand& opnd) { \
- name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
- opnd.getDisplacement()); \
- }
-
-void Assembler::rxe_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(x2.code())) * B32 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// RRS format: <insn> R1,R2,M3,D4(B4)
-// +--------+----+----+----+-------------+----+---+--------+
-// | OpCode | R1 | R2 | B4 | D4 | M3 |///| OpCode |
-// +--------+----+----+----+-------------+----+---+--------+
-// 0 8 12 16 20 32 36 40 47
-#define RRS_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r2, Register b4, Disp d4, \
- Condition m3) { \
- rrs_form(op, r1, r2, b4, d4, m3); \
- } \
- void Assembler::name(Register r1, Register r2, Condition m3, \
- const MemOperand& opnd) { \
- name(r1, r2, opnd.getBaseRegister(), opnd.getDisplacement(), m3); \
- }
-
-void Assembler::rrs_form(Opcode op, Register r1, Register r2, Register b4,
- Disp d4, Condition m3) {
- DCHECK(is_uint12(d4));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(r2.code())) * B32 |
- (static_cast<uint64_t>(b4.code())) * B28 |
- (static_cast<uint64_t>(d4)) * B16 |
- (static_cast<uint64_t>(m3)) << 12 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// RIS format: <insn> R1,I2,M3,D4(B4)
-// +--------+----+----+----+-------------+--------+--------+
-// | OpCode | R1 | M3 | B4 | D4 | I2 | OpCode |
-// +--------+----+----+----+-------------+--------+--------+
-// 0 8 12 16 20 32 40 47
-#define RIS_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Condition m3, Register b4, Disp d4, \
- const Operand& i2) { \
- ris_form(op, r1, m3, b4, d4, i2); \
- } \
- void Assembler::name(Register r1, const Operand& i2, Condition m3, \
- const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement(), i2); \
- }
-
-void Assembler::ris_form(Opcode op, Register r1, Condition m3, Register b4,
- Disp d4, const Operand& i2) {
- DCHECK(is_uint12(d4));
- DCHECK(is_uint16(op));
- DCHECK(is_uint8(i2.immediate()));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(m3)) * B32 |
- (static_cast<uint64_t>(b4.code())) * B28 |
- (static_cast<uint64_t>(d4)) * B16 |
- (static_cast<uint64_t>(i2.immediate())) << 8 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// S format: <insn> D2(B2)
-// +------------------+----+-------------+
-// | OpCode | B2 | D2 |
-// +------------------+----+-------------+
-// 0 16 20 31
-#define S_FORM_EMIT(name, op) \
- void Assembler::name(Register b1, Disp d2) { s_form(op, b1, d2); } \
- void Assembler::name(const MemOperand& opnd) { \
- name(opnd.getBaseRegister(), opnd.getDisplacement()); \
- }
-
-void Assembler::s_form(Opcode op, Register b1, Disp d2) {
- DCHECK(is_uint12(d2));
- emit4bytes(op << 16 | b1.code() * B12 | d2);
-}
-
-// SI format: <insn> D1(B1),I2
-// +--------+---------+----+-------------+
-// | OpCode | I2 | B1 | D1 |
-// +--------+---------+----+-------------+
-// 0 8 16 20 31
-#define SI_FORM_EMIT(name, op) \
- void Assembler::name(const Operand& i2, Register b1, Disp d1) { \
- si_form(op, i2, b1, d1); \
- } \
- void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
- name(i2, opnd.getBaseRegister(), opnd.getDisplacement()); \
- }
-
-void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
- emit4bytes((op & 0x00FF) << 24 | i2.immediate() * B16 | b1.code() * B12 | d1);
-}
-
-// SIY format: <insn> D1(B1),I2
-// +--------+---------+----+-------------+--------+--------+
-// | OpCode | I2 | B1 | DL1 | DH1 | OpCode |
-// +--------+---------+----+-------------+--------+--------+
-// 0 8 16 20 32 36 40 47
-#define SIY_FORM_EMIT(name, op) \
- void Assembler::name(const Operand& i2, Register b1, Disp d1) { \
- siy_form(op, i2, b1, d1); \
- } \
- void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
- name(i2, opnd.getBaseRegister(), opnd.getDisplacement()); \
- }
-
-void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
- DCHECK(is_uint20(d1) || is_int20(d1));
- DCHECK(is_uint16(op));
- DCHECK(is_uint8(i2.immediate()));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(i2.immediate())) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1 & 0x0FFF)) * B16 |
- (static_cast<uint64_t>(d1 & 0x0FF000)) >> 4 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// SIL format: <insn> D1(B1),I2
-// +------------------+----+-------------+-----------------+
-// | OpCode | B1 | D1 | I2 |
-// +------------------+----+-------------+-----------------+
-// 0 16 20 32 47
-#define SIL_FORM_EMIT(name, op) \
- void Assembler::name(Register b1, Disp d1, const Operand& i2) { \
- sil_form(op, b1, d1, i2); \
- } \
- void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
- name(opnd.getBaseRegister(), opnd.getDisplacement(), i2); \
- }
-
-void Assembler::sil_form(Opcode op, Register b1, Disp d1, const Operand& i2) {
- DCHECK(is_uint12(d1));
- DCHECK(is_uint16(op));
- DCHECK(is_uint16(i2.immediate()));
- uint64_t code = (static_cast<uint64_t>(op)) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(i2.immediate()));
- emit6bytes(code);
-}
-
-// RXF format: <insn> R1,R3,D2(X2,B2)
-// +--------+----+----+----+-------------+----+---+--------+
-// | OpCode | R3 | X2 | B2 | D2 | R1 |///| OpCode |
-// +--------+----+----+----+-------------+----+---+--------+
-// 0 8 12 16 20 32 36 40 47
-#define RXF_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, Register b2, Register x2, \
- Disp d2) { \
- rxf_form(op, r1, r3, b2, x2, d2); \
- } \
- void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
- name(r1, r3, opnd.getBaseRegister(), opnd.getIndexRegister(), \
- opnd.getDisplacement()); \
- }
-
-void Assembler::rxf_form(Opcode op, Register r1, Register r3, Register b2,
- Register x2, Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(r3.code())) * B36 |
- (static_cast<uint64_t>(x2.code())) * B32 |
- (static_cast<uint64_t>(b2.code())) * B28 |
- (static_cast<uint64_t>(d2)) * B16 |
- (static_cast<uint64_t>(r1.code())) * B12 |
- (static_cast<uint64_t>(op & 0x00FF));
- emit6bytes(code);
-}
-
-// SS1 format: <insn> D1(L,B1),D2(B3)
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | L | B1 | D1 | B2 | D2 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SS1_FORM_EMIT(name, op) \
- void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l) { \
- ss_form(op, l, b1, d1, b2, d2); \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
- Length length) { \
- name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
- opnd2.getBaseRegister(), opnd2.getDisplacement(), length); \
- }
-
-void Assembler::ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
- Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint8(op));
- DCHECK(is_uint8(l));
- uint64_t code =
- (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l)) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// SS2 format: <insn> D1(L1,B1), D2(L3,B3)
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | L1 | L2 | B1 | D1 | B2 | D2 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SS2_FORM_EMIT(name, op) \
- void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l1, \
- Length l2) { \
- ss_form(op, l1, l2, b1, d1, b2, d2); \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
- Length length1, Length length2) { \
- name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
- opnd2.getBaseRegister(), opnd2.getDisplacement(), length1, length2); \
- }
-
-void Assembler::ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
- Register b2, Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint8(op));
- DCHECK(is_uint4(l2));
- DCHECK(is_uint4(l1));
- uint64_t code =
- (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
- (static_cast<uint64_t>(l2)) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// SS3 format: <insn> D1(L1,B1), D2(I3,B2)
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | L1 | I3 | B1 | D1 | B2 | D2 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SS3_FORM_EMIT(name, op) \
- void Assembler::name(const Operand& i3, Register b1, Disp d1, Register b2, \
- Disp d2, Length l1) { \
- ss_form(op, l1, i3, b1, d1, b2, d2); \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
- Length length) { \
- DCHECK(false); \
- }
-void Assembler::ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
- Disp d1, Register b2, Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint8(op));
- DCHECK(is_uint4(l1));
- DCHECK(is_uint4(i3.immediate()));
- uint64_t code =
- (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
- (static_cast<uint64_t>(i3.immediate())) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// SS4 format: <insn> D1(R1,B1), D2(R3,B2)
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | R1 | R3 | B1 | D1 | B2 | D2 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SS4_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, Register b1, Disp d1, \
- Register b2, Disp d2) { \
- ss_form(op, r1, r3, b1, d1, b2, d2); \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
- DCHECK(false); \
- }
-void Assembler::ss_form(Opcode op, Register r1, Register r3, Register b1,
- Disp d1, Register b2, Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint8(op));
- uint64_t code = (static_cast<uint64_t>(op)) * B40 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(r3.code())) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 |
- (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// SS5 format: <insn> D1(R1,B1), D2(R3,B2)
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | R1 | R3 | B2 | D2 | B4 | D4 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SS5_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, Register b2, Disp d2, \
- Register b4, Disp d4) { \
- ss_form(op, r1, r3, b2, d2, b4, d4); /*SS5 use the same form as SS4*/ \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
- DCHECK(false); \
- }
-
-#define SS6_FORM_EMIT(name, op) SS1_FORM_EMIT(name, op)
-
-// SSE format: <insn> D1(B1),D2(B2)
-// +------------------+----+-------------+----+------------+
-// | OpCode | B1 | D1 | B2 | D2 |
-// +------------------+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SSE_FORM_EMIT(name, op) \
- void Assembler::name(Register b1, Disp d1, Register b2, Disp d2) { \
- sse_form(op, b1, d1, b2, d2); \
- } \
- void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
- name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
- opnd2.getBaseRegister(), opnd2.getDisplacement()); \
- }
-void Assembler::sse_form(Opcode op, Register b1, Disp d1, Register b2,
- Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint16(op));
- uint64_t code = (static_cast<uint64_t>(op)) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 |
- (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// SSF format: <insn> R3, D1(B1),D2(B2),R3
-// +--------+----+----+----+-------------+----+------------+
-// | OpCode | R3 |OpCd| B1 | D1 | B2 | D2 |
-// +--------+----+----+----+-------------+----+------------+
-// 0 8 12 16 20 32 36 47
-#define SSF_FORM_EMIT(name, op) \
- void Assembler::name(Register r3, Register b1, Disp d1, Register b2, \
- Disp d2) { \
- ssf_form(op, r3, b1, d1, b2, d2); \
- } \
- void Assembler::name(Register r3, const MemOperand& opnd1, \
- const MemOperand& opnd2) { \
- name(r3, opnd1.getBaseRegister(), opnd1.getDisplacement(), \
- opnd2.getBaseRegister(), opnd2.getDisplacement()); \
- }
-
-void Assembler::ssf_form(Opcode op, Register r3, Register b1, Disp d1,
- Register b2, Disp d2) {
- DCHECK(is_uint12(d2));
- DCHECK(is_uint12(d1));
- DCHECK(is_uint12(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
- (static_cast<uint64_t>(r3.code())) * B36 |
- (static_cast<uint64_t>(op & 0x00F)) * B32 |
- (static_cast<uint64_t>(b1.code())) * B28 |
- (static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(b2.code())) * B12 |
- (static_cast<uint64_t>(d2));
- emit6bytes(code);
-}
-
-// RRF1 format: <insn> R1,R2,R3
-// +------------------+----+----+----+----+
-// | OpCode | R3 | | R1 | R2 |
-// +------------------+----+----+----+----+
-// 0 16 20 24 28 31
-#define RRF1_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r2, Register r3) { \
- rrf1_form(op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code()); \
- }
-
-void Assembler::rrf1_form(Opcode op, Register r1, Register r2, Register r3) {
- uint32_t code = op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code();
- emit4bytes(code);
-}
-
-void Assembler::rrf1_form(uint32_t code) { emit4bytes(code); }
-
-// RRF2 format: <insn> R1,R2,M3
-// +------------------+----+----+----+----+
-// | OpCode | M3 | | R1 | R2 |
-// +------------------+----+----+----+----+
-// 0 16 20 24 28 31
-#define RRF2_FORM_EMIT(name, op) \
- void Assembler::name(Condition m3, Register r1, Register r2) { \
- rrf2_form(op << 16 | m3 * B12 | r1.code() * B4 | r2.code()); \
- }
-
-void Assembler::rrf2_form(uint32_t code) { emit4bytes(code); }
-
-// RRF3 format: <insn> R1,R2,R3,M4
-// +------------------+----+----+----+----+
-// | OpCode | R3 | M4 | R1 | R2 |
-// +------------------+----+----+----+----+
-// 0 16 20 24 28 31
-#define RRF3_FORM_EMIT(name, op) \
- void Assembler::name(Register r3, Conition m4, Register r1, Register r2) { \
- rrf3_form(op << 16 | r3.code() * B12 | m4 * B8 | r1.code() * B4 | \
- r2.code()); \
- }
-
-void Assembler::rrf3_form(uint32_t code) { emit4bytes(code); }
-
-// RRF-e format: <insn> R1,M3,R2,M4
-// +------------------+----+----+----+----+
-// | OpCode | M3 | M4 | R1 | R2 |
-// +------------------+----+----+----+----+
-// 0 16 20 24 28 31
-void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
- Register r2) {
- uint32_t code = op << 16 | m3 * B12 | m4 * B8 | r1.code() * B4 | r2.code();
- emit4bytes(code);
-}
-
-// end of S390 Instruction generation
-
-// start of S390 instruction
-SS1_FORM_EMIT(ed, ED)
-SS1_FORM_EMIT(mvn, MVN)
-SS1_FORM_EMIT(nc, NC)
-SI_FORM_EMIT(ni, NI)
-RI1_FORM_EMIT(nilh, NILH)
-RI1_FORM_EMIT(nill, NILL)
-RI1_FORM_EMIT(oill, OILL)
-RI1_FORM_EMIT(tmll, TMLL)
-SS1_FORM_EMIT(tr, TR)
-S_FORM_EMIT(ts, TS)
-
// -------------------------
// Load Address Instructions
// -------------------------
@@ -1264,613 +660,39 @@ void Assembler::larl(Register r1, Label* l) {
larl(r1, Operand(branch_offset(l)));
}
-// -----------------
-// Load Instructions
-// -----------------
-// Load Halfword Immediate (32)
-void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
-
-// Load Halfword Immediate (64)
-void Assembler::lghi(Register r, const Operand& imm) { ri_form(LGHI, r, imm); }
-
-// -------------------------
-// Load Logical Instructions
-// -------------------------
-// Load On Condition R-R (32)
-void Assembler::locr(Condition m3, Register r1, Register r2) {
- rrf2_form(LOCR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
-}
-
-// Load On Condition R-R (64)
-void Assembler::locgr(Condition m3, Register r1, Register r2) {
- rrf2_form(LOCGR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
-}
-
-// -------------------
-// Branch Instructions
-// -------------------
-// Branch on Count (64)
-// Branch Relative and Save (32)
-void Assembler::bras(Register r, const Operand& opnd) {
- ri_form(BRAS, r, opnd);
-}
-
-// Branch relative on Condition (32)
-void Assembler::brc(Condition c, const Operand& opnd) { ri_form(BRC, c, opnd); }
-
-// Branch On Count (32)
-void Assembler::brct(Register r1, const Operand& imm) {
- // BRCT encodes # of halfwords, so divide by 2.
- int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
- Operand halfwordOp = Operand(numHalfwords);
- halfwordOp.setBits(16);
- ri_form(BRCT, r1, halfwordOp);
-}
-
-// Branch On Count (32)
-void Assembler::brctg(Register r1, const Operand& imm) {
- // BRCTG encodes # of halfwords, so divide by 2.
- int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
- Operand halfwordOp = Operand(numHalfwords);
- halfwordOp.setBits(16);
- ri_form(BRCTG, r1, halfwordOp);
-}
-
-// --------------------
-// Compare Instructions
-// --------------------
-// Compare Halfword Immediate (32)
-void Assembler::chi(Register r, const Operand& opnd) { ri_form(CHI, r, opnd); }
-
-// Compare Halfword Immediate (64)
-void Assembler::cghi(Register r, const Operand& opnd) {
- ri_form(CGHI, r, opnd);
-}
-
-// ----------------------------
-// Compare Logical Instructions
-// ----------------------------
-// Compare Immediate (Mem - Imm) (8)
-void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
- si_form(CLI, imm, opnd.rb(), opnd.offset());
-}
-
-// Compare Immediate (Mem - Imm) (8)
-void Assembler::cliy(const MemOperand& opnd, const Operand& imm) {
- siy_form(CLIY, imm, opnd.rb(), opnd.offset());
-}
-
-// Compare logical - mem to mem operation
-void Assembler::clc(const MemOperand& opnd1, const MemOperand& opnd2,
- Length length) {
- ss_form(CLC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
- opnd2.getBaseRegister(), opnd2.getDisplacement());
-}
-
-// ----------------------------
-// Test Under Mask Instructions
-// ----------------------------
-// Test Under Mask (Mem - Imm) (8)
-void Assembler::tm(const MemOperand& opnd, const Operand& imm) {
- si_form(TM, imm, opnd.rb(), opnd.offset());
-}
-
-// Test Under Mask (Mem - Imm) (8)
-void Assembler::tmy(const MemOperand& opnd, const Operand& imm) {
- siy_form(TMY, imm, opnd.rb(), opnd.offset());
-}
-
-// -------------------------------
-// Rotate and Insert Selected Bits
-// -------------------------------
-// Rotate-And-Insert-Selected-Bits
-void Assembler::risbg(Register dst, Register src, const Operand& startBit,
- const Operand& endBit, const Operand& shiftAmt,
- bool zeroBits) {
- // High tag the top bit of I4/EndBit to zero out any unselected bits
- if (zeroBits)
- rie_f_form(RISBG, dst, src, startBit, Operand(endBit.immediate() | 0x80),
- shiftAmt);
- else
- rie_f_form(RISBG, dst, src, startBit, endBit, shiftAmt);
-}
-
-// Rotate-And-Insert-Selected-Bits
-void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
- const Operand& endBit, const Operand& shiftAmt,
- bool zeroBits) {
- // High tag the top bit of I4/EndBit to zero out any unselected bits
- if (zeroBits)
- rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.immediate() | 0x80),
- shiftAmt);
- else
- rie_f_form(RISBGN, dst, src, startBit, endBit, shiftAmt);
-}
-
-// ---------------------------
-// Move Character Instructions
-// ---------------------------
-// Move character - mem to mem operation
-void Assembler::mvc(const MemOperand& opnd1, const MemOperand& opnd2,
- uint32_t length) {
- ss_form(MVC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
- opnd2.getBaseRegister(), opnd2.getDisplacement());
-}
-
-// -----------------------
-// 32-bit Add Instructions
-// -----------------------
-// Add Halfword Immediate (32)
-void Assembler::ahi(Register r1, const Operand& i2) { ri_form(AHI, r1, i2); }
-
-// Add Halfword Immediate (32)
-void Assembler::ahik(Register r1, Register r3, const Operand& i2) {
- rie_form(AHIK, r1, r3, i2);
-}
-
-// Add Register-Register-Register (32)
-void Assembler::ark(Register r1, Register r2, Register r3) {
- rrf1_form(ARK, r1, r2, r3);
-}
-
-// Add Storage-Imm (32)
-void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
- DCHECK(is_int8(imm.immediate()));
- DCHECK(is_int20(opnd.offset()));
- siy_form(ASI, Operand(0xFF & imm.immediate()), opnd.rb(),
- 0xFFFFF & opnd.offset());
-}
-
-// -----------------------
-// 64-bit Add Instructions
-// -----------------------
-// Add Halfword Immediate (64)
-void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
-
-// Add Halfword Immediate (64)
-void Assembler::aghik(Register r1, Register r3, const Operand& i2) {
- rie_form(AGHIK, r1, r3, i2);
-}
-
-// Add Register-Register-Register (64)
-void Assembler::agrk(Register r1, Register r2, Register r3) {
- rrf1_form(AGRK, r1, r2, r3);
-}
-
-// Add Storage-Imm (64)
-void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
- DCHECK(is_int8(imm.immediate()));
- DCHECK(is_int20(opnd.offset()));
- siy_form(AGSI, Operand(0xFF & imm.immediate()), opnd.rb(),
- 0xFFFFF & opnd.offset());
-}
-
-// -------------------------------
-// 32-bit Add Logical Instructions
-// -------------------------------
-// Add Logical Register-Register-Register (32)
-void Assembler::alrk(Register r1, Register r2, Register r3) {
- rrf1_form(ALRK, r1, r2, r3);
-}
-
-// -------------------------------
-// 64-bit Add Logical Instructions
-// -------------------------------
-// Add Logical Register-Register-Register (64)
-void Assembler::algrk(Register r1, Register r2, Register r3) {
- rrf1_form(ALGRK, r1, r2, r3);
-}
-
-// ----------------------------
-// 32-bit Subtract Instructions
-// ----------------------------
-// Subtract Register-Register-Register (32)
-void Assembler::srk(Register r1, Register r2, Register r3) {
- rrf1_form(SRK, r1, r2, r3);
-}
-
-// ----------------------------
-// 64-bit Subtract Instructions
-// ----------------------------
-// Subtract Register-Register-Register (64)
-void Assembler::sgrk(Register r1, Register r2, Register r3) {
- rrf1_form(SGRK, r1, r2, r3);
-}
-
-// ------------------------------------
-// 32-bit Subtract Logical Instructions
-// ------------------------------------
-// Subtract Logical Register-Register-Register (32)
-void Assembler::slrk(Register r1, Register r2, Register r3) {
- rrf1_form(SLRK, r1, r2, r3);
-}
-
-// ------------------------------------
-// 64-bit Subtract Logical Instructions
-// ------------------------------------
-// Subtract Logical Register-Register-Register (64)
-void Assembler::slgrk(Register r1, Register r2, Register r3) {
- rrf1_form(SLGRK, r1, r2, r3);
-}
-
-// ----------------------------
-// 32-bit Multiply Instructions
-// ----------------------------
-// Multiply Halfword Immediate (32)
-void Assembler::mhi(Register r1, const Operand& opnd) {
- ri_form(MHI, r1, opnd);
-}
-
-// Multiply Single Register (32)
-void Assembler::msrkc(Register r1, Register r2, Register r3) {
- rrf1_form(MSRKC, r1, r2, r3);
-}
-
-// Multiply Single Register (64)
-void Assembler::msgrkc(Register r1, Register r2, Register r3) {
- rrf1_form(MSGRKC, r1, r2, r3);
-}
-
-// ----------------------------
-// 64-bit Multiply Instructions
-// ----------------------------
-// Multiply Halfword Immediate (64)
-void Assembler::mghi(Register r1, const Operand& opnd) {
- ri_form(MGHI, r1, opnd);
-}
-
-// --------------------
-// Bitwise Instructions
-// --------------------
-// AND Register-Register-Register (32)
-void Assembler::nrk(Register r1, Register r2, Register r3) {
- rrf1_form(NRK, r1, r2, r3);
-}
-
-// AND Register-Register-Register (64)
-void Assembler::ngrk(Register r1, Register r2, Register r3) {
- rrf1_form(NGRK, r1, r2, r3);
-}
-
-// OR Register-Register-Register (32)
-void Assembler::ork(Register r1, Register r2, Register r3) {
- rrf1_form(ORK, r1, r2, r3);
-}
-
-// OR Register-Register-Register (64)
-void Assembler::ogrk(Register r1, Register r2, Register r3) {
- rrf1_form(OGRK, r1, r2, r3);
-}
-
-// XOR Register-Register-Register (32)
-void Assembler::xrk(Register r1, Register r2, Register r3) {
- rrf1_form(XRK, r1, r2, r3);
-}
-
-// XOR Register-Register-Register (64)
-void Assembler::xgrk(Register r1, Register r2, Register r3) {
- rrf1_form(XGRK, r1, r2, r3);
-}
-
-// XOR Storage-Storage
-void Assembler::xc(const MemOperand& opnd1, const MemOperand& opnd2,
- Length length) {
- ss_form(XC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
- opnd2.getBaseRegister(), opnd2.getDisplacement());
-}
-
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
GrowBuffer(space_needed);
}
}
-// Shift Left Single Logical (32)
-void Assembler::sll(Register r1, Register opnd) {
- DCHECK(opnd != r0);
- rs_form(SLL, r1, r0, opnd, 0);
-}
-
-// Shift Left Single Logical (32)
-void Assembler::sll(Register r1, const Operand& opnd) {
- rs_form(SLL, r1, r0, r0, opnd.immediate());
-}
-
-// Shift Left Double Logical (64)
-void Assembler::sldl(Register r1, Register b2, const Operand& opnd) {
- DCHECK_EQ(r1.code() % 2, 0);
- rs_form(SLDL, r1, r0, b2, opnd.immediate());
-}
-
-// Shift Right Single Logical (32)
-void Assembler::srl(Register r1, Register opnd) {
- DCHECK(opnd != r0);
- rs_form(SRL, r1, r0, opnd, 0);
-}
-
-// Shift Right Double Arith (64)
-void Assembler::srda(Register r1, Register b2, const Operand& opnd) {
- DCHECK_EQ(r1.code() % 2, 0);
- rs_form(SRDA, r1, r0, b2, opnd.immediate());
-}
-
-// Shift Right Double Logical (64)
-void Assembler::srdl(Register r1, Register b2, const Operand& opnd) {
- DCHECK_EQ(r1.code() % 2, 0);
- rs_form(SRDL, r1, r0, b2, opnd.immediate());
-}
-
-// Shift Right Single Logical (32)
-void Assembler::srl(Register r1, const Operand& opnd) {
- rs_form(SRL, r1, r0, r0, opnd.immediate());
-}
-
-// Shift Left Single (32)
-void Assembler::sla(Register r1, Register opnd) {
- DCHECK(opnd != r0);
- rs_form(SLA, r1, r0, opnd, 0);
-}
-
-// Shift Left Single (32)
-void Assembler::sla(Register r1, const Operand& opnd) {
- rs_form(SLA, r1, r0, r0, opnd.immediate());
-}
-
-// Shift Right Single (32)
-void Assembler::sra(Register r1, Register opnd) {
- DCHECK(opnd != r0);
- rs_form(SRA, r1, r0, opnd, 0);
-}
-
-// Shift Right Single (32)
-void Assembler::sra(Register r1, const Operand& opnd) {
- rs_form(SRA, r1, r0, r0, opnd.immediate());
-}
-
-// Shift Right Double
-void Assembler::srda(Register r1, const Operand& opnd) {
- DCHECK_EQ(r1.code() % 2, 0);
- rs_form(SRDA, r1, r0, r0, opnd.immediate());
-}
-
-// Shift Right Double Logical
-void Assembler::srdl(Register r1, const Operand& opnd) {
- DCHECK_EQ(r1.code() % 2, 0);
- rs_form(SRDL, r1, r0, r0, opnd.immediate());
-}
-
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
- int32_t target_index = emit_code_target(target, rmode);
+ RecordRelocInfo(rmode);
+ int32_t target_index = AddCodeTarget(target);
brasl(r14, Operand(target_index));
}
void Assembler::call(CodeStub* stub) {
EnsureSpace ensure_space(this);
RequestHeapObject(HeapObjectRequest(stub));
- int32_t target_index =
- emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
+ RecordRelocInfo(RelocInfo::CODE_TARGET);
+ int32_t target_index = AddCodeTarget(Handle<Code>());
brasl(r14, Operand(target_index));
}
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
Condition cond) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
- int32_t target_index = emit_code_target(target, rmode);
+ RecordRelocInfo(rmode);
+ int32_t target_index = AddCodeTarget(target);
brcl(cond, Operand(target_index));
}
-// 32-bit Load Multiple - short displacement (12-bits unsigned)
-void Assembler::lm(Register r1, Register r2, const MemOperand& src) {
- rs_form(LM, r1, r2, src.rb(), src.offset());
-}
-
-// 32-bit Compare and Swap
-void Assembler::cs(Register r1, Register r2, const MemOperand& src) {
- rs_form(CS, r1, r2, src.rb(), src.offset());
-}
-
-// Move integer (32)
-void Assembler::mvhi(const MemOperand& opnd1, const Operand& i2) {
- sil_form(MVHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
-}
-
-// Move integer (64)
-void Assembler::mvghi(const MemOperand& opnd1, const Operand& i2) {
- sil_form(MVGHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
-}
-
-// Insert Immediate (high high)
-void Assembler::iihh(Register r1, const Operand& opnd) {
- ri_form(IIHH, r1, opnd);
-}
-
-// Insert Immediate (high low)
-void Assembler::iihl(Register r1, const Operand& opnd) {
- ri_form(IIHL, r1, opnd);
-}
-
-// Insert Immediate (low high)
-void Assembler::iilh(Register r1, const Operand& opnd) {
- ri_form(IILH, r1, opnd);
-}
-
-// Insert Immediate (low low)
-void Assembler::iill(Register r1, const Operand& opnd) {
- ri_form(IILL, r1, opnd);
-}
-
-// GPR <-> FPR Instructions
-
-// Floating point instructions
-//
-// Add Register-Storage (LB)
-void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(ADB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Add Register-Storage (LB)
-void Assembler::aeb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(AEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Sub Register-Storage (LB)
-void Assembler::seb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(SEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Divide Register-Storage (LB)
-void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Divide Register-Storage (LB)
-void Assembler::deb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(DEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Multiply Register-Storage (LB)
-void Assembler::mdb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(MDB, Register::from_code(r1.code()), opnd.rb(), opnd.rx(),
- opnd.offset());
-}
-
-// Multiply Register-Storage (LB)
-void Assembler::meeb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(MEEB, Register::from_code(r1.code()), opnd.rb(), opnd.rx(),
- opnd.offset());
-}
-
-// Subtract Register-Storage (LB)
-void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(SDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-void Assembler::ceb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(CEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(CDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Square Root (LB)
-void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
- rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Convert to Fixed point (64<-S)
-void Assembler::cgebr(Condition m, Register r1, DoubleRegister r2) {
- rrfe_form(CGEBR, m, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed point (64<-L)
-void Assembler::cgdbr(Condition m, Register r1, DoubleRegister r2) {
- rrfe_form(CGDBR, m, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed point (32<-L)
-void Assembler::cfdbr(Condition m, Register r1, DoubleRegister r2) {
- rrfe_form(CFDBR, m, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed Logical (64<-L)
-void Assembler::clgdbr(Condition m3, Condition m4, Register r1,
- DoubleRegister r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CLGDBR, m3, m4, r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed Logical (64<-F32)
-void Assembler::clgebr(Condition m3, Condition m4, Register r1,
- DoubleRegister r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CLGEBR, m3, m4, r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed Logical (32<-F64)
-void Assembler::clfdbr(Condition m3, Condition m4, Register r1,
- DoubleRegister r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CLFDBR, m3, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Convert to Fixed Logical (32<-F32)
-void Assembler::clfebr(Condition m3, Condition m4, Register r1,
- DoubleRegister r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CLFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Convert from Fixed Logical (L<-64)
-void Assembler::celgbr(Condition m3, Condition m4, DoubleRegister r1,
- Register r2) {
- DCHECK_EQ(m3, Condition(0));
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CELGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
- r2);
-}
-
-// Convert from Fixed Logical (F32<-32)
-void Assembler::celfbr(Condition m3, Condition m4, DoubleRegister r1,
- Register r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CELFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed Logical (L<-64)
-void Assembler::cdlgbr(Condition m3, Condition m4, DoubleRegister r1,
- Register r2) {
- DCHECK_EQ(m3, Condition(0));
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CDLGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
- r2);
-}
-
-// Convert from Fixed Logical (L<-32)
-void Assembler::cdlfbr(Condition m3, Condition m4, DoubleRegister r1,
- Register r2) {
- DCHECK_EQ(m4, Condition(0));
- rrfe_form(CDLFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed point (S<-32)
-void Assembler::cefbr(Condition m3, DoubleRegister r1, Register r2) {
- rrfe_form(CEFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
-}
-
-// Convert to Fixed point (32<-S)
-void Assembler::cfebr(Condition m3, Register r1, DoubleRegister r2) {
- rrfe_form(CFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
-}
-
-// Load (L <- S)
-void Assembler::ldeb(DoubleRegister d1, const MemOperand& opnd) {
- rxe_form(LDEB, Register::from_code(d1.code()), opnd.rx(), opnd.rb(),
- opnd.offset());
-}
-
-// Load FP Integer
-void Assembler::fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
- rrf2_form(FIEBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
-}
-
-// Load FP Integer
-void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
- rrf2_form(FIDBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
-}
-
// end of S390instructions
bool Assembler::IsNop(SixByteInstr instr, int type) {
@@ -1972,10 +794,11 @@ void Assembler::dp(uintptr_t data) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (options().disable_reloc_info_for_patching) return;
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
- (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
- !emit_debug_code())) {
+ (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code())) {
return;
}
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 9d9a04d34f..f73897d13e 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -347,43 +347,31 @@ C_REGISTERS(DECLARE_C_REGISTER)
// Class Operand represents a shifter operand in data processing instructions
// defining immediate numbers and masks
-typedef uint8_t Length;
-
-struct Mask {
- uint8_t mask;
- uint8_t value() { return mask; }
- static Mask from_value(uint8_t input) {
- DCHECK_LE(input, 0x0F);
- Mask m = {input};
- return m;
- }
-};
-
class Operand BASE_EMBEDDED {
public:
// immediate
- INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
- : rmode_(rmode)) {
+ V8_INLINE explicit Operand(intptr_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rmode_(rmode) {
value_.immediate = immediate;
}
- INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
- INLINE(explicit Operand(const ExternalReference& f)
- : rmode_(RelocInfo::EXTERNAL_REFERENCE)) {
+ V8_INLINE static Operand Zero() { return Operand(static_cast<intptr_t>(0)); }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
+ V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
// rm
- INLINE(explicit Operand(Register rm));
+ V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
// Return true if this is a register operand.
- INLINE(bool is_reg() const) { return rm_.is_valid(); }
+ V8_INLINE bool is_reg() const { return rm_.is_valid(); }
bool must_output_reloc_info(const Assembler* assembler) const;
@@ -500,9 +488,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -547,10 +533,10 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
- INLINE(static Address target_address_at(Address pc, Address constant_pool));
- INLINE(static void set_target_address_at(
+ V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
+ V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -558,7 +544,7 @@ class Assembler : public AssemblerBase {
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return to.
- INLINE(static Address return_address_from_call_start(Address pc));
+ V8_INLINE static Address return_address_from_call_start(Address pc);
inline Handle<Object> code_target_object_handle_at(Address pc);
// This sets the branch destination.
@@ -608,10 +594,6 @@ class Assembler : public AssemblerBase {
static constexpr int kCallSequenceLength = 8;
#endif
- static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
- return ((cr.code() * CRWIDTH) + crbit);
- }
-
// ---------------------------------------------------------------------------
// Code generation
@@ -625,64 +607,6 @@ class Assembler : public AssemblerBase {
return (value & mask_value) << shift;
}
- // Declare generic instruction formats by fields
- inline void e_format(Opcode opcode) {
- emit2bytes(getfield<uint16_t, 2, 0, 16>(opcode));
- }
-
- inline void i_format(Opcode opcode, int f1) {
- emit2bytes(getfield<uint16_t, 2, 0, 8>(opcode) |
- getfield<uint16_t, 2, 8, 16>(f1));
- }
-
- inline void ie_format(Opcode opcode, int f1, int f2) {
- emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
- getfield<uint32_t, 4, 24, 28>(f1) |
- getfield<uint32_t, 4, 28, 32>(f2));
- }
- inline void mii_format(Opcode opcode, int f1, int f2, int f3) {
- emit6bytes(
- getfield<uint64_t, 6, 0, 8>(opcode) | getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 24>(f2) | getfield<uint64_t, 6, 24, 48>(f3));
- }
-
- inline void ri_format(Opcode opcode, int f1, int f2) {
- uint32_t op1 = opcode >> 4;
- uint32_t op2 = opcode & 0xf;
- emit4bytes(
- getfield<uint32_t, 4, 0, 8>(op1) | getfield<uint32_t, 4, 8, 12>(f1) |
- getfield<uint32_t, 4, 12, 16>(op2) | getfield<uint32_t, 4, 16, 32>(f2));
- }
-
- inline void rie_1_format(Opcode opcode, int f1, int f2, int f3, int f4) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- emit6bytes(
- getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
- getfield<uint64_t, 6, 32, 36>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
- }
-
- inline void rie_2_format(Opcode opcode, int f1, int f2, int f3, int f4) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- emit6bytes(
- getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
- getfield<uint64_t, 6, 32, 40>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
- }
-
- inline void rie_3_format(Opcode opcode, int f1, int f2, int f3, int f4,
- int f5) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- emit6bytes(
- getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 24>(f3) |
- getfield<uint64_t, 6, 24, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
- getfield<uint64_t, 6, 40, 48>(op2));
- }
-
#define DECLARE_S390_RIL_AB_INSTRUCTIONS(name, op_name, op_value) \
template <class R1> \
inline void name(R1 r1, const Operand& i2) { \
@@ -706,17 +630,6 @@ class Assembler : public AssemblerBase {
#undef DECLARE_S390_RIL_AB_INSTRUCTIONS
#undef DECLARE_S390_RIL_C_INSTRUCTIONS
- inline void ris_format(Opcode opcode, int f1, int f2, int f3, int f4,
- int f5) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- emit6bytes(
- getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 20>(f3) |
- getfield<uint64_t, 6, 20, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
- getfield<uint64_t, 6, 40, 48>(op2));
- }
-
#define DECLARE_S390_RR_INSTRUCTIONS(name, op_name, op_value) \
inline void name(Register r1, Register r2) { \
rr_format(op_name, r1.code(), r2.code()); \
@@ -765,22 +678,16 @@ class Assembler : public AssemblerBase {
void lzdr(DoubleRegister r1) { rre_format(LZDR, r1.code(), 0); }
#undef DECLARE_S390_RRE_INSTRUCTIONS
- inline void rrf_format(Opcode opcode, int f1, int f2, int f3, int f4) {
- emit4bytes(
- getfield<uint32_t, 4, 0, 16>(opcode) |
- getfield<uint32_t, 4, 16, 20>(f1) | getfield<uint32_t, 4, 20, 24>(f2) |
- getfield<uint32_t, 4, 24, 28>(f3) | getfield<uint32_t, 4, 28, 32>(f4));
- }
-
-#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value) \
- template <class R1> \
- inline void name(R1 r1, Register x2, Register b2, Disp d2) { \
- rx_format(op_name, r1.code(), x2.code(), b2.code(), d2); \
- } \
- template <class R1> \
- inline void name(R1 r1, const MemOperand& opnd) { \
- name(r1, opnd.getIndexRegister(), \
- opnd.getBaseRegister(), opnd.getDisplacement()); \
+#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1> \
+ inline void name(R1 r1, Register x2, Register b2, const Operand& d2) { \
+ rx_format(op_name, r1.code(), x2.code(), b2.code(), \
+ d2.immediate()); \
+ } \
+ template <class R1> \
+ inline void name(R1 r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
+ Operand(opnd.getDisplacement())); \
}
inline void rx_format(Opcode opcode, int f1, int f2, int f3, int f4) {
@@ -796,22 +703,22 @@ class Assembler : public AssemblerBase {
void bc(Condition cond, const MemOperand& opnd) {
bc(cond, opnd.getIndexRegister(),
- opnd.getBaseRegister(), opnd.getDisplacement());
+ opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
}
- void bc(Condition cond, Register x2, Register b2, Disp d2) {
- rx_format(BC, cond, x2.code(), b2.code(), d2);
+ void bc(Condition cond, Register x2, Register b2, const Operand& d2) {
+ rx_format(BC, cond, x2.code(), b2.code(), d2.immediate());
}
#undef DECLARE_S390_RX_INSTRUCTIONS
-#define DECLARE_S390_RXY_INSTRUCTIONS(name, op_name, op_value) \
- template <class R1, class R2> \
- inline void name(R1 r1, R2 r2, Register b2, Disp d2) { \
- rxy_format(op_name, r1.code(), r2.code(), b2.code(), d2); \
- } \
- template <class R1> \
- inline void name(R1 r1, const MemOperand& opnd) { \
- name(r1, opnd.getIndexRegister(), \
- opnd.getBaseRegister(), opnd.getDisplacement()); \
+#define DECLARE_S390_RXY_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ inline void name(R1 r1, R2 r2, Register b2, const Operand& d2) { \
+ rxy_format(op_name, r1.code(), r2.code(), b2.code(), d2.immediate()); \
+ } \
+ template <class R1> \
+ inline void name(R1 r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
+ Operand(opnd.getDisplacement())); \
}
inline void rxy_format(Opcode opcode, int f1, int f2, int f3, int f4) {
@@ -829,14 +736,15 @@ class Assembler : public AssemblerBase {
void pfd(Condition cond, const MemOperand& opnd) {
pfd(cond, opnd.getIndexRegister(),
- opnd.getBaseRegister(), opnd.getDisplacement());
+ opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
}
- void pfd(Condition cond, Register x2, Register b2, Disp d2) {
- rxy_format(PFD, cond, x2.code(), b2.code(), d2);
+ void pfd(Condition cond, Register x2, Register b2, const Operand& d2) {
+ rxy_format(PFD, cond, x2.code(), b2.code(), d2.immediate());
}
#undef DECLARE_S390_RXY_INSTRUCTIONS
-inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
+
+inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
DCHECK(is_int20(f4));
DCHECK(is_uint16(op));
uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
@@ -850,28 +758,519 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
}
#define DECLARE_S390_RSY_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, Register b2, Disp d2 = 0) { \
- rsy_form(op_name, r1.code(), r3.code(), b2.code(), d2); \
+ void name(Register r1, Register r3, Register b2, \
+ const Operand& d2 = Operand::Zero()) { \
+ rsy_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
} \
void name(Register r1, Register r3, Operand d2) { \
- name(r1, r3, r0, d2.immediate()); \
+ name(r1, r3, r0, d2); \
} \
void name(Register r1, Register r3, const MemOperand& opnd) { \
- name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_A_OPCODE_LIST(DECLARE_S390_RSY_A_INSTRUCTIONS);
#undef DECLARE_S390_RSY_A_INSTRUCTIONS
#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m3, Register b2, Disp d2) { \
- rsy_form(op_name, r1.code(), m3, b2.code(), d2); \
+ void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
+ rsy_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
} \
void name(Register r1, Condition m3, const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_B_OPCODE_LIST(DECLARE_S390_RSY_B_INSTRUCTIONS);
#undef DECLARE_S390_RSY_B_INSTRUCTIONS
+
+inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
+ uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
+ getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) |
+ getfield<uint32_t, 4, 16, 20>(f3) |
+ getfield<uint32_t, 4, 20, 32>(f4);
+ emit4bytes(code);
+}
+
+#define DECLARE_S390_RS_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, Register b2, const Operand& d2) { \
+ rs_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ }
+ S390_RS_A_OPCODE_LIST(DECLARE_S390_RS_A_INSTRUCTIONS);
+#undef DECLARE_S390_RS_A_INSTRUCTIONS
+
+#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
+ rs_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Condition m3, const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ }
+ S390_RS_B_OPCODE_LIST(DECLARE_S390_RS_B_INSTRUCTIONS);
+#undef DECLARE_S390_RS_B_INSTRUCTIONS
+
+#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
+ void name(Register r1, Register r2, const Operand& opnd = \
+ Operand::Zero()) { \
+ DCHECK(r2 != r0); \
+ rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
+ } \
+ void name(Register r1, const Operand& opnd) { \
+ rs_format(opcode, r1.code(), r0.code(), r0.code(), opnd.immediate()); \
+ }
+ DECLARE_S390_RS_SHIFT_FORMAT(sll, SLL)
+ DECLARE_S390_RS_SHIFT_FORMAT(srl, SRL)
+ DECLARE_S390_RS_SHIFT_FORMAT(sla, SLA)
+ DECLARE_S390_RS_SHIFT_FORMAT(sra, SRA)
+ DECLARE_S390_RS_SHIFT_FORMAT(sldl, SLDL)
+ DECLARE_S390_RS_SHIFT_FORMAT(srda, SRDA)
+ DECLARE_S390_RS_SHIFT_FORMAT(srdl, SRDL)
+#undef DECLARE_S390_RS_SHIFT_FORMAT
+
+
+inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4, int f5 = 0) {
+ DCHECK(is_uint12(f4));
+ DCHECK(is_uint16(op));
+ uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
+ getfield<uint64_t, 6, 32, 36>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0xff));
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RXE_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register x2, Register b2, const Operand& d2, \
+ Condition m3 = static_cast<Condition>(0)) { \
+ rxe_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate(), \
+ m3); \
+ } \
+ template<class _R1Type> \
+ void name(_R1Type r1, const MemOperand& opnd) { \
+ name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
+ Operand(opnd.offset())); \
+ }
+ S390_RXE_OPCODE_LIST(DECLARE_S390_RXE_INSTRUCTIONS);
+#undef DECLARE_S390_RXE_INSTRUCTIONS
+
+
+inline void ri_format(Opcode opcode, int f1, int f2) {
+ uint32_t op1 = opcode >> 4;
+ uint32_t op2 = opcode & 0xf;
+ emit4bytes(getfield<uint32_t, 4, 0, 8>(op1) |
+ getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(op2) |
+ getfield<uint32_t, 4, 16, 32>(f2));
+}
+
+#define DECLARE_S390_RI_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r, const Operand& i2) { \
+ DCHECK(is_uint12(op_name)); \
+ DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
+ ri_format(op_name, r.code(), i2.immediate()); \
+ }
+ S390_RI_A_OPCODE_LIST(DECLARE_S390_RI_A_INSTRUCTIONS);
+#undef DECLARE_S390_RI_A_INSTRUCTIONS
+
+#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, const Operand& imm) { \
+ /* 2nd argument encodes # of halfwords, so divide by 2. */ \
+ int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2; \
+ Operand halfwordOp = Operand(numHalfwords); \
+ halfwordOp.setBits(16); \
+ ri_format(op_name, r1.code(), halfwordOp.immediate()); \
+ }
+ S390_RI_B_OPCODE_LIST(DECLARE_S390_RI_B_INSTRUCTIONS);
+#undef DECLARE_S390_RI_B_INSTRUCTIONS
+
+#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Condition m, const Operand& i2) { \
+ DCHECK(is_uint12(op_name)); \
+ DCHECK(is_uint4(m)); \
+ DCHECK(op_name == BRC ? \
+ is_int16(i2.immediate()) : is_uint16(i2.immediate())); \
+ ri_format(op_name, m, i2.immediate()); \
+ }
+ S390_RI_C_OPCODE_LIST(DECLARE_S390_RI_C_INSTRUCTIONS);
+#undef DECLARE_S390_RI_C_INSTRUCTIONS
+
+
+inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
+ uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
+ getfield<uint32_t, 4, 16, 20>(f1) |
+ getfield<uint32_t, 4, 20, 24>(f2) |
+ getfield<uint32_t, 4, 24, 28>(f3) |
+ getfield<uint32_t, 4, 28, 32>(f4);
+ emit4bytes(code);
+}
+
+#define DECLARE_S390_RRF_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m4, Register r2, Register r3) { \
+ rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
+ } \
+ void name(Register r1, Register r2, Register r3) { \
+ name(r1, Condition(0), r2, r3); \
+ }
+ S390_RRF_A_OPCODE_LIST(DECLARE_S390_RRF_A_INSTRUCTIONS);
+#undef DECLARE_S390_RRF_A_INSTRUCTIONS
+
+
+#define DECLARE_S390_RRF_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m4, Register r2, Register r3) { \
+ rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
+ } \
+ void name(Register r1, Register r2, Register r3) { \
+ name(r1, Condition(0), r2, r3); \
+ }
+ S390_RRF_B_OPCODE_LIST(DECLARE_S390_RRF_B_INSTRUCTIONS);
+#undef DECLARE_S390_RRF_B_INSTRUCTIONS
+
+
+#define DECLARE_S390_RRF_C_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class R1, class R2> \
+ void name(Condition m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
+ }
+ S390_RRF_C_OPCODE_LIST(DECLARE_S390_RRF_C_INSTRUCTIONS);
+#undef DECLARE_S390_RRF_C_INSTRUCTIONS
+
+
+#define DECLARE_S390_RRF_D_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class R1, class R2> \
+ void name(Condition m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
+ }
+ S390_RRF_D_OPCODE_LIST(DECLARE_S390_RRF_D_INSTRUCTIONS);
+#undef DECLARE_S390_RRF_D_INSTRUCTIONS
+
+
+#define DECLARE_S390_RRF_E_INSTRUCTIONS(name, op_name, op_value) \
+ template <class M3, class M4, class R1, class R2> \
+ void name(M3 m3, M4 m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class M3, class R1, class R2> \
+ void name(M3 m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
+ }
+ S390_RRF_E_OPCODE_LIST(DECLARE_S390_RRF_E_INSTRUCTIONS);
+#undef DECLARE_S390_RRF_E_INSTRUCTIONS
+
+enum FIDBRA_FLAGS {
+ FIDBRA_CURRENT_ROUNDING_MODE = 0,
+ FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
+ // ...
+ FIDBRA_ROUND_TOWARD_0 = 5,
+ FIDBRA_ROUND_TOWARD_POS_INF = 6,
+ FIDBRA_ROUND_TOWARD_NEG_INF = 7
+};
+
+
+inline void rsi_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint16(f3) || is_int16(f3));
+ uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
+ getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) |
+ getfield<uint32_t, 4, 16, 32>(f3);
+ emit4bytes(code);
+}
+
+#define DECLARE_S390_RSI_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
+ }
+ S390_RSI_OPCODE_LIST(DECLARE_S390_RSI_INSTRUCTIONS);
+#undef DECLARE_S390_RSI_INSTRUCTIONS
+
+
+inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
+ int f5) {
+ DCHECK(is_uint16(op));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 16>(f1) |
+ getfield<uint64_t, 6, 16, 20>(f2) |
+ getfield<uint64_t, 6, 20, 32>(f3) |
+ getfield<uint64_t, 6, 32, 36>(f4) |
+ getfield<uint64_t, 6, 36, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RSL_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& l1, Register b1, const Operand& d1) { \
+ uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
+ rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
+ }
+ S390_RSL_A_OPCODE_LIST(DECLARE_S390_RSL_A_INSTRUCTIONS);
+#undef DECLARE_S390_RSL_A_INSTRUCTIONS
+
+#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& l2, Register b2, const Operand& d2, \
+ Register r1, Condition m3) { \
+ uint16_t L = static_cast<uint16_t>(l2.immediate()); \
+ rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
+ }
+ S390_RSL_B_OPCODE_LIST(DECLARE_S390_RSL_B_INSTRUCTIONS);
+#undef DECLARE_S390_RSL_B_INSTRUCTIONS
+
+
+inline void s_format(Opcode op, int f1, int f2) {
+ DCHECK_NE(op & 0xff00, 0);
+ DCHECK(is_uint12(f2));
+ uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
+ getfield<uint32_t, 4, 16, 20>(f1) |
+ getfield<uint32_t, 4, 20, 32>(f2);
+ emit4bytes(code);
+}
+
+#define DECLARE_S390_S_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d2) { \
+ Opcode op = op_name; \
+ if ((op & 0xFF00) == 0) { \
+ op = (Opcode)(op << 8); \
+ } \
+ s_format(op, b1.code(), d2.immediate()); \
+ } \
+ void name(const MemOperand& opnd) { \
+ Operand d2 = Operand(opnd.getDisplacement()); \
+ name(opnd.getBaseRegister(), d2); \
+ }
+ S390_S_OPCODE_LIST(DECLARE_S390_S_INSTRUCTIONS);
+#undef DECLARE_S390_S_INSTRUCTIONS
+
+
+inline void si_format(Opcode op, int f1, int f2, int f3) {
+ uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
+ getfield<uint32_t, 4, 8, 16>(f1) |
+ getfield<uint32_t, 4, 16, 20>(f2) |
+ getfield<uint32_t, 4, 20, 32>(f3);
+ emit4bytes(code);
+}
+
+#define DECLARE_S390_SI_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& i2, Register b1, const Operand& d1) { \
+ si_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ }
+ S390_SI_OPCODE_LIST(DECLARE_S390_SI_INSTRUCTIONS);
+#undef DECLARE_S390_SI_INSTRUCTIONS
+
+
+inline void siy_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint20(f3) || is_int20(f3));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(f1) || is_int8(f1));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 16>(f1) |
+ getfield<uint64_t, 6, 16, 20>(f2) |
+ getfield<uint64_t, 6, 20, 32>(f3) |
+ getfield<uint64_t, 6, 32, 40>(f3 >> 12) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_SIY_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& i2, Register b1, const Operand& d1) { \
+ siy_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ }
+ S390_SIY_OPCODE_LIST(DECLARE_S390_SIY_INSTRUCTIONS);
+#undef DECLARE_S390_SIY_INSTRUCTIONS
+
+
+inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f4));
+ DCHECK(is_uint16(op));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4) |
+ getfield<uint64_t, 6, 32, 36>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RRS_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r2, Register b4, const Operand& d4, \
+ Condition m3) { \
+ rrs_format(op_name, r1.code(), r2.code(), b4.code(), d4.immediate(), \
+ m3); \
+ } \
+ void name(Register r1, Register r2, Condition m3, \
+ const MemOperand& opnd) { \
+ name(r1, r2, opnd.getBaseRegister(), \
+ Operand(opnd.getDisplacement()), m3); \
+ }
+ S390_RRS_OPCODE_LIST(DECLARE_S390_RRS_INSTRUCTIONS);
+#undef DECLARE_S390_RRS_INSTRUCTIONS
+
+
+inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f3));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(f5));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4) |
+ getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RIS_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m3, Register b4, const Operand& d4, \
+ const Operand& i2) { \
+ ris_format(op_name, r1.code(), m3, b4.code(), d4.immediate(), \
+ i2.immediate()); \
+ } \
+ void name(Register r1, const Operand& i2, Condition m3, \
+ const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), \
+ Operand(opnd.getDisplacement()), i2); \
+ }
+ S390_RIS_OPCODE_LIST(DECLARE_S390_RIS_INSTRUCTIONS);
+#undef DECLARE_S390_RIS_INSTRUCTIONS
+
+
+inline void sil_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint12(f2));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint16(f3));
+ uint64_t code = getfield<uint64_t, 6, 0, 16>(op) |
+ getfield<uint64_t, 6, 16, 20>(f1) |
+ getfield<uint64_t, 6, 20, 32>(f2) |
+ getfield<uint64_t, 6, 32, 48>(f3);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_SIL_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d1, const Operand& i2) { \
+ sil_format(op_name, b1.code(), d1.immediate(), i2.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
+ }
+ S390_SIL_OPCODE_LIST(DECLARE_S390_SIL_INSTRUCTIONS);
+#undef DECLARE_S390_SIL_INSTRUCTIONS
+
+
+inline void rie_d_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 32, 40>(f4) |
+ getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RIE_D_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rie_d_format(op_name, r1.code(), r3.code(), i2.immediate(), 0); \
+ }
+ S390_RIE_D_OPCODE_LIST(DECLARE_S390_RIE_D_INSTRUCTIONS)
+#undef DECLARE_S390_RIE_D_INSTRUCTIONS
+
+
+inline void rie_e_format(Opcode opcode, int f1, int f2, int f3) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RIE_E_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rie_e_format(op_name, r1.code(), r3.code(), i2.immediate()); \
+ }
+ S390_RIE_E_OPCODE_LIST(DECLARE_S390_RIE_E_INSTRUCTIONS)
+#undef DECLARE_S390_RIE_E_INSTRUCTIONS
+
+
+inline void rie_f_format(Opcode opcode, int f1, int f2, int f3, int f4,
+ int f5) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
+ getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 24>(f3) |
+ getfield<uint64_t, 6, 24, 32>(f4) |
+ getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_RIE_F_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register dst, Register src, const Operand& startBit, \
+ const Operand& endBit, const Operand& shiftAmt) { \
+ DCHECK(is_uint8(startBit.immediate())); \
+ DCHECK(is_uint8(endBit.immediate())); \
+ DCHECK(is_uint8(shiftAmt.immediate())); \
+ rie_f_format(op_name, dst.code(), src.code(), startBit.immediate(), \
+ endBit.immediate(), shiftAmt.immediate()); \
+ }
+ S390_RIE_F_OPCODE_LIST(DECLARE_S390_RIE_F_INSTRUCTIONS)
+#undef DECLARE_S390_RIE_F_INSTRUCTIONS
+
+
+inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f5));
+ DCHECK(is_uint12(f3));
+ DCHECK(is_uint8(f1));
+ DCHECK(is_uint8(op));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op) |
+ getfield<uint64_t, 6, 8, 16>(f1) |
+ getfield<uint64_t, 6, 16, 20>(f2) |
+ getfield<uint64_t, 6, 20, 32>(f3) |
+ getfield<uint64_t, 6, 32, 36>(f4) |
+ getfield<uint64_t, 6, 36, 48>(f5);
+ emit6bytes(code);
+}
+
+#define DECLARE_S390_SS_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d1, Register b2, \
+ const Operand& d2, const Operand& length) { \
+ ss_a_format(op_name, length.immediate(), b1.code(), d1.immediate(), \
+ b2.code(), d2.immediate()); \
+ } \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2, \
+ const Operand& length) { \
+ ss_a_format(op_name, length.immediate(), \
+ opnd1.getBaseRegister().code(), \
+ opnd1.getDisplacement(), opnd2.getBaseRegister().code(), \
+ opnd2.getDisplacement()); \
+ }
+ S390_SS_A_OPCODE_LIST(DECLARE_S390_SS_A_INSTRUCTIONS)
+#undef DECLARE_S390_SS_A_INSTRUCTIONS
+
+
// Helper for unconditional branch to Label with update to save register
void b(Register r, Label* l) {
int32_t halfwords = branch_offset(l) / 2;
@@ -919,6 +1318,59 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
void bunordered(Register r) { b(unordered, r); }
void bordered(Register r) { b(ordered, r); }
+ // wrappers around asm instr
+ void brxh(Register dst, Register inc, Label* L) {
+ int offset_halfwords = branch_offset(L) / 2;
+ CHECK(is_int16(offset_halfwords));
+ brxh(dst, inc, Operand(offset_halfwords));
+ }
+
+ void brxhg(Register dst, Register inc, Label* L) {
+ int offset_halfwords = branch_offset(L) / 2;
+ CHECK(is_int16(offset_halfwords));
+ brxhg(dst, inc, Operand(offset_halfwords));
+ }
+
+ template <class R1, class R2>
+ void ledbr(R1 r1, R2 r2) {
+ ledbra(Condition(0), Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cdfbr(R1 r1, R2 r2) {
+ cdfbra(Condition(0), Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cdgbr(R1 r1, R2 r2) {
+ cdgbra(Condition(0), Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cegbr(R1 r1, R2 r2) {
+ cegbra(Condition(0), Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cgebr(Condition m3, R1 r1, R2 r2) {
+ cgebra(m3, Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cgdbr(Condition m3, R1 r1, R2 r2) {
+ cgdbra(m3, Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cfdbr(Condition m3, R1 r1, R2 r2) {
+ cfdbra(m3, Condition(0), r1, r2);
+ }
+
+ template <class R1, class R2>
+ void cfebr(Condition m3, R1 r1, R2 r2) {
+ cfebra(m3, Condition(0), r1, r2);
+ }
+
// ---------------------------------------------------------------------------
// Code generation
@@ -954,121 +1406,6 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
-#define I_FORM(name) void name(const Operand& i)
-
-#define RR_FORM(name) void name(Register r1, Register r2)
-
-#define RR2_FORM(name) void name(Condition m1, Register r2)
-
-#define RI1_FORM(name) void name(Register r, const Operand& i)
-
-#define RI2_FORM(name) void name(Condition m, const Operand& i)
-
-#define RIE_FORM(name) void name(Register r1, Register R3, const Operand& i)
-
-#define RIE_F_FORM(name) \
- void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
- const Operand& i5)
-
-#define RXE_FORM(name) \
- void name(Register r1, const MemOperand& opnd); \
- void name(Register r1, Register b2, Register x2, Disp d2)
-
-#define RXF_FORM(name) \
- void name(Register r1, Register r3, const MemOperand& opnd); \
- void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
-
-#define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
-
-#define RIS_FORM(name) \
- void name(Register r1, Condition m3, Register b4, Disp d4, \
- const Operand& i2); \
- void name(Register r1, const Operand& i2, Condition m3, \
- const MemOperand& opnd)
-
-#define SI_FORM(name) \
- void name(const MemOperand& opnd, const Operand& i); \
- void name(const Operand& i2, Register b1, Disp d1)
-
-#define SIL_FORM(name) \
- void name(Register b1, Disp d1, const Operand& i2); \
- void name(const MemOperand& opnd, const Operand& i2)
-
-#define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
-
-#define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
-
-#define RRF3_FORM(name) \
- void name(Register r3, Condition m4, Register r1, Register r2)
-
-#define RS1_FORM(name) \
- void name(Register r1, Register r3, const MemOperand& opnd); \
- void name(Register r1, Register r3, Register b2, Disp d2)
-
-#define RS2_FORM(name) \
- void name(Register r1, Condition m3, const MemOperand& opnd); \
- void name(Register r1, Condition m3, Register b2, Disp d2)
-
-#define RSE_FORM(name) \
- void name(Register r1, Register r3, const MemOperand& opnd); \
- void name(Register r1, Register r3, Register b2, Disp d2)
-
-#define RSL_FORM(name) \
- void name(Length l, Register b2, Disp d2); \
- void name(const MemOperand& opnd)
-
-#define RSY1_FORM(name) \
- void name(Register r1, Register r3, Register b2, Disp d2); \
- void name(Register r1, Register r3, const MemOperand& opnd)
-
-#define RSY2_FORM(name) \
- void name(Register r1, Condition m3, Register b2, Disp d2); \
- void name(Register r1, Condition m3, const MemOperand& opnd)
-
-#define RRS_FORM(name) \
- void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
- void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
-
-#define S_FORM(name) \
- void name(Register b2, Disp d2); \
- void name(const MemOperand& opnd)
-
-#define SIY_FORM(name) \
- void name(const Operand& i2, Register b1, Disp d1); \
- void name(const MemOperand& opnd, const Operand& i)
-
-#define SS1_FORM(name) \
- void name(Register b1, Disp d1, Register b3, Disp d2, Length length); \
- void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length)
-
-#define SS2_FORM(name) \
- void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length1, \
- Length length2); \
- void name(Register b1, Disp d1, Register b2, Disp d2, Length l1, Length l2)
-
-#define SS3_FORM(name) \
- void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length); \
- void name(const Operand& i3, Register b1, Disp d1, Register b2, Disp d2, \
- Length l1)
-
-#define SS4_FORM(name) \
- void name(const MemOperand& opnd1, const MemOperand& opnd2); \
- void name(Register r1, Register r3, Register b1, Disp d1, Register b2, \
- Disp d2)
-
-#define SS5_FORM(name) \
- void name(const MemOperand& opnd1, const MemOperand& opnd2); \
- void name(Register r1, Register r3, Register b3, Disp d2, Register b4, \
- Disp d4)
-
-#define SSE_FORM(name) \
- void name(Register b1, Disp d1, Register b2, Disp d2); \
- void name(const MemOperand& opnd1, const MemOperand& opnd2)
-
-#define SSF_FORM(name) \
- void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
- void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
-
#define DECLARE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
void name(DoubleRegister v1, DoubleRegister v2, Condition m5, Condition m4, \
Condition m3) { \
@@ -1118,201 +1455,9 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
static_cast<Condition>(3));
}
- // S390 instruction sets
- RXE_FORM(ddb);
- SS1_FORM(ed);
- RRF2_FORM(fidbr);
- RI1_FORM(iihh);
- RI1_FORM(iihl);
- RI1_FORM(iilh);
- RI1_FORM(iill);
- RSY1_FORM(loc);
- RXE_FORM(mdb);
- SS4_FORM(mvck);
- SSF_FORM(mvcos);
- SS4_FORM(mvcs);
- SS1_FORM(mvn);
- SS1_FORM(nc);
- SI_FORM(ni);
- RI1_FORM(nilh);
- RI1_FORM(nill);
- RI1_FORM(oill);
- RXE_FORM(sdb);
- RS1_FORM(srdl);
- RI1_FORM(tmll);
- SS1_FORM(tr);
- S_FORM(ts);
-
// Load Address Instructions
void larl(Register r, Label* l);
- // Load Instructions
- void lhi(Register r, const Operand& imm);
- void lghi(Register r, const Operand& imm);
-
- // Load Multiple Instructions
- void lm(Register r1, Register r2, const MemOperand& src);
-
- // Load On Condition Instructions
- void locr(Condition m3, Register r1, Register r2);
- void locgr(Condition m3, Register r1, Register r2);
-
- // Store Instructions
-
- // Store Multiple Instructions
- void stm(Register r1, Register r2, const MemOperand& src);
-
- // Compare Instructions
- void chi(Register r, const Operand& opnd);
- void cghi(Register r, const Operand& opnd);
-
- // Compare Logical Instructions
- void cli(const MemOperand& mem, const Operand& imm);
- void cliy(const MemOperand& mem, const Operand& imm);
- void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
-
- // Compare and Swap Instructions
- void cs(Register r1, Register r2, const MemOperand& src);
-
- // Test Under Mask Instructions
- void tm(const MemOperand& mem, const Operand& imm);
- void tmy(const MemOperand& mem, const Operand& imm);
-
- // Shift Instructions (32)
- void sll(Register r1, Register opnd);
- void sll(Register r1, const Operand& opnd);
- void srl(Register r1, Register opnd);
- void srl(Register r1, const Operand& opnd);
- void sra(Register r1, Register opnd);
- void sra(Register r1, const Operand& opnd);
- void sla(Register r1, Register opnd);
- void sla(Register r1, const Operand& opnd);
-
- // Shift Instructions (64)
- void srda(Register r1, const Operand& opnd);
- void srdl(Register r1, const Operand& opnd);
- void sldl(Register r1, Register b2, const Operand& opnd);
- void srdl(Register r1, Register b2, const Operand& opnd);
- void srda(Register r1, Register b2, const Operand& opnd);
-
- // Rotate and Insert Selected Bits
- void risbg(Register dst, Register src, const Operand& startBit,
- const Operand& endBit, const Operand& shiftAmt,
- bool zeroBits = true);
- void risbgn(Register dst, Register src, const Operand& startBit,
- const Operand& endBit, const Operand& shiftAmt,
- bool zeroBits = true);
-
- // Move Character (Mem to Mem)
- void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
-
- // Branch Instructions
- void bras(Register r, const Operand& opnd);
- void brc(Condition c, const Operand& opnd);
- void brct(Register r1, const Operand& opnd);
- void brctg(Register r1, const Operand& opnd);
-
- // 32-bit Add Instructions
- void ahi(Register r1, const Operand& opnd);
- void ahik(Register r1, Register r3, const Operand& opnd);
- void ark(Register r1, Register r2, Register r3);
- void asi(const MemOperand&, const Operand&);
-
- // 64-bit Add Instructions
- void aghi(Register r1, const Operand& opnd);
- void aghik(Register r1, Register r3, const Operand& opnd);
- void agrk(Register r1, Register r2, Register r3);
- void agsi(const MemOperand&, const Operand&);
-
- // 32-bit Add Logical Instructions
- void alrk(Register r1, Register r2, Register r3);
-
- // 64-bit Add Logical Instructions
- void algrk(Register r1, Register r2, Register r3);
-
- // 32-bit Subtract Instructions
- void srk(Register r1, Register r2, Register r3);
-
- // 64-bit Subtract Instructions
- void sgrk(Register r1, Register r2, Register r3);
-
- // 32-bit Subtract Logical Instructions
- void slrk(Register r1, Register r2, Register r3);
-
- // 64-bit Subtract Logical Instructions
- void slgrk(Register r1, Register r2, Register r3);
-
- // 32-bit Multiply Instructions
- void mhi(Register r1, const Operand& opnd);
- void msrkc(Register r1, Register r2, Register r3);
- void msgrkc(Register r1, Register r2, Register r3);
-
- // 64-bit Multiply Instructions
- void mghi(Register r1, const Operand& opnd);
-
- // Bitwise Instructions (AND / OR / XOR)
- void nrk(Register r1, Register r2, Register r3);
- void ngrk(Register r1, Register r2, Register r3);
- void ork(Register r1, Register r2, Register r3);
- void ogrk(Register r1, Register r2, Register r3);
- void xrk(Register r1, Register r2, Register r3);
- void xgrk(Register r1, Register r2, Register r3);
- void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
-
- // Floating <-> Fixed Point Conversion Instructions
- void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
- Register fixReg);
- void cdlgbr(Condition m3, Condition m4, DoubleRegister fltReg,
- Register fixReg);
- void celgbr(Condition m3, Condition m4, DoubleRegister fltReg,
- Register fixReg);
- void celfbr(Condition m3, Condition m4, DoubleRegister fltReg,
- Register fixReg);
- void clfdbr(Condition m3, Condition m4, Register fixReg,
- DoubleRegister fltReg);
- void clfebr(Condition m3, Condition m4, Register fixReg,
- DoubleRegister fltReg);
- void clgdbr(Condition m3, Condition m4, Register fixReg,
- DoubleRegister fltReg);
- void clgebr(Condition m3, Condition m4, Register fixReg,
- DoubleRegister fltReg);
- void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
- void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
- void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
- void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
- void cefbr(Condition m3, DoubleRegister fltReg, Register fixReg);
-
- // Floating Point Compare Instructions
- void cdb(DoubleRegister r1, const MemOperand& opnd);
- void ceb(DoubleRegister r1, const MemOperand& opnd);
-
- // Floating Point Arithmetic Instructions
- void adb(DoubleRegister r1, const MemOperand& opnd);
- void aeb(DoubleRegister r1, const MemOperand& opnd);
- void sdb(DoubleRegister r1, const MemOperand& opnd);
- void seb(DoubleRegister r1, const MemOperand& opnd);
- void mdb(DoubleRegister r1, const MemOperand& opnd);
- void meeb(DoubleRegister r1, const MemOperand& opnd);
- void ddb(DoubleRegister r1, const MemOperand& opnd);
- void deb(DoubleRegister r1, const MemOperand& opnd);
- void sqdb(DoubleRegister r1, const MemOperand& opnd);
- void ldeb(DoubleRegister r1, const MemOperand& opnd);
-
- enum FIDBRA_MASK3 {
- FIDBRA_CURRENT_ROUNDING_MODE = 0,
- FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
- // ...
- FIDBRA_ROUND_TOWARD_0 = 5,
- FIDBRA_ROUND_TOWARD_POS_INF = 6,
- FIDBRA_ROUND_TOWARD_NEG_INF = 7
- };
- void fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
- void fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
-
- // Move integer
- void mvhi(const MemOperand& opnd1, const Operand& i2);
- void mvghi(const MemOperand& opnd1, const Operand& i2);
-
// Exception-generating instructions and debugging support
void stop(const char* msg, Condition cond = al,
int32_t code = kDefaultStopCode, CRegister cr = cr7);
@@ -1409,7 +1554,6 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
public:
byte* buffer_pos() const { return buffer_; }
- void RequestHeapObject(HeapObjectRequest request);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1454,9 +1598,6 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
inline void TrackBranch();
inline void UntrackBranch();
- inline int32_t emit_code_target(
- Handle<Code> target, RelocInfo::Mode rmode);
-
// Helper to emit the binary encoding of a 2 byte instruction
void emit2bytes(uint16_t x) {
CheckBuffer();
@@ -1510,83 +1651,16 @@ inline void rsy_form(Opcode op, int f1, int f2, int f3, const int f4) {
pc_ += 6;
}
- // Helpers to emit binary encoding for various instruction formats.
-
- inline void rr2_form(uint8_t op, Condition m1, Register r2);
-
- inline void ri_form(Opcode op, Register r1, const Operand& i2);
- inline void ri_form(Opcode op, Condition m1, const Operand& i2);
-
- inline void rie_form(Opcode op, Register r1, Register r3, const Operand& i2);
- inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
- const Operand& i4, const Operand& i5);
-
- inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
- Disp d4, const Operand& i2);
-
- inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
- inline void rrf1_form(uint32_t x);
- inline void rrf2_form(uint32_t x);
- inline void rrf3_form(uint32_t x);
- inline void rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
- Register r2);
-
- inline void rrs_form(Opcode op, Register r1, Register r2, Register b4,
- Disp d4, Condition m3);
-
- inline void rs_form(Opcode op, Register r1, Condition m3, Register b2,
- const Disp d2);
- inline void rs_form(Opcode op, Register r1, Register r3, Register b2,
- const Disp d2);
-
- inline void rsi_form(Opcode op, Register r1, Register r3, const Operand& i2);
- inline void rsl_form(Opcode op, Length l1, Register b2, Disp d2);
- inline void rxe_form(Opcode op, Register r1, Register x2, Register b2,
- Disp d2);
-
- inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
- Register x2, Disp d2);
-
- inline void s_form(Opcode op, Register b1, Disp d2);
-
- inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
- inline void siy_form(Opcode op, const Operand& i2, Register b1, Disp d1);
-
- inline void sil_form(Opcode op, Register b1, Disp d1, const Operand& i2);
-
- inline void ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
- Disp d2);
- inline void ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
- Register b2, Disp d2);
- inline void ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
- Disp d1, Register b2, Disp d2);
- inline void ss_form(Opcode op, Register r1, Register r2, Register b1, Disp d1,
- Register b2, Disp d2);
- inline void sse_form(Opcode op, Register b1, Disp d1, Register b2, Disp d2);
- inline void ssf_form(Opcode op, Register r3, Register b1, Disp d1,
- Register b2, Disp d2);
-
// Labels
void print(Label* L);
int max_reach_from(int pos);
void bind_to(Label* L, int pos);
void next(Label* L);
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
- std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
-
- std::vector<Handle<Code>> code_targets_;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index cc41c1edcd..a9c1ec17e8 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -26,20 +26,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
- __ StoreP(r3, MemOperand(sp, r1));
- __ push(r3);
- __ push(r4);
- __ AddP(r2, r2, Operand(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
// r2: code entry
// r3: function
@@ -230,6 +216,18 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+ if (FLAG_embedded_builtins) {
+ if (masm->root_array_available() &&
+ isolate()->ShouldLoadConstantsFromRootList()) {
+ // This is basically an inlined version of Call(Handle<Code>) that loads
+ // the code object into lr instead of ip.
+ __ Move(ip, target);
+ __ IndirectLoadConstant(r1, GetCode());
+ __ AddP(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r1);
+ return;
+ }
+ }
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// Native AIX/S390X Linux use a function descriptor.
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
@@ -367,268 +365,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-template <class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ CmpP(r5, Operand(kind));
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // r2 - number of arguments
- // r3 - constructor?
- // sp[0] - last argument
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(
- masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- Label normal_sequence;
- // is the low bit set? If so, we are holey and that is good.
- __ AndP(r0, r5, Operand(1));
- __ bne(&normal_sequence);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ AddP(r5, r5, Operand(1));
- if (FLAG_debug_code) {
- __ LoadP(r7, FieldMemOperand(r4, 0));
- __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store r5
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ LoadP(r6, FieldMemOperand(
- r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
- __ StoreP(r6, FieldMemOperand(
- r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ CmpP(r5, Operand(kind));
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub, eq);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-template <class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ CmpP(r2, Operand::Zero());
- __ bne(&not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ CmpP(r2, Operand(1));
- __ bgt(&not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : argc (only if argument_count() == ANY)
- // -- r3 : constructor
- // -- r4 : AllocationSite or undefined
- // -- r5 : new target
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ TestIfSmi(r6);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in r4 or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(r4, r6);
- }
-
- // Enter the context of the Array function.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-
- Label subclassing;
- __ CmpP(r5, r3);
- __ bne(&subclassing, Label::kNear);
-
- Label no_info;
- // Get the elements kind and case on that.
- __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
- __ beq(&no_info);
-
- __ LoadP(r5, FieldMemOperand(
- r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiUntag(r5);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- __ bind(&subclassing);
- __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
- __ StoreP(r3, MemOperand(sp, r1));
- __ AddP(r2, r2, Operand(3));
- __ Push(r5, r4);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
- ElementsKind kind) {
- __ CmpLogicalP(r2, Operand(1));
-
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0, lt);
-
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN, gt);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ LoadP(r5, MemOperand(sp, 0));
- __ CmpP(r5, Operand::Zero());
-
- InternalArraySingleArgumentConstructorStub stub1_holey(
- isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey, ne);
- }
-
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-}
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : argc
- // -- r3 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- __ TestIfSmi(r5);
- __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r5, r5, r6, MAP_TYPE);
- __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|.
- __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r5);
-
- if (FLAG_debug_code) {
- Label done;
- __ CmpP(r5, Operand(PACKED_ELEMENTS));
- __ beq(&done);
- __ CmpP(r5, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ CmpP(r5, Operand(PACKED_ELEMENTS));
- __ beq(&fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -656,14 +392,14 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
DCHECK(function_address == r3 || function_address == r4);
Register scratch = r5;
- __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ Move(scratch, ExternalReference::is_profiling_address(isolate));
__ LoadlB(scratch, MemOperand(scratch, 0));
__ CmpP(scratch, Operand::Zero());
Label profiler_disabled;
Label end_profiler_check;
__ beq(&profiler_disabled, Label::kNear);
- __ mov(scratch, Operand(thunk_ref));
+ __ Move(scratch, thunk_ref);
__ b(&end_profiler_check, Label::kNear);
__ bind(&profiler_disabled);
__ LoadRR(scratch, function_address);
@@ -674,7 +410,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// r6 - next_address->kNextOffset
// r7 - next_address->kLimitOffset
// r8 - next_address->kLevelOffset
- __ mov(r9, Operand(next_address));
+ __ Move(r9, next_address);
__ LoadP(r6, MemOperand(r9, kNextOffset));
__ LoadP(r7, MemOperand(r9, kLimitOffset));
__ LoadlW(r8, MemOperand(r9, kLevelOffset));
@@ -685,7 +421,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r2);
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r2, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_enter_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -700,7 +436,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r2);
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r2, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_leave_external_function(), 1);
__ PopSafepointRegisters();
}
@@ -737,7 +473,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
- __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ Move(r7, ExternalReference::scheduled_exception_address(isolate));
__ LoadP(r7, MemOperand(r7));
__ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
__ bne(&promote_scheduled_exception, Label::kNear);
@@ -753,7 +489,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ StoreP(r7, MemOperand(r9, kLimitOffset));
__ LoadRR(r6, r2);
__ PrepareCallCFunction(1, r7);
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ Move(r2, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
__ LoadRR(r2, r6);
__ b(&leave_exit_frame, Label::kNear);
@@ -799,7 +535,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch);
// isolate
- __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ push(scratch);
// holder
__ push(holder);
@@ -875,7 +611,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ push(scratch);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Push(scratch, scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
__ Push(Smi::kZero); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index dee21b28ef..e02a3ab215 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -29,6 +29,9 @@
namespace v8 {
namespace internal {
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
// Number of registers
const int kNumRegisters = 16;
@@ -37,6 +40,11 @@ const int kNumDoubleRegisters = 16;
const int kNoRegister = -1;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 128;
+
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
@@ -270,23 +278,15 @@ typedef uint64_t SixByteInstr;
#define S390_RRF_A_OPCODE_LIST(V) \
V(ipte, IPTE, 0xB221) /* type = RRF_A INVALIDATE PAGE TABLE ENTRY */ \
- V(mdtr, MDTR, 0xB3D0) /* type = RRF_A MULTIPLY (long DFP) */ \
V(mdtra, MDTRA, 0xB3D0) /* type = RRF_A MULTIPLY (long DFP) */ \
- V(ddtr, DDTR, 0xB3D1) /* type = RRF_A DIVIDE (long DFP) */ \
V(ddtra, DDTRA, 0xB3D1) /* type = RRF_A DIVIDE (long DFP) */ \
- V(adtr, ADTR, 0xB3D2) /* type = RRF_A ADD (long DFP) */ \
V(adtra, ADTRA, 0xB3D2) /* type = RRF_A ADD (long DFP) */ \
- V(sdtr, SDTR, 0xB3D3) /* type = RRF_A SUBTRACT (long DFP) */ \
V(sdtra, SDTRA, 0xB3D3) /* type = RRF_A SUBTRACT (long DFP) */ \
- V(mxtr, MXTR, 0xB3D8) /* type = RRF_A MULTIPLY (extended DFP) */ \
V(mxtra, MXTRA, 0xB3D8) /* type = RRF_A MULTIPLY (extended DFP) */ \
V(msrkc, MSRKC, 0xB9FD) /* type = RRF_A MULTIPLY (32)*/ \
V(msgrkc, MSGRKC, 0xB9ED) /* type = RRF_A MULTIPLY (64)*/ \
- V(dxtr, DXTR, 0xB3D9) /* type = RRF_A DIVIDE (extended DFP) */ \
V(dxtra, DXTRA, 0xB3D9) /* type = RRF_A DIVIDE (extended DFP) */ \
- V(axtr, AXTR, 0xB3DA) /* type = RRF_A ADD (extended DFP) */ \
V(axtra, AXTRA, 0xB3DA) /* type = RRF_A ADD (extended DFP) */ \
- V(sxtr, SXTR, 0xB3DB) /* type = RRF_A SUBTRACT (extended DFP) */ \
V(sxtra, SXTRA, 0xB3DB) /* type = RRF_A SUBTRACT (extended DFP) */ \
V(ahhhr, AHHHR, 0xB9C8) /* type = RRF_A ADD HIGH (32) */ \
V(shhhr, SHHHR, 0xB9C9) /* type = RRF_A SUBTRACT HIGH (32) */ \
@@ -362,9 +362,7 @@ typedef uint64_t SixByteInstr;
#define S390_RRF_C_OPCODE_LIST(V) \
V(sske, SSKE, 0xB22B) /* type = RRF_C SET STORAGE KEY EXTENDED */ \
- V(cuutf, CUUTF, 0xB2A6) /* type = RRF_C CONVERT UNICODE TO UTF-8 */ \
V(cu21, CU21, 0xB2A6) /* type = RRF_C CONVERT UTF-16 TO UTF-8 */ \
- V(cutfu, CUTFU, 0xB2A7) /* type = RRF_C CONVERT UTF-8 TO UNICODE */ \
V(cu12, CU12, 0xB2A7) /* type = RRF_C CONVERT UTF-8 TO UTF-16 */ \
V(ppa, PPA, 0xB2E8) /* type = RRF_C PERFORM PROCESSOR ASSIST */ \
V(cgrt, CGRT, 0xB960) /* type = RRF_C COMPARE AND TRAP (64) */ \
@@ -404,14 +402,11 @@ typedef uint64_t SixByteInstr;
0xB345) /* type = RRF_E LOAD ROUNDED (extended to long BFP) */ \
V(lexbra, LEXBRA, \
0xB346) /* type = RRF_E LOAD ROUNDED (extended to short BFP) */ \
- V(fixbr, FIXBR, 0xB347) /* type = RRF_E LOAD FP INTEGER (extended BFP) */ \
V(fixbra, FIXBRA, 0xB347) /* type = RRF_E LOAD FP INTEGER (extended BFP) */ \
V(tbedr, TBEDR, \
0xB350) /* type = RRF_E CONVERT HFP TO BFP (long to short) */ \
V(tbdr, TBDR, 0xB351) /* type = RRF_E CONVERT HFP TO BFP (long) */ \
- V(fiebr, FIEBR, 0xB357) /* type = RRF_E LOAD FP INTEGER (short BFP) */ \
V(fiebra, FIEBRA, 0xB357) /* type = RRF_E LOAD FP INTEGER (short BFP) */ \
- V(fidbr, FIDBR, 0xB35F) /* type = RRF_E LOAD FP INTEGER (long BFP) */ \
V(fidbra, FIDBRA, 0xB35F) /* type = RRF_E LOAD FP INTEGER (long BFP) */ \
V(celfbr, CELFBR, \
0xB390) /* type = RRF_E CONVERT FROM LOGICAL (32 to short BFP) */ \
@@ -425,15 +420,10 @@ typedef uint64_t SixByteInstr;
0xB395) /* type = RRF_E CONVERT FROM FIXED (32 to long BFP) */ \
V(cxfbra, CXFBRA, \
0xB396) /* type = RRF_E CONVERT FROM FIXED (32 to extended BFP) */ \
- V(cfebr, CFEBR, \
- 0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32) */ \
V(cfebra, CFEBRA, \
0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32) */ \
- V(cfdbr, CFDBR, 0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32) */ \
V(cfdbra, CFDBRA, \
0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32) */ \
- V(cfxbr, CFXBR, \
- 0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32) */ \
V(cfxbra, CFXBRA, \
0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32) */ \
V(clfebr, CLFEBR, \
@@ -454,15 +444,10 @@ typedef uint64_t SixByteInstr;
0xB3A5) /* type = RRF_E CONVERT FROM FIXED (64 to long BFP) */ \
V(cxgbra, CXGBRA, \
0xB3A6) /* type = RRF_E CONVERT FROM FIXED (64 to extended BFP) */ \
- V(cgebr, CGEBR, \
- 0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64) */ \
V(cgebra, CGEBRA, \
0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64) */ \
- V(cgdbr, CGDBR, 0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64) */ \
V(cgdbra, CGDBRA, \
0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64) */ \
- V(cgxbr, CGXBR, \
- 0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64) */ \
V(cgxbra, CGXBRA, \
0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64) */ \
V(clgebr, CLGEBR, \
@@ -484,11 +469,8 @@ typedef uint64_t SixByteInstr;
V(ldxtr, LDXTR, \
0xB3DD) /* type = RRF_E LOAD ROUNDED (extended to long DFP) */ \
V(fixtr, FIXTR, 0xB3DF) /* type = RRF_E LOAD FP INTEGER (extended DFP) */ \
- V(cgdtr, CGDTR, 0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64) */ \
V(cgdtra, CGDTRA, \
0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64) */ \
- V(cgxtr, CGXTR, \
- 0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64) */ \
V(cgxtra, CGXTRA, \
0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64) */ \
V(cdgtra, CDGTRA, \
@@ -686,7 +668,11 @@ typedef uint64_t SixByteInstr;
0xEDAD) /* type = RSL_B CONVERT TO PACKED (from extended DFP) */ \
V(cdpt, CDPT, 0xEDAE) /* type = RSL_B CONVERT FROM PACKED (to long DFP) */ \
V(cxpt, CXPT, \
- 0xEDAF) /* type = RSL_B CONVERT FROM PACKED (to extended DFP) */
+ 0xEDAF) /* type = RSL_B CONVERT FROM PACKED (to extended DFP) */ \
+ V(czdt, CZDT, 0xEDA8) /* type = RSL CONVERT TO ZONED (from long DFP) */ \
+ V(czxt, CZXT, 0xEDA9) /* type = RSL CONVERT TO ZONED (from extended DFP) */ \
+ V(cdzt, CDZT, 0xEDAA) /* type = RSL CONVERT FROM ZONED (to long DFP) */ \
+ V(cxzt, CXZT, 0xEDAB) /* type = RSL CONVERT FROM ZONED (to extended DFP) */
#define S390_SI_OPCODE_LIST(V) \
V(tm, TM, 0x91) /* type = SI TEST UNDER MASK */ \
@@ -829,9 +815,7 @@ typedef uint64_t SixByteInstr;
V(llilh, LLILH, 0xA5E) /* type = RI_A LOAD LOGICAL IMMEDIATE (low high) */ \
V(llill, LLILL, 0xA5F) /* type = RI_A LOAD LOGICAL IMMEDIATE (low low) */ \
V(tmlh, TMLH, 0xA70) /* type = RI_A TEST UNDER MASK (low high) */ \
- V(tmh, TMH, 0xA70) /* type = RI_A TEST UNDER MASK HIGH */ \
V(tmll, TMLL, 0xA71) /* type = RI_A TEST UNDER MASK (low low) */ \
- V(tml, TML, 0xA71) /* type = RI_A TEST UNDER MASK LOW */ \
V(tmhh, TMHH, 0xA72) /* type = RI_A TEST UNDER MASK (high high) */ \
V(tmhl, TMHL, 0xA73) /* type = RI_A TEST UNDER MASK (high low) */ \
V(lhi, LHI, 0xA78) /* type = RI_A LOAD HALFWORD IMMEDIATE (32)<-16 */ \
@@ -856,12 +840,6 @@ typedef uint64_t SixByteInstr;
#define S390_RI_C_OPCODE_LIST(V) \
V(brc, BRC, 0xA74) /* type = RI_C BRANCH RELATIVE ON CONDITION */
-#define S390_RSL_OPCODE_LIST(V) \
- V(czdt, CZDT, 0xEDA8) /* type = RSL CONVERT TO ZONED (from long DFP) */ \
- V(czxt, CZXT, 0xEDA9) /* type = RSL CONVERT TO ZONED (from extended DFP) */ \
- V(cdzt, CDZT, 0xEDAA) /* type = RSL CONVERT FROM ZONED (to long DFP) */ \
- V(cxzt, CXZT, 0xEDAB) /* type = RSL CONVERT FROM ZONED (to extended DFP) */
-
#define S390_SMI_OPCODE_LIST(V) \
V(bpp, BPP, 0xC7) /* type = SMI BRANCH PREDICTION PRELOAD */
@@ -1096,7 +1074,6 @@ typedef uint64_t SixByteInstr;
V(icm, ICM, 0xBF) /* type = RS_B INSERT CHARACTERS UNDER MASK (low) */
#define S390_S_OPCODE_LIST(V) \
- V(awr, AWR, 0x2E) /* type = S ADD UNNORMALIZED (long HFP) */ \
V(lpsw, LPSW, 0x82) /* type = S LOAD PSW */ \
V(diagnose, DIAGNOSE, 0x83) /* type = S DIAGNOSE */ \
V(ts, TS, 0x93) /* type = S TEST AND SET */ \
@@ -1194,7 +1171,6 @@ typedef uint64_t SixByteInstr;
V(ae, AE, 0x7A) /* type = RX_A ADD NORMALIZED (short HFP) */ \
V(se, SE, 0x7B) /* type = RX_A SUBTRACT NORMALIZED (short HFP) */ \
V(mde, MDE, 0x7C) /* type = RX_A MULTIPLY (short to long HFP) */ \
- V(me, ME, 0x7C) /* type = RX_A MULTIPLY (short to long HFP) */ \
V(de, DE, 0x7D) /* type = RX_A DIVIDE (short HFP) */ \
V(au, AU, 0x7E) /* type = RX_A ADD UNNORMALIZED (short HFP) */ \
V(su, SU, 0x7F) /* type = RX_A SUBTRACT UNNORMALIZED (short HFP) */ \
@@ -1325,11 +1301,6 @@ typedef uint64_t SixByteInstr;
V(lnxbr, LNXBR, 0xB341) /* type = RRE LOAD NEGATIVE (extended BFP) */ \
V(ltxbr, LTXBR, 0xB342) /* type = RRE LOAD AND TEST (extended BFP) */ \
V(lcxbr, LCXBR, 0xB343) /* type = RRE LOAD COMPLEMENT (extended BFP) */ \
- V(ledbr, LEDBR, 0xB344) /* type = RRE LOAD ROUNDED (long to short BFP) */ \
- V(ldxbr, LDXBR, \
- 0xB345) /* type = RRE LOAD ROUNDED (extended to long BFP) */ \
- V(lexbr, LEXBR, \
- 0xB346) /* type = RRE LOAD ROUNDED (extended to short BFP) */ \
V(kxbr, KXBR, 0xB348) /* type = RRE COMPARE AND SIGNAL (extended BFP) */ \
V(cxbr, CXBR, 0xB349) /* type = RRE COMPARE (extended BFP) */ \
V(axbr, AXBR, 0xB34A) /* type = RRE ADD (extended BFP) */ \
@@ -1359,18 +1330,6 @@ typedef uint64_t SixByteInstr;
V(sfpc, SFPC, 0xB384) /* type = RRE SET FPC */ \
V(sfasr, SFASR, 0xB385) /* type = RRE SET FPC AND SIGNAL */ \
V(efpc, EFPC, 0xB38C) /* type = RRE EXTRACT FPC */ \
- V(cefbr, CEFBR, \
- 0xB394) /* type = RRE CONVERT FROM FIXED (32 to short BFP) */ \
- V(cdfbr, CDFBR, \
- 0xB395) /* type = RRE CONVERT FROM FIXED (32 to long BFP) */ \
- V(cxfbr, CXFBR, \
- 0xB396) /* type = RRE CONVERT FROM FIXED (32 to extended BFP) */ \
- V(cegbr, CEGBR, \
- 0xB3A4) /* type = RRE CONVERT FROM FIXED (64 to short BFP) */ \
- V(cdgbr, CDGBR, \
- 0xB3A5) /* type = RRE CONVERT FROM FIXED (64 to long BFP) */ \
- V(cxgbr, CXGBR, \
- 0xB3A6) /* type = RRE CONVERT FROM FIXED (64 to extended BFP) */ \
V(cefr, CEFR, \
0xB3B4) /* type = RRE CONVERT FROM FIXED (32 to short HFP) */ \
V(cdfr, CDFR, 0xB3B5) /* type = RRE CONVERT FROM FIXED (32 to long HFP) */ \
@@ -1402,16 +1361,12 @@ typedef uint64_t SixByteInstr;
0xB3ED) /* type = RRE EXTRACT BIASED EXPONENT (extended DFP to 64) */ \
V(esxtr, ESXTR, \
0xB3EF) /* type = RRE EXTRACT SIGNIFICANCE (extended DFP to 64) */ \
- V(cdgtr, CDGTR, \
- 0xB3F1) /* type = RRE CONVERT FROM FIXED (64 to long DFP) */ \
V(cdutr, CDUTR, \
0xB3F2) /* type = RRE CONVERT FROM UNSIGNED PACKED (64 to long DFP) */ \
V(cdstr, CDSTR, \
0xB3F3) /* type = RRE CONVERT FROM SIGNED PACKED (64 to long DFP) */ \
V(cedtr, CEDTR, \
0xB3F4) /* type = RRE COMPARE BIASED EXPONENT (long DFP) */ \
- V(cxgtr, CXGTR, \
- 0xB3F9) /* type = RRE CONVERT FROM FIXED (64 to extended DFP) */ \
V(cxutr, CXUTR, \
0xB3FA) /* type = RRE CONVERT FROM UNSIGNED PACKED (128 to ext. DFP) */ \
V(cxstr, CXSTR, 0xB3FB) /* type = RRE CONVERT FROM SIGNED PACKED (128 to*/ \
@@ -1541,6 +1496,7 @@ typedef uint64_t SixByteInstr;
0xEC45) /* type = RIE_E BRANCH RELATIVE ON INDEX LOW OR EQ. (64) */
#define S390_RR_OPCODE_LIST(V) \
+ V(awr, AWR, 0x2E) /* type = RR ADD UNNORMALIZED (long HFP) */ \
V(spm, SPM, 0x04) /* type = RR SET PROGRAM MASK */ \
V(balr, BALR, 0x05) /* type = RR BRANCH AND LINK */ \
V(bctr, BCTR, 0x06) /* type = RR BRANCH ON COUNT (32) */ \
@@ -1572,7 +1528,6 @@ typedef uint64_t SixByteInstr;
V(lcdr, LCDR, 0x23) /* type = RR LOAD COMPLEMENT (long HFP) */ \
V(hdr, HDR, 0x24) /* type = RR HALVE (long HFP) */ \
V(ldxr, LDXR, 0x25) /* type = RR LOAD ROUNDED (extended to long HFP) */ \
- V(lrdr, LRDR, 0x25) /* type = RR LOAD ROUNDED (extended to long HFP) */ \
V(mxr, MXR, 0x26) /* type = RR MULTIPLY (extended HFP) */ \
V(mxdr, MXDR, 0x27) /* type = RR MULTIPLY (long to extended HFP) */ \
V(ldr, LDR, 0x28) /* type = RR LOAD (long) */ \
@@ -1588,7 +1543,6 @@ typedef uint64_t SixByteInstr;
V(lcer, LCER, 0x33) /* type = RR LOAD COMPLEMENT (short HFP) */ \
V(her_z, HER_Z, 0x34) /* type = RR HALVE (short HFP) */ \
V(ledr, LEDR, 0x35) /* type = RR LOAD ROUNDED (long to short HFP) */ \
- V(lrer, LRER, 0x35) /* type = RR LOAD ROUNDED (long to short HFP) */ \
V(axr, AXR, 0x36) /* type = RR ADD NORMALIZED (extended HFP) */ \
V(sxr, SXR, 0x37) /* type = RR SUBTRACT NORMALIZED (extended HFP) */ \
V(ler, LER, 0x38) /* type = RR LOAD (short) */ \
@@ -1596,7 +1550,6 @@ typedef uint64_t SixByteInstr;
V(aer, AER, 0x3A) /* type = RR ADD NORMALIZED (short HFP) */ \
V(ser, SER, 0x3B) /* type = RR SUBTRACT NORMALIZED (short HFP) */ \
V(mder, MDER, 0x3C) /* type = RR MULTIPLY (short to long HFP) */ \
- V(mer, MER, 0x3C) /* type = RR MULTIPLY (short to long HFP) */ \
V(der, DER, 0x3D) /* type = RR DIVIDE (short HFP) */ \
V(aur, AUR, 0x3E) /* type = RR ADD UNNORMALIZED (short HFP) */ \
V(sur, SUR, 0x3F) /* type = RR SUBTRACT UNNORMALIZED (short HFP) */
@@ -1684,7 +1637,6 @@ typedef uint64_t SixByteInstr;
S390_RSI_OPCODE_LIST(V) \
S390_RI_B_OPCODE_LIST(V) \
S390_RI_C_OPCODE_LIST(V) \
- S390_RSL_OPCODE_LIST(V) \
S390_SMI_OPCODE_LIST(V) \
S390_RXY_A_OPCODE_LIST(V) \
S390_RXY_B_OPCODE_LIST(V) \
@@ -2234,6 +2186,17 @@ class RSInstruction : Instruction {
inline int size() const { return 4; }
};
+// RSI Instruction
+class RSIInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+ inline int R3Value() const { return Bits<FourByteInstr, int>(19, 16); }
+ inline int I2Value() const {
+ return static_cast<int32_t>(Bits<FourByteInstr, int16_t>(15, 0));
+ }
+ inline int size() const { return 4; }
+};
+
// RSY Instruction
class RSYInstruction : Instruction {
public:
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index 4bf0249890..d2ae1ded27 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -79,7 +79,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r3, &context_check);
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ LoadImmP(r3, Operand(type())); // bailout type,
+ __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind())));
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 66d77d1250..5acd0e7755 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -78,9 +78,8 @@ class Decoder {
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
- bool DecodeTwoByte(Instruction* instr);
- bool DecodeFourByte(Instruction* instr);
- bool DecodeSixByte(Instruction* instr);
+ bool DecodeSpecial(Instruction* instr);
+ bool DecodeGeneric(Instruction* instr);
const disasm::NameConverter& converter_;
Vector<char> out_buffer_;
@@ -143,13 +142,11 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
DCHECK_EQ(format[0], 'r');
if (format[1] == '1') { // 'r1: register resides in bit 8-11
- RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
- int reg = rrinstr->R1Value();
+ int reg = instr->Bits<SixByteInstr, int>(39, 36);
PrintRegister(reg);
return 2;
} else if (format[1] == '2') { // 'r2: register resides in bit 12-15
- RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
- int reg = rrinstr->R2Value();
+ int reg = instr->Bits<SixByteInstr, int>(35, 32);
// indicating it is a r0 for displacement, in which case the offset
// should be 0.
if (format[2] == 'd') {
@@ -161,28 +158,23 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 2;
}
} else if (format[1] == '3') { // 'r3: register resides in bit 16-19
- RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
- int reg = rsinstr->B2Value();
+ int reg = instr->Bits<SixByteInstr, int>(31, 28);
PrintRegister(reg);
return 2;
} else if (format[1] == '4') { // 'r4: register resides in bit 20-23
- RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
- int reg = rsinstr->B2Value();
+ int reg = instr->Bits<SixByteInstr, int>(27, 24);
PrintRegister(reg);
return 2;
- } else if (format[1] == '5') { // 'r5: register resides in bit 24-28
- RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
- int reg = rreinstr->R1Value();
+ } else if (format[1] == '5') { // 'r5: register resides in bit 24-27
+ int reg = instr->Bits<SixByteInstr, int>(23, 20);
PrintRegister(reg);
return 2;
- } else if (format[1] == '6') { // 'r6: register resides in bit 29-32
- RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
- int reg = rreinstr->R2Value();
+ } else if (format[1] == '6') { // 'r6: register resides in bit 28-31
+ int reg = instr->Bits<SixByteInstr, int>(19, 16);
PrintRegister(reg);
return 2;
} else if (format[1] == '7') { // 'r6: register resides in bit 32-35
- SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
- int reg = ssinstr->B2Value();
+ int reg = instr->Bits<SixByteInstr, int>(15, 12);
PrintRegister(reg);
return 2;
}
@@ -493,449 +485,120 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
Format(instr, buffer);
}
-// Disassembles Two Byte S390 Instructions
-// @return true if successfully decoded
-bool Decoder::DecodeTwoByte(Instruction* instr) {
- // Print the Instruction bits.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%04x ",
- instr->InstructionBits<TwoByteInstr>());
+#undef VERIFY
+#undef STRING_STARTS_WITH
+// Handles special cases of instructions;
+// @return true if successfully decoded
+bool Decoder::DecodeSpecial(Instruction* instr) {
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
- case AR:
- Format(instr, "ar\t'r1,'r2");
+ case BKPT:
+ Format(instr, "bkpt");
break;
- case SR:
- Format(instr, "sr\t'r1,'r2");
+ case DUMY:
+ Format(instr, "dumy\t'r1, 'd2 ( 'r2d, 'r3 )");
break;
- case MR:
- Format(instr, "mr\t'r1,'r2");
+ /* RR format */
+ case LDR:
+ Format(instr, "ldr\t'f1,'f2");
break;
- case DR:
- Format(instr, "dr\t'r1,'r2");
+ case BCR:
+ Format(instr, "bcr\t'm1,'r2");
break;
case OR:
Format(instr, "or\t'r1,'r2");
break;
- case NR:
- Format(instr, "nr\t'r1,'r2");
- break;
- case XR:
- Format(instr, "xr\t'r1,'r2");
- break;
- case LR:
- Format(instr, "lr\t'r1,'r2");
- break;
case CR:
Format(instr, "cr\t'r1,'r2");
break;
- case CLR:
- Format(instr, "clr\t'r1,'r2");
- break;
- case BCR:
- Format(instr, "bcr\t'm1,'r2");
- break;
- case LTR:
- Format(instr, "ltr\t'r1,'r2");
- break;
- case ALR:
- Format(instr, "alr\t'r1,'r2");
- break;
- case SLR:
- Format(instr, "slr\t'r1,'r2");
- break;
- case LNR:
- Format(instr, "lnr\t'r1,'r2");
- break;
- case LCR:
- Format(instr, "lcr\t'r1,'r2");
- break;
- case BASR:
- Format(instr, "basr\t'r1,'r2");
- break;
- case LDR:
- Format(instr, "ldr\t'f1,'f2");
- break;
- case BKPT:
- Format(instr, "bkpt");
- break;
- case LPR:
- Format(instr, "lpr\t'r1, 'r2");
- break;
- default:
- return false;
- }
- return true;
-}
-
-// Disassembles Four Byte S390 Instructions
-// @return true if successfully decoded
-bool Decoder::DecodeFourByte(Instruction* instr) {
- // Print the Instruction bits.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
- instr->InstructionBits<FourByteInstr>());
-
- Opcode opcode = instr->S390OpcodeValue();
- switch (opcode) {
- case AHI:
- Format(instr, "ahi\t'r1,'i1");
- break;
- case AGHI:
- Format(instr, "aghi\t'r1,'i1");
- break;
- case LHI:
- Format(instr, "lhi\t'r1,'i1");
- break;
- case LGHI:
- Format(instr, "lghi\t'r1,'i1");
- break;
- case MHI:
- Format(instr, "mhi\t'r1,'i1");
- break;
- case MGHI:
- Format(instr, "mghi\t'r1,'i1");
- break;
- case CHI:
- Format(instr, "chi\t'r1,'i1");
+ case MR:
+ Format(instr, "mr\t'r1,'r2");
break;
- case CGHI:
- Format(instr, "cghi\t'r1,'i1");
+ case HER_Z:
+ Format(instr, "her\t'r1,'r2");
break;
+ /* RI-b format */
case BRAS:
Format(instr, "bras\t'r1,'i1");
break;
- case BRC:
- Format(instr, "brc\t'm1,'i4");
- break;
- case BRCT:
- Format(instr, "brct\t'r1,'i4");
- break;
- case BRCTG:
- Format(instr, "brctg\t'r1,'i4");
- break;
- case IIHH:
- Format(instr, "iihh\t'r1,'i1");
- break;
- case IIHL:
- Format(instr, "iihl\t'r1,'i1");
- break;
- case IILH:
- Format(instr, "iilh\t'r1,'i1");
- break;
- case IILL:
- Format(instr, "iill\t'r1,'i1");
- break;
- case OILL:
- Format(instr, "oill\t'r1,'i1");
- break;
- case TMLL:
- Format(instr, "tmll\t'r1,'i1");
- break;
- case STM:
- Format(instr, "stm\t'r1,'r2,'d1('r3)");
- break;
- case LM:
- Format(instr, "lm\t'r1,'r2,'d1('r3)");
- break;
- case CS:
- Format(instr, "cs\t'r1,'r2,'d1('r3)");
- break;
- case SLL:
- Format(instr, "sll\t'r1,'d1('r3)");
- break;
- case SRL:
- Format(instr, "srl\t'r1,'d1('r3)");
- break;
- case SLA:
- Format(instr, "sla\t'r1,'d1('r3)");
- break;
- case SRA:
- Format(instr, "sra\t'r1,'d1('r3)");
- break;
- case SLDL:
- Format(instr, "sldl\t'r1,'d1('r3)");
- break;
- case AGR:
- Format(instr, "agr\t'r5,'r6");
- break;
- case AGFR:
- Format(instr, "agfr\t'r5,'r6");
- break;
- case ARK:
- Format(instr, "ark\t'r5,'r6,'r3");
- break;
- case AGRK:
- Format(instr, "agrk\t'r5,'r6,'r3");
- break;
- case SGR:
- Format(instr, "sgr\t'r5,'r6");
- break;
- case SGFR:
- Format(instr, "sgfr\t'r5,'r6");
- break;
- case SRK:
- Format(instr, "srk\t'r5,'r6,'r3");
- break;
- case SGRK:
- Format(instr, "sgrk\t'r5,'r6,'r3");
- break;
- case NGR:
- Format(instr, "ngr\t'r5,'r6");
- break;
- case NRK:
- Format(instr, "nrk\t'r5,'r6,'r3");
- break;
- case NGRK:
- Format(instr, "ngrk\t'r5,'r6,'r3");
- break;
- case NILL:
- Format(instr, "nill\t'r1,'i1");
- break;
- case NILH:
- Format(instr, "nilh\t'r1,'i1");
- break;
- case OGR:
- Format(instr, "ogr\t'r5,'r6");
- break;
- case ORK:
- Format(instr, "ork\t'r5,'r6,'r3");
- break;
- case OGRK:
- Format(instr, "ogrk\t'r5,'r6,'r3");
- break;
- case XGR:
- Format(instr, "xgr\t'r5,'r6");
- break;
- case XRK:
- Format(instr, "xrk\t'r5,'r6,'r3");
- break;
- case XGRK:
- Format(instr, "xgrk\t'r5,'r6,'r3");
- break;
- case CGFR:
- Format(instr, "cgfr\t'r5,'r6");
- break;
- case CGR:
- Format(instr, "cgr\t'r5,'r6");
- break;
- case CLGR:
- Format(instr, "clgr\t'r5,'r6");
- break;
- case LLGFR:
- Format(instr, "llgfr\t'r5,'r6");
+ /* RRE format */
+ case MDBR:
+ Format(instr, "mdbr\t'f5,'f6");
break;
- case POPCNT_Z:
- Format(instr, "popcnt\t'r5,'r6");
+ case SDBR:
+ Format(instr, "sdbr\t'f5,'f6");
break;
- case LLGCR:
- Format(instr, "llgcr\t'r5,'r6");
+ case ADBR:
+ Format(instr, "adbr\t'f5,'f6");
break;
- case LLCR:
- Format(instr, "llcr\t'r5,'r6");
+ case CDBR:
+ Format(instr, "cdbr\t'f5,'f6");
break;
- case LBR:
- Format(instr, "lbr\t'r5,'r6");
+ case MEEBR:
+ Format(instr, "meebr\t'f5,'f6");
break;
- case LEDBR:
- Format(instr, "ledbr\t'f5,'f6");
+ case SQDBR:
+ Format(instr, "sqdbr\t'f5,'f6");
break;
- case LDEBR:
- Format(instr, "ldebr\t'f5,'f6");
+ case SQEBR:
+ Format(instr, "sqebr\t'f5,'f6");
break;
- case LTGR:
- Format(instr, "ltgr\t'r5,'r6");
+ case LCDBR:
+ Format(instr, "lcdbr\t'f5,'f6");
break;
- case LTDBR:
- Format(instr, "ltdbr\t'f5,'f6");
+ case LCEBR:
+ Format(instr, "lcebr\t'f5,'f6");
break;
case LTEBR:
Format(instr, "ltebr\t'f5,'f6");
break;
- case LRVR:
- Format(instr, "lrvr\t'r5,'r6");
- break;
- case LRVGR:
- Format(instr, "lrvgr\t'r5,'r6");
- break;
- case LGR:
- Format(instr, "lgr\t'r5,'r6");
- break;
- case LGDR:
- Format(instr, "lgdr\t'r5,'f6");
- break;
- case LGFR:
- Format(instr, "lgfr\t'r5,'r6");
- break;
- case LTGFR:
- Format(instr, "ltgfr\t'r5,'r6");
- break;
- case LCGR:
- Format(instr, "lcgr\t'r5,'r6");
- break;
- case MSR:
- Format(instr, "msr\t'r5,'r6");
- break;
- case MSRKC:
- Format(instr, "msrkc\t'r5,'r6,'r3");
+ case LDEBR:
+ Format(instr, "ldebr\t'f5,'f6");
break;
- case LGBR:
- Format(instr, "lgbr\t'r5,'r6");
+ case CEBR:
+ Format(instr, "cebr\t'f5,'f6");
break;
- case LGHR:
- Format(instr, "lghr\t'r5,'r6");
+ case AEBR:
+ Format(instr, "aebr\t'f5,'f6");
break;
- case MSGR:
- Format(instr, "msgr\t'r5,'r6");
+ case SEBR:
+ Format(instr, "sebr\t'f5,'f6");
break;
- case MSGRKC:
- Format(instr, "msgrkc\t'r5,'r6,'r3");
+ case DEBR:
+ Format(instr, "debr\t'f5,'f6");
break;
- case DSGR:
- Format(instr, "dsgr\t'r5,'r6");
+ case LTDBR:
+ Format(instr, "ltdbr\t'f5,'f6");
break;
- case DSGFR:
- Format(instr, "dsgfr\t'r5,'r6");
+ case LDGR:
+ Format(instr, "ldgr\t'f5,'f6");
break;
- case MSGFR:
- Format(instr, "msgfr\t'r5,'r6");
+ case DDBR:
+ Format(instr, "ddbr\t'f5,'f6");
break;
case LZDR:
Format(instr, "lzdr\t'f5");
break;
- case MLR:
- Format(instr, "mlr\t'r5,'r6");
- break;
- case MLGR:
- Format(instr, "mlgr\t'r5,'r6");
- break;
- case ALCR:
- Format(instr, "alcr\t'r5,'r6");
- break;
- case ALGR:
- Format(instr, "algr\t'r5,'r6");
- break;
- case ALRK:
- Format(instr, "alrk\t'r5,'r6,'r3");
- break;
- case ALGRK:
- Format(instr, "algrk\t'r5,'r6,'r3");
- break;
- case SLGR:
- Format(instr, "slgr\t'r5,'r6");
- break;
- case SLBR:
- Format(instr, "slbr\t'r5,'r6");
- break;
- case DLR:
- Format(instr, "dlr\t'r5,'r6");
- break;
- case DLGR:
- Format(instr, "dlgr\t'r5,'r6");
- break;
- case SLRK:
- Format(instr, "slrk\t'r5,'r6,'r3");
- break;
- case SLGRK:
- Format(instr, "slgrk\t'r5,'r6,'r3");
- break;
- case LHR:
- Format(instr, "lhr\t'r5,'r6");
- break;
- case LLHR:
- Format(instr, "llhr\t'r5,'r6");
- break;
- case LLGHR:
- Format(instr, "llghr\t'r5,'r6");
- break;
- case LOCR:
- Format(instr, "locr\t'm1,'r5,'r6");
- break;
- case LOCGR:
- Format(instr, "locgr\t'm1,'r5,'r6");
- break;
- case LNGR:
- Format(instr, "lngr\t'r5,'r6");
- break;
- case A:
- Format(instr, "a\t'r1,'d1('r2d,'r3)");
- break;
- case S:
- Format(instr, "s\t'r1,'d1('r2d,'r3)");
- break;
- case M:
- Format(instr, "m\t'r1,'d1('r2d,'r3)");
- break;
- case D:
- Format(instr, "d\t'r1,'d1('r2d,'r3)");
- break;
- case O:
- Format(instr, "o\t'r1,'d1('r2d,'r3)");
- break;
- case N:
- Format(instr, "n\t'r1,'d1('r2d,'r3)");
- break;
- case L:
- Format(instr, "l\t'r1,'d1('r2d,'r3)");
- break;
- case C:
- Format(instr, "c\t'r1,'d1('r2d,'r3)");
- break;
- case AH:
- Format(instr, "ah\t'r1,'d1('r2d,'r3)");
- break;
- case SH:
- Format(instr, "sh\t'r1,'d1('r2d,'r3)");
- break;
- case MH:
- Format(instr, "mh\t'r1,'d1('r2d,'r3)");
- break;
- case AL:
- Format(instr, "al\t'r1,'d1('r2d,'r3)");
- break;
- case SL:
- Format(instr, "sl\t'r1,'d1('r2d,'r3)");
- break;
- case LA:
- Format(instr, "la\t'r1,'d1('r2d,'r3)");
- break;
- case CH:
- Format(instr, "ch\t'r1,'d1('r2d,'r3)");
- break;
- case CL:
- Format(instr, "cl\t'r1,'d1('r2d,'r3)");
- break;
- case CLI:
- Format(instr, "cli\t'd1('r3),'i8");
- break;
- case TM:
- Format(instr, "tm\t'd1('r3),'i8");
- break;
- case BC:
- Format(instr, "bc\t'm1,'d1('r2d,'r3)");
- break;
- case BCT:
- Format(instr, "bct\t'r1,'d1('r2d,'r3)");
- break;
- case ST:
- Format(instr, "st\t'r1,'d1('r2d,'r3)");
+ /* RRF-e format */
+ case FIEBRA:
+ Format(instr, "fiebra\t'f5,'m2,'f6,'m3");
break;
- case STC:
- Format(instr, "stc\t'r1,'d1('r2d,'r3)");
+ case FIDBRA:
+ Format(instr, "fidbra\t'f5,'m2,'f6,'m3");
break;
+ /* RX-a format */
case IC_z:
Format(instr, "ic\t'r1,'d1('r2d,'r3)");
break;
- case LD:
- Format(instr, "ld\t'f1,'d1('r2d,'r3)");
+ case AL:
+ Format(instr, "al\t'r1,'d1('r2d,'r3)");
break;
case LE:
Format(instr, "le\t'f1,'d1('r2d,'r3)");
break;
- case LDGR:
- Format(instr, "ldgr\t'f5,'r6");
- break;
- case MS:
- Format(instr, "ms\t'r1,'d1('r2d,'r3)");
+ case LD:
+ Format(instr, "ld\t'f1,'d1('r2d,'r3)");
break;
case STE:
Format(instr, "ste\t'f1,'d1('r2d,'r3)");
@@ -943,564 +606,318 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case STD:
Format(instr, "std\t'f1,'d1('r2d,'r3)");
break;
- case CFDBR:
- Format(instr, "cfdbr\t'r5,'m2,'f6");
- break;
- case CDFBR:
- Format(instr, "cdfbr\t'f5,'m2,'r6");
- break;
- case CFEBR:
- Format(instr, "cfebr\t'r5,'m2,'f6");
- break;
- case CEFBR:
- Format(instr, "cefbr\t'f5,'m2,'r6");
- break;
- case CELFBR:
- Format(instr, "celfbr\t'f5,'m2,'r6");
- break;
- case CGEBR:
- Format(instr, "cgebr\t'r5,'m2,'f6");
- break;
- case CGDBR:
- Format(instr, "cgdbr\t'r5,'m2,'f6");
- break;
- case CEGBR:
- Format(instr, "cegbr\t'f5,'m2,'r6");
- break;
- case CDGBR:
- Format(instr, "cdgbr\t'f5,'m2,'r6");
- break;
- case CDLFBR:
- Format(instr, "cdlfbr\t'f5,'m2,'r6");
- break;
- case CDLGBR:
- Format(instr, "cdlgbr\t'f5,'m2,'r6");
- break;
- case CELGBR:
- Format(instr, "celgbr\t'f5,'m2,'r6");
- break;
- case CLFDBR:
- Format(instr, "clfdbr\t'r5,'m2,'f6");
- break;
- case CLFEBR:
- Format(instr, "clfebr\t'r5,'m2,'f6");
- break;
- case CLGEBR:
- Format(instr, "clgebr\t'r5,'m2,'f6");
- break;
- case CLGDBR:
- Format(instr, "clgdbr\t'r5,'m2,'f6");
- break;
- case AEBR:
- Format(instr, "aebr\t'f5,'f6");
- break;
- case SEBR:
- Format(instr, "sebr\t'f5,'f6");
- break;
- case MEEBR:
- Format(instr, "meebr\t'f5,'f6");
- break;
- case DEBR:
- Format(instr, "debr\t'f5,'f6");
- break;
- case ADBR:
- Format(instr, "adbr\t'f5,'f6");
- break;
- case SDBR:
- Format(instr, "sdbr\t'f5,'f6");
- break;
- case MDBR:
- Format(instr, "mdbr\t'f5,'f6");
- break;
- case DDBR:
- Format(instr, "ddbr\t'f5,'f6");
- break;
- case CDBR:
- Format(instr, "cdbr\t'f5,'f6");
- break;
- case CEBR:
- Format(instr, "cebr\t'f5,'f6");
- break;
- case SQDBR:
- Format(instr, "sqdbr\t'f5,'f6");
- break;
- case SQEBR:
- Format(instr, "sqebr\t'f5,'f6");
- break;
- case LCDBR:
- Format(instr, "lcdbr\t'f5,'f6");
- break;
- case LCEBR:
- Format(instr, "lcebr\t'f5,'f6");
- break;
- case STH:
- Format(instr, "sth\t'r1,'d1('r2d,'r3)");
- break;
- case SRDA:
- Format(instr, "srda\t'r1,'d1('r3)");
- break;
- case SRDL:
- Format(instr, "srdl\t'r1,'d1('r3)");
- break;
- case MADBR:
- Format(instr, "madbr\t'f3,'f5,'f6");
- break;
- case MSDBR:
- Format(instr, "msdbr\t'f3,'f5,'f6");
- break;
- case FLOGR:
- Format(instr, "flogr\t'r5,'r6");
- break;
- case FIEBRA:
- Format(instr, "fiebra\t'f5,'m2,'f6,'m3");
- break;
- case FIDBRA:
- Format(instr, "fidbra\t'f5,'m2,'f6,'m3");
- break;
+ /* S format */
// TRAP4 is used in calling to native function. it will not be generated
// in native code.
- case TRAP4: {
+ case TRAP4:
Format(instr, "trap4");
break;
- }
- case LPGR:
- Format(instr, "lpgr\t'r5,'r6");
- break;
- case LPGFR:
- Format(instr, "lpgfr\t'r5,'r6");
- break;
- default:
- return false;
- }
- return true;
-}
-
-// Disassembles Six Byte S390 Instructions
-// @return true if successfully decoded
-bool Decoder::DecodeSixByte(Instruction* instr) {
- // Print the Instruction bits.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%012" PRIx64 " ",
- instr->InstructionBits<SixByteInstr>());
-
- Opcode opcode = instr->S390OpcodeValue();
- switch (opcode) {
- case DUMY:
- Format(instr, "dumy\t'r1, 'd2 ( 'r2d, 'r3 )");
- break;
-#define DECODE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'f1,'f2,'f3"); \
- break;
- S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
-#undef DECODE_VRR_C_INSTRUCTIONS
- case LLILF:
- Format(instr, "llilf\t'r1,'i7");
- break;
- case LLIHF:
- Format(instr, "llihf\t'r1,'i7");
- break;
- case AFI:
- Format(instr, "afi\t'r1,'i7");
- break;
- case AIH:
- Format(instr, "aih\t'r1,'i7");
- break;
- case ASI:
- Format(instr, "asi\t'd2('r3),'ic");
- break;
- case AGSI:
- Format(instr, "agsi\t'd2('r3),'ic");
- break;
- case ALFI:
- Format(instr, "alfi\t'r1,'i7");
- break;
- case AHIK:
- Format(instr, "ahik\t'r1,'r2,'i1");
- break;
- case AGHIK:
- Format(instr, "aghik\t'r1,'r2,'i1");
- break;
- case CLGFI:
- Format(instr, "clgfi\t'r1,'i7");
- break;
- case CLFI:
- Format(instr, "clfi\t'r1,'i7");
- break;
- case CLIH:
- Format(instr, "clih\t'r1,'i7");
- break;
- case CIH:
- Format(instr, "cih\t'r1,'i2");
- break;
+ /* RIL-a format */
case CFI:
Format(instr, "cfi\t'r1,'i2");
break;
case CGFI:
Format(instr, "cgfi\t'r1,'i2");
break;
- case BRASL:
- Format(instr, "brasl\t'r1,'ie");
- break;
- case BRCL:
- Format(instr, "brcl\t'm1,'i5");
- break;
- case IIHF:
- Format(instr, "iihf\t'r1,'i7");
- break;
- case LGFI:
- Format(instr, "lgfi\t'r1,'i7");
- break;
- case IILF:
- Format(instr, "iilf\t'r1,'i7");
- break;
- case XIHF:
- Format(instr, "xihf\t'r1,'i7");
- break;
- case XILF:
- Format(instr, "xilf\t'r1,'i7");
- break;
- case SLLK:
- Format(instr, "sllk\t'r1,'r2,'d2('r3)");
- break;
- case SLLG:
- Format(instr, "sllg\t'r1,'r2,'d2('r3)");
- break;
- case RLL:
- Format(instr, "rll\t'r1,'r2,'d2('r3)");
- break;
- case RLLG:
- Format(instr, "rllg\t'r1,'r2,'d2('r3)");
- break;
- case SRLK:
- Format(instr, "srlk\t'r1,'r2,'d2('r3)");
- break;
- case SRLG:
- Format(instr, "srlg\t'r1,'r2,'d2('r3)");
- break;
- case SLAK:
- Format(instr, "slak\t'r1,'r2,'d2('r3)");
- break;
- case SLAG:
- Format(instr, "slag\t'r1,'r2,'d2('r3)");
- break;
- case SRAK:
- Format(instr, "srak\t'r1,'r2,'d2('r3)");
- break;
- case SRAG:
- Format(instr, "srag\t'r1,'r2,'d2('r3)");
+ case AFI:
+ Format(instr, "afi\t'r1,'i2");
break;
- case RISBG:
- Format(instr, "risbg\t'r1,'r2,'i9,'ia,'ib");
+ case AGFI:
+ Format(instr, "agfi\t'r1,'i2");
break;
- case RISBGN:
- Format(instr, "risbgn\t'r1,'r2,'i9,'ia,'ib");
+ case MSFI:
+ Format(instr, "msfi\t'r1,'i2");
break;
- case LOCG:
- Format(instr, "locg\t'm2,'r1,'d2('r3)");
+ case MSGFI:
+ Format(instr, "msgfi\t'r1,'i2");
break;
- case LOC:
- Format(instr, "loc\t'm2,'r1,'d2('r3)");
+ case ALSIH:
+ Format(instr, "alsih\t'r1,'i2");
break;
- case LMY:
- Format(instr, "lmy\t'r1,'r2,'d2('r3)");
+ case ALSIHN:
+ Format(instr, "alsihn\t'r1,'i2");
break;
- case LMG:
- Format(instr, "lmg\t'r1,'r2,'d2('r3)");
+ case CIH:
+ Format(instr, "cih\t'r1,'i2");
break;
- case CSY:
- Format(instr, "csy\t'r1,'r2,'d2('r3)");
+ case AIH:
+ Format(instr, "aih\t'r1,'i2");
break;
- case CSG:
- Format(instr, "csg\t'r1,'r2,'d2('r3)");
+ case LGFI:
+ Format(instr, "lgfi\t'r1,'i2");
break;
- case STMY:
- Format(instr, "stmy\t'r1,'r2,'d2('r3)");
+ /* SIY format */
+ case ASI:
+ Format(instr, "asi\t'd2('r3),'ic");
break;
- case STMG:
- Format(instr, "stmg\t'r1,'r2,'d2('r3)");
+ case AGSI:
+ Format(instr, "agsi\t'd2('r3),'ic");
break;
+ /* RXY-a format */
case LT:
Format(instr, "lt\t'r1,'d2('r2d,'r3)");
break;
- case LTG:
- Format(instr, "ltg\t'r1,'d2('r2d,'r3)");
- break;
- case ML:
- Format(instr, "ml\t'r1,'d2('r2d,'r3)");
- break;
- case AY:
- Format(instr, "ay\t'r1,'d2('r2d,'r3)");
- break;
- case SY:
- Format(instr, "sy\t'r1,'d2('r2d,'r3)");
- break;
- case NY:
- Format(instr, "ny\t'r1,'d2('r2d,'r3)");
- break;
- case OY:
- Format(instr, "oy\t'r1,'d2('r2d,'r3)");
- break;
- case XY:
- Format(instr, "xy\t'r1,'d2('r2d,'r3)");
- break;
- case CY:
- Format(instr, "cy\t'r1,'d2('r2d,'r3)");
- break;
- case AHY:
- Format(instr, "ahy\t'r1,'d2('r2d,'r3)");
- break;
- case SHY:
- Format(instr, "shy\t'r1,'d2('r2d,'r3)");
- break;
- case LGH:
- Format(instr, "lgh\t'r1,'d2('r2d,'r3)");
- break;
- case AG:
- Format(instr, "ag\t'r1,'d2('r2d,'r3)");
- break;
- case AGF:
- Format(instr, "agf\t'r1,'d2('r2d,'r3)");
- break;
- case SG:
- Format(instr, "sg\t'r1,'d2('r2d,'r3)");
- break;
- case NG:
- Format(instr, "ng\t'r1,'d2('r2d,'r3)");
- break;
- case OG:
- Format(instr, "og\t'r1,'d2('r2d,'r3)");
- break;
- case XG:
- Format(instr, "xg\t'r1,'d2('r2d,'r3)");
- break;
- case CG:
- Format(instr, "cg\t'r1,'d2('r2d,'r3)");
- break;
- case LB:
- Format(instr, "lb\t'r1,'d2('r2d,'r3)");
- break;
- case LRVH:
- Format(instr, "lrvh\t'r1,'d2('r2d,'r3)");
- break;
- case LRV:
- Format(instr, "lrv\t'r1,'d2('r2d,'r3)");
- break;
- case LRVG:
- Format(instr, "lrvg\t'r1,'d2('r2d,'r3)");
- break;
- case LG:
- Format(instr, "lg\t'r1,'d2('r2d,'r3)");
- break;
- case LGF:
- Format(instr, "lgf\t'r1,'d2('r2d,'r3)");
- break;
- case LLGF:
- Format(instr, "llgf\t'r1,'d2('r2d,'r3)");
- break;
- case LY:
- Format(instr, "ly\t'r1,'d2('r2d,'r3)");
- break;
- case ALY:
- Format(instr, "aly\t'r1,'d2('r2d,'r3)");
- break;
- case ALG:
- Format(instr, "alg\t'r1,'d2('r2d,'r3)");
- break;
- case SLG:
- Format(instr, "slg\t'r1,'d2('r2d,'r3)");
- break;
- case SGF:
- Format(instr, "sgf\t'r1,'d2('r2d,'r3)");
- break;
- case SLY:
- Format(instr, "sly\t'r1,'d2('r2d,'r3)");
- break;
- case LLH:
- Format(instr, "llh\t'r1,'d2('r2d,'r3)");
- break;
- case LLGH:
- Format(instr, "llgh\t'r1,'d2('r2d,'r3)");
- break;
- case LLC:
- Format(instr, "llc\t'r1,'d2('r2d,'r3)");
- break;
- case LLGC:
- Format(instr, "llgc\t'r1,'d2('r2d,'r3)");
- break;
- case LDEB:
- Format(instr, "ldeb\t'f1,'d2('r2d,'r3)");
- break;
- case LAY:
- Format(instr, "lay\t'r1,'d2('r2d,'r3)");
- break;
- case LARL:
- Format(instr, "larl\t'r1,'i5");
- break;
- case LGB:
- Format(instr, "lgb\t'r1,'d2('r2d,'r3)");
- break;
- case CHY:
- Format(instr, "chy\t'r1,'d2('r2d,'r3)");
- break;
- case CLY:
- Format(instr, "cly\t'r1,'d2('r2d,'r3)");
- break;
- case CLIY:
- Format(instr, "cliy\t'd2('r3),'i8");
- break;
- case TMY:
- Format(instr, "tmy\t'd2('r3),'i8");
- break;
- case CLG:
- Format(instr, "clg\t'r1,'d2('r2d,'r3)");
- break;
- case BCTG:
- Format(instr, "bctg\t'r1,'d2('r2d,'r3)");
- break;
- case STY:
- Format(instr, "sty\t'r1,'d2('r2d,'r3)");
- break;
- case STRVH:
- Format(instr, "strvh\t'r1,'d2('r2d,'r3)");
- break;
- case STRV:
- Format(instr, "strv\t'r1,'d2('r2d,'r3)");
- break;
- case STRVG:
- Format(instr, "strvg\t'r1,'d2('r2d,'r3)");
- break;
- case STG:
- Format(instr, "stg\t'r1,'d2('r2d,'r3)");
- break;
- case ICY:
- Format(instr, "icy\t'r1,'d2('r2d,'r3)");
- break;
- case MVC:
- Format(instr, "mvc\t'd3('i8,'r3),'d4('r7)");
- break;
- case MVHI:
- Format(instr, "mvhi\t'd3('r3),'id");
- break;
- case MVGHI:
- Format(instr, "mvghi\t'd3('r3),'id");
- break;
- case ALGFI:
- Format(instr, "algfi\t'r1,'i7");
- break;
- case SLGFI:
- Format(instr, "slgfi\t'r1,'i7");
- break;
- case SLFI:
- Format(instr, "slfi\t'r1,'i7");
- break;
- case NIHF:
- Format(instr, "nihf\t'r1,'i7");
- break;
- case NILF:
- Format(instr, "nilf\t'r1,'i7");
- break;
- case OIHF:
- Format(instr, "oihf\t'r1,'i7");
- break;
- case OILF:
- Format(instr, "oilf\t'r1,'i7");
- break;
- case MSFI:
- Format(instr, "msfi\t'r1,'i7");
- break;
- case MSGFI:
- Format(instr, "msgfi\t'r1,'i7");
- break;
case LDY:
Format(instr, "ldy\t'f1,'d2('r2d,'r3)");
break;
case LEY:
Format(instr, "ley\t'f1,'d2('r2d,'r3)");
break;
- case MSG:
- Format(instr, "msg\t'r1,'d2('r2d,'r3)");
- break;
- case DSG:
- Format(instr, "dsg\t'r1,'d2('r2d,'r3)");
- break;
- case DSGF:
- Format(instr, "dsgf\t'r1,'d2('r2d,'r3)");
- break;
- case MSGF:
- Format(instr, "msgf\t'r1,'d2('r2d,'r3)");
- break;
- case MSY:
- Format(instr, "msy\t'r1,'d2('r2d,'r3)");
- break;
- case MSC:
- Format(instr, "msc\t'r1,'d2('r2d,'r3)");
- break;
- case MSGC:
- Format(instr, "msgc\t'r1,'d2('r2d,'r3)");
+ case STDY:
+ Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
break;
case STEY:
Format(instr, "stey\t'f1,'d2('r2d,'r3)");
break;
- case STDY:
- Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
+ /* RXE format */
+ case LDEB:
+ Format(instr, "ldeb\t'f1,'d2('r2d,'r3)");
break;
- case ADB:
- Format(instr, "adb\t'f1,'d1('r2d, 'r3)");
+ default:
+ return false;
+ }
+ return true;
+}
+
+// Handles common cases of instructions;
+// @return true if successfully decoded
+bool Decoder::DecodeGeneric(Instruction* instr) {
+ Opcode opcode = instr->S390OpcodeValue();
+ switch (opcode) {
+ /* 2 bytes */
+#define DECODE_RR_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2"); \
+ break;
+ S390_RR_OPCODE_LIST(DECODE_RR_INSTRUCTIONS)
+#undef DECODE_RR_INSTRUCTIONS
+
+ /* 4 bytes */
+#define DECODE_RS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'d1('r3)"); \
break;
- case AEB:
- Format(instr, "aeb\t'f1,'d1('r2d, 'r3)");
+ S390_RS_A_OPCODE_LIST(DECODE_RS_A_INSTRUCTIONS)
+#undef DECODE_RS_A_INSTRUCTIONS
+
+#define DECODE_RSI_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'i4"); \
break;
- case CDB:
- Format(instr, "cdb\t'f1,'d1('r2d, 'r3)");
+ S390_RSI_OPCODE_LIST(DECODE_RSI_INSTRUCTIONS)
+#undef DECODE_RSI_INSTRUCTIONS
+
+#define DECODE_RI_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'i1"); \
break;
- case CEB:
- Format(instr, "ceb\t'f1,'d1('r2d, 'r3)");
+ S390_RI_A_OPCODE_LIST(DECODE_RI_A_INSTRUCTIONS)
+#undef DECODE_RI_A_INSTRUCTIONS
+
+#define DECODE_RI_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'i4"); \
break;
- case SDB:
- Format(instr, "sdb\t'r1,'d1('r2d, 'r3)");
+ S390_RI_B_OPCODE_LIST(DECODE_RI_B_INSTRUCTIONS)
+#undef DECODE_RI_B_INSTRUCTIONS
+
+#define DECODE_RI_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'i4"); \
break;
- case SEB:
- Format(instr, "seb\t'r1,'d1('r2d, 'r3)");
+ S390_RI_C_OPCODE_LIST(DECODE_RI_C_INSTRUCTIONS)
+#undef DECODE_RI_C_INSTRUCTIONS
+
+#define DECODE_RRE_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6"); \
+ break;
+ S390_RRE_OPCODE_LIST(DECODE_RRE_INSTRUCTIONS)
+#undef DECODE_RRE_INSTRUCTIONS
+
+#define DECODE_RRF_A_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6,'r3"); \
break;
- case MDB:
- Format(instr, "mdb\t'r1,'d1('r2d, 'r3)");
+ S390_RRF_A_OPCODE_LIST(DECODE_RRF_A_INSTRUCTIONS)
+#undef DECODE_RRF_A_INSTRUCTIONS
+
+#define DECODE_RRF_C_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6,'m2"); \
break;
- case MEEB:
- Format(instr, "meeb\t'r1,'d1('r2d, 'r3)");
+ S390_RRF_C_OPCODE_LIST(DECODE_RRF_C_INSTRUCTIONS)
+#undef DECODE_RRF_C_INSTRUCTIONS
+
+#define DECODE_RRF_E_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'m2,'f6"); \
break;
- case DDB:
- Format(instr, "ddb\t'r1,'d1('r2d, 'r3)");
+ S390_RRF_E_OPCODE_LIST(DECODE_RRF_E_INSTRUCTIONS)
+#undef DECODE_RRF_E_INSTRUCTIONS
+
+#define DECODE_RX_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'d1('r2d,'r3)"); \
break;
- case DEB:
- Format(instr, "deb\t'r1,'d1('r2d, 'r3)");
+ S390_RX_A_OPCODE_LIST(DECODE_RX_A_INSTRUCTIONS)
+#undef DECODE_RX_A_INSTRUCTIONS
+
+#define DECODE_RX_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'d1('r2d,'r3)"); \
break;
- case SQDB:
- Format(instr, "sqdb\t'r1,'d1('r2d, 'r3)");
+ S390_RX_B_OPCODE_LIST(DECODE_RX_B_INSTRUCTIONS)
+#undef DECODE_RX_B_INSTRUCTIONS
+
+#define DECODE_RRD_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f3,'f5,'f6"); \
break;
- case PFD:
- Format(instr, "pfd\t'm1,'d2('r2d,'r3)");
+ S390_RRD_OPCODE_LIST(DECODE_RRD_INSTRUCTIONS)
+#undef DECODE_RRD_INSTRUCTIONS
+
+#define DECODE_SI_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd1('r3),'i8"); \
break;
+ S390_SI_OPCODE_LIST(DECODE_SI_INSTRUCTIONS)
+#undef DECODE_SI_INSTRUCTIONS
+
+ /* 6 bytes */
+#define DECODE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'f3"); \
+ break;
+ S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
+#undef DECODE_VRR_C_INSTRUCTIONS
+
+#define DECODE_RIL_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'i7"); \
+ break;
+ S390_RIL_A_OPCODE_LIST(DECODE_RIL_A_INSTRUCTIONS)
+#undef DECODE_RIL_A_INSTRUCTIONS
+
+#define DECODE_RIL_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'ie"); \
+ break;
+ S390_RIL_B_OPCODE_LIST(DECODE_RIL_B_INSTRUCTIONS)
+#undef DECODE_RIL_B_INSTRUCTIONS
+
+#define DECODE_RIL_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'ie"); \
+ break;
+ S390_RIL_C_OPCODE_LIST(DECODE_RIL_C_INSTRUCTIONS)
+#undef DECODE_RIL_C_INSTRUCTIONS
+
+#define DECODE_SIY_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd2('r3),'i8"); \
+ break;
+ S390_SIY_OPCODE_LIST(DECODE_SIY_INSTRUCTIONS)
+#undef DECODE_SIY_INSTRUCTIONS
+
+#define DECODE_RIE_D_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'i1"); \
+ break;
+ S390_RIE_D_OPCODE_LIST(DECODE_RIE_D_INSTRUCTIONS)
+#undef DECODE_RIE_D_INSTRUCTIONS
+
+#define DECODE_RIE_E_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'i4"); \
+ break;
+ S390_RIE_E_OPCODE_LIST(DECODE_RIE_E_INSTRUCTIONS)
+#undef DECODE_RIE_E_INSTRUCTIONS
+
+#define DECODE_RIE_F_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'i9,'ia,'ib"); \
+ break;
+ S390_RIE_F_OPCODE_LIST(DECODE_RIE_F_INSTRUCTIONS)
+#undef DECODE_RIE_F_INSTRUCTIONS
+
+#define DECODE_RSY_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'d2('r3)"); \
+ break;
+ S390_RSY_A_OPCODE_LIST(DECODE_RSY_A_INSTRUCTIONS)
+#undef DECODE_RSY_A_INSTRUCTIONS
+
+#define DECODE_RSY_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm2,'r1,'d2('r3)"); \
+ break;
+ S390_RSY_B_OPCODE_LIST(DECODE_RSY_B_INSTRUCTIONS)
+#undef DECODE_RSY_B_INSTRUCTIONS
+
+#define DECODE_RXY_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'d2('r2d,'r3)"); \
+ break;
+ S390_RXY_A_OPCODE_LIST(DECODE_RXY_A_INSTRUCTIONS)
+#undef DECODE_RXY_A_INSTRUCTIONS
+
+#define DECODE_RXY_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'d2('r2d,'r3)"); \
+ break;
+ S390_RXY_B_OPCODE_LIST(DECODE_RXY_B_INSTRUCTIONS)
+#undef DECODE_RXY_B_INSTRUCTIONS
+
+#define DECODE_RXE_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'d1('r2d, 'r3)"); \
+ break;
+ S390_RXE_OPCODE_LIST(DECODE_RXE_INSTRUCTIONS)
+#undef DECODE_RXE_INSTRUCTIONS
+
+#define DECODE_SIL_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd3('r3),'id"); \
+ break;
+ S390_SIL_OPCODE_LIST(DECODE_SIL_INSTRUCTIONS)
+#undef DECODE_SIL_INSTRUCTIONS
+
+#define DECODE_SS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd3('i8,'r3),'d4('r7)"); \
+ break;
+ S390_SS_A_OPCODE_LIST(DECODE_SS_A_INSTRUCTIONS)
+#undef DECODE_SS_A_INSTRUCTIONS
+
default:
return false;
}
return true;
}
-#undef VERIFIY
-
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
int instrLength = instr->InstructionLength();
- if (2 == instrLength)
- DecodeTwoByte(instr);
- else if (4 == instrLength)
- DecodeFourByte(instr);
- else
- DecodeSixByte(instr);
+ // Print the Instruction bits.
+ if (instrLength == 2) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%04x ", instr->InstructionBits<TwoByteInstr>());
+ } else if (instrLength == 4) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ", instr->InstructionBits<FourByteInstr>());
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%012" PRIx64 " ", instr->InstructionBits<SixByteInstr>());
+ }
+ bool decoded = DecodeSpecial(instr);
+ if (!decoded)
+ decoded = DecodeGeneric(instr);
+ if (!decoded)
+ Unknown(instr);
return instrLength;
}
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/s390/frame-constants-s390.h
index f1c9febbb1..0d89ceedb3 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/s390/frame-constants-s390.h
@@ -5,6 +5,9 @@
#ifndef V8_S390_FRAME_CONSTANTS_S390_H_
#define V8_S390_FRAME_CONSTANTS_S390_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -30,6 +33,23 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 5;
+#ifdef V8_TARGET_ARCH_S390X
+ static constexpr int kNumberOfSavedFpParamRegs = 4;
+#else
+ static constexpr int kNumberOfSavedFpParamRegs = 2;
+#endif
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 87a802b091..c5fbebc5a0 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -57,12 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r7; }
const Register ApiGetterDescriptor::HolderRegister() { return r2; }
const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
-const Register MathPowTaggedDescriptor::exponent() { return r4; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
@@ -175,22 +169,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r3 : the target to call
- // r5 : the new target
- Register registers[] = {r3, r5, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r2, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -198,41 +177,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r2 -- number of arguments
- // r3 -- function
- // r4 -- allocation site with elements kind
- Register registers[] = {r3, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // r2 -- number of arguments
- // r3 -- function
- // r4 -- allocation site with elements kind
- Register registers[] = {r3, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {r3, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -299,7 +244,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (argc)
@@ -309,6 +256,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 8ea3785944..584ba9a0db 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -10,7 +10,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -20,16 +19,17 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
#include "src/s390/macro-assembler-s390.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -41,15 +41,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -128,58 +119,35 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
const uint32_t offset =
- FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
+ FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- LoadP(destination, MemOperand(destination, offset));
+ LoadP(destination, MemOperand(destination, offset), r1);
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ LoadP(destination, MemOperand(kRootRegister, offset));
+}
- LoadP(destination,
- MemOperand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ LoadRR(destination, kRootRegister);
+ } else if (is_uint12(offset)) {
+ la(destination, MemOperand(kRootRegister, offset));
+ } else {
+ DCHECK(is_int20(offset));
+ lay(destination, MemOperand(kRootRegister, offset));
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
@@ -189,7 +157,7 @@ void MacroAssembler::JumpToJSEntry(Register target) {
}
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, CRegister) {
+ Condition cond) {
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
@@ -202,24 +170,37 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
- CRegister cr) {
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
- Jump(static_cast<intptr_t>(target), rmode, cond, cr);
+ Jump(static_cast<intptr_t>(target), rmode, cond);
}
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Register scratch = r1;
- LookupConstant(scratch, code);
- la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
- b(cond, scratch);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ Register scratch = r1;
+ IndirectLoadConstant(scratch, code);
+ la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
+ b(cond, scratch);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(ip, cond);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
jump(code, rmode, cond);
}
@@ -292,16 +273,30 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- LookupConstant(ip, code);
- la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
- Call(ip);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ IndirectLoadConstant(ip, code);
+ la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
+ Call(ip);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
call(code, rmode);
}
@@ -336,29 +331,22 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Heap::RootListIndex root_index;
- if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
- LookupConstant(dst, value);
- } else {
- LoadRoot(dst, root_index);
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
}
- return;
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
- reference.address() !=
- ExternalReference::roots_array_start(isolate()).address()) {
- LookupExternalReference(dst, reference);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, reference);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
@@ -378,6 +366,48 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
}
+// Wrapper around Assembler::mvc (SS-a format)
+void TurboAssembler::MoveChar(const MemOperand& opnd1,
+ const MemOperand& opnd2,
+ const Operand& length) {
+ mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
+}
+
+// Wrapper around Assembler::clc (SS-a format)
+void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
+ const MemOperand& opnd2,
+ const Operand& length) {
+ clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
+}
+
+// Wrapper around Assembler::xc (SS-a format)
+void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
+ const MemOperand& opnd2,
+ const Operand& length) {
+ xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
+}
+
+// Wrapper around Assembler::risbg(n) (RIE-f)
+void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
+ const Operand& startBit, const Operand& endBit,
+ const Operand& shiftAmt, bool zeroBits) {
+ if (zeroBits)
+ // High tag the top bit of I4/EndBit to zero out any unselected bits
+ risbg(dst, src, startBit,
+ Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
+ else
+ risbg(dst, src, startBit, endBit, shiftAmt);
+}
+
+void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
+ Label* L) {
+#if V8_TARGET_ARCH_S390X
+ brxhg(dst, inc, L);
+#else
+ brxh(dst, inc, L);
+#endif // V8_TARGET_ARCH_S390X
+}
+
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kPointerSize;
@@ -432,7 +462,7 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition) {
- LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+ LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -700,7 +730,7 @@ void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
}
void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
- cefbr(Condition(4), dst, src);
+ cefbra(Condition(4), dst, src);
}
void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
@@ -956,7 +986,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
uint32_t shift) {
LoadRR(r0, src_high);
LoadRR(r1, src_low);
- srdl(r0, r0, Operand(shift));
+ srdl(r0, Operand(shift));
LoadRR(dst_high, r0);
LoadRR(dst_low, r1);
}
@@ -1015,11 +1045,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
Load(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
-
- if (type == StackFrame::INTERNAL) {
- Move(r1, CodeObject());
- push(r1);
- }
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -1270,8 +1295,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
// ARM has some sanity checks as per below, considering add them for S390
- // DCHECK(actual.is_immediate() || actual.reg() == r2);
- // DCHECK(expected.is_immediate() || expected.reg() == r4);
+ DCHECK(actual.is_immediate() || actual.reg() == r2);
+ DCHECK(expected.is_immediate() || expected.reg() == r4);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1421,9 +1446,10 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
Register temp_reg = r6;
LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
- LoadW(expected_reg,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ LoadLogicalHalfWordP(
+ expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag);
@@ -1473,8 +1499,8 @@ void MacroAssembler::PushStackHandler() {
StoreP(r0, MemOperand(sp)); // Padding.
// Copy the old handler into the next handler slot.
- mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
- kPointerSize);
+ MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
+ Operand(kPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
@@ -1509,7 +1535,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
}
void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
- CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
+ CmpP(obj, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
@@ -1550,7 +1576,8 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
- DoubleRegister double_input) {
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -1561,7 +1588,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(double_input, MemOperand(sp));
- Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
LoadP(result, MemOperand(sp, 0));
la(sp, MemOperand(sp, kDoubleSize));
@@ -1580,19 +1611,14 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
beq(done);
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
mov(r2, Operand(f->nargs));
Move(r3, ExternalReference::Create(f));
-
-#if V8_TARGET_ARCH_S390X
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
-#else
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
-#endif
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, r2, r3));
+ la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -1689,18 +1715,17 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
stop(msg);
return;
}
-#endif
LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
@@ -1755,18 +1780,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- TestIfSmi(object);
- Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
- push(object);
- CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
- pop(object);
- Check(eq, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void MacroAssembler::AssertConstructor(Register object, Register scratch) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1836,7 +1849,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
CompareRoot(object, Heap::kUndefinedValueRootIndex);
beq(&done_checking, Label::kNear);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -2254,7 +2267,7 @@ void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
#define Generate_DivU32(instr) \
{ \
lr(r0, src1); \
- srdl(r0, Operand(32)); \
+ srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r1); \
}
@@ -2328,7 +2341,7 @@ void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
#define Generate_ModU32(instr) \
{ \
lr(r0, src1); \
- srdl(r0, Operand(32)); \
+ srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r0); \
}
@@ -2869,6 +2882,13 @@ void TurboAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
#endif
}
+// Load And Subtract 32-bit (similar to laa/lan/lao/lax)
+void TurboAssembler::LoadAndSub32(Register dst, Register src,
+ const MemOperand& opnd) {
+ lcr(dst, src);
+ laa(dst, dst, opnd);
+}
+
//----------------------------------------------------------------------------
// Subtract Logical Instructions
//----------------------------------------------------------------------------
@@ -3015,14 +3035,15 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
int endBit = 63 - trailing_zeros;
// Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
- risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
- true);
+ RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
+ Operand::Zero(), true);
return;
} else if (-1 == shifted_value) {
// A Special case in which all top bits up to MSB are 1's. In this case,
// we can set startBit to be 0.
int endBit = 63 - trailing_zeros;
- risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+ RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
+ Operand::Zero(), true);
return;
}
}
@@ -4239,7 +4260,8 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int endBit = 63 - numBitsToClear;
- risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+ RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
+ Operand::Zero(), true);
return;
}
@@ -4428,6 +4450,16 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
+void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+ Cmp32(x, Operand(y));
+ beq(dest);
+}
+
+void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+ Cmp32(x, Operand(y));
+ blt(dest);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 0b3626c797..a82f7999f1 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -9,6 +9,7 @@
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/s390/assembler-s390.h"
+#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@@ -25,9 +26,13 @@ constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;
constexpr Register kInterpreterDispatchTableRegister = r8;
+
constexpr Register kJavaScriptCallArgCountRegister = r2;
-constexpr Register kJavaScriptCallNewTargetRegister = r5;
constexpr Register kJavaScriptCallCodeStartRegister = r4;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r5;
+constexpr Register kJavaScriptCallExtraArg1Register = r4;
+
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r3;
constexpr Register kRuntimeCallArgCountRegister = r2;
@@ -167,23 +172,18 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
#endif
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions.
static int CallSize(Register target);
@@ -191,14 +191,16 @@ class TurboAssembler : public Assembler {
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
- CRegister cr = cr7);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value);
beq(smi_label /*, cr0*/); // branch if SMI
}
+ void JumpIfEqual(Register x, int32_t y, Label* dest);
+ void JumpIfLessThan(Register x, int32_t y, Label* dest);
+
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
@@ -209,7 +211,9 @@ class TurboAssembler : public Assembler {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
Call(target, rmode);
}
@@ -232,6 +236,21 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
+ void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
+ const Operand& length);
+
+ void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
+ const Operand& length);
+
+ void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
+ const Operand& length);
+
+ void RotateInsertSelectBits(Register dst, Register src,
+ const Operand& startBit, const Operand& endBit,
+ const Operand& shiftAmt, bool zeroBits);
+
+ void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
+
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
@@ -264,8 +283,11 @@ class TurboAssembler : public Assembler {
Register exclusion3 = no_reg);
// Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ LoadRoot(destination, index, al);
+ }
void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond = al);
+ Condition cond);
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
@@ -333,6 +355,7 @@ class TurboAssembler : public Assembler {
void Sub32(Register dst, const MemOperand& opnd);
void SubP(Register dst, const MemOperand& opnd);
void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
+ void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
// Subtract Logical (Register - Mem)
void SubLogical(Register dst, const MemOperand& opnd);
@@ -655,6 +678,7 @@ class TurboAssembler : public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
+ AddP(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
@@ -834,10 +858,9 @@ class TurboAssembler : public Assembler {
// Call a code stub.
void CallStubDelayed(CodeStub* stub);
- // Call a runtime routine.
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
@@ -879,7 +902,7 @@ class TurboAssembler : public Assembler {
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input);
+ DoubleRegister double_input, StubCallMode stub_mode);
void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
Label* done);
@@ -896,8 +919,6 @@ class TurboAssembler : public Assembler {
// Print a message to stdout and abort execution.
void Abort(AbortReason reason);
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
@@ -917,8 +938,8 @@ class TurboAssembler : public Assembler {
int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
- risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
- true);
+ RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
+ Operand(shiftAmount), true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
@@ -1025,34 +1046,26 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
- protected:
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
- CRegister cr = cr7);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
-
- bool has_frame_ = false;
- bool root_array_available_ = true;
- Isolate* isolate_;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@@ -1254,9 +1267,6 @@ class MacroAssembler : public TurboAssembler {
#define SmiWordOffset(offset) offset
#endif
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object, Register scratch);
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 9e521b3106..0fec28b69e 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -365,7 +365,7 @@ void S390Debugger::Debug() {
(strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
intptr_t value;
- OFStream os(stdout);
+ StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@@ -605,7 +605,7 @@ void S390Debugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the S390Debugger.\n");
+ PrintF(" stop and give control to the S390Debugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
@@ -998,8 +998,6 @@ void Simulator::EvalTableInit() {
EvalTable[STFPC] = &Simulator::Evaluate_STFPC;
EvalTable[LFPC] = &Simulator::Evaluate_LFPC;
EvalTable[TRE] = &Simulator::Evaluate_TRE;
- EvalTable[CUUTF] = &Simulator::Evaluate_CUUTF;
- EvalTable[CUTFU] = &Simulator::Evaluate_CUTFU;
EvalTable[STFLE] = &Simulator::Evaluate_STFLE;
EvalTable[SRNMB] = &Simulator::Evaluate_SRNMB;
EvalTable[SRNMT] = &Simulator::Evaluate_SRNMT;
@@ -1105,7 +1103,6 @@ void Simulator::EvalTableInit() {
EvalTable[CGDR] = &Simulator::Evaluate_CGDR;
EvalTable[CGXR] = &Simulator::Evaluate_CGXR;
EvalTable[LGDR] = &Simulator::Evaluate_LGDR;
- EvalTable[MDTR] = &Simulator::Evaluate_MDTR;
EvalTable[MDTRA] = &Simulator::Evaluate_MDTRA;
EvalTable[DDTRA] = &Simulator::Evaluate_DDTRA;
EvalTable[ADTRA] = &Simulator::Evaluate_ADTRA;
@@ -2680,6 +2677,12 @@ uintptr_t Simulator::PopAddress() {
int d2 = AS(RSInstruction)->D2Value(); \
int length = 4;
+#define DECODE_RSI_INSTRUCTION(r1, r3, i2) \
+ int r1 = AS(RSIInstruction)->R1Value(); \
+ int r3 = AS(RSIInstruction)->R3Value(); \
+ int32_t i2 = AS(RSIInstruction)->I2Value(); \
+ int length = 4;
+
#define DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val) \
int b1 = AS(SIInstruction)->B1Value(); \
intptr_t d1_val = AS(SIInstruction)->D1Value(); \
@@ -2749,6 +2752,12 @@ uintptr_t Simulator::PopAddress() {
int32_t i2 = AS(RIEInstruction)->I6Value(); \
int length = 6;
+#define DECODE_RIE_E_INSTRUCTION(r1, r2, i2) \
+ int r1 = AS(RIEInstruction)->R1Value(); \
+ int r2 = AS(RIEInstruction)->R2Value(); \
+ int32_t i2 = AS(RIEInstruction)->I6Value(); \
+ int length = 6;
+
#define DECODE_RIE_F_INSTRUCTION(r1, r2, i3, i4, i5) \
int r1 = AS(RIEInstruction)->R1Value(); \
int r2 = AS(RIEInstruction)->R2Value(); \
@@ -3866,9 +3875,19 @@ EVALUATE(LE) {
}
EVALUATE(BRXH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(BRXH);
+ DECODE_RSI_INSTRUCTION(r1, r3, i2);
+ int32_t r1_val = (r1 == 0) ? 0 : get_low_register<int32_t>(r1);
+ int32_t r3_val = (r3 == 0) ? 0 : get_low_register<int32_t>(r3);
+ intptr_t branch_address = get_pc() + (2 * i2);
+ r1_val += r3_val;
+ int32_t compare_val = r3 % 2 == 0 ?
+ get_low_register<int32_t>(r3 + 1) : r3_val;
+ if (r1_val > compare_val) {
+ set_pc(branch_address);
+ }
+ set_low_register(r1, r1_val);
+ return length;
}
EVALUATE(BRXLE) {
@@ -5269,18 +5288,6 @@ EVALUATE(TRE) {
return 0;
}
-EVALUATE(CUUTF) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
-}
-
-EVALUATE(CUTFU) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
-}
-
EVALUATE(STFLE) {
UNIMPLEMENTED();
USE(instr);
@@ -6390,12 +6397,6 @@ EVALUATE(LGDR) {
return length;
}
-EVALUATE(MDTR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
-}
-
EVALUATE(MDTRA) {
UNIMPLEMENTED();
USE(instr);
@@ -9194,28 +9195,38 @@ EVALUATE(STOC) {
return 0;
}
+#define ATOMIC_LOAD_AND_UPDATE_WORD32(op) \
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2); \
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2); \
+ intptr_t addr = static_cast<intptr_t>(b2_val) + d2; \
+ int32_t r3_val = get_low_register<int32_t>(r3); \
+ DCHECK_EQ(addr & 0x3, 0); \
+ int32_t r1_val = op(reinterpret_cast<int32_t*>(addr), \
+ r3_val, __ATOMIC_SEQ_CST); \
+ set_low_register(r1, r1_val);
+
EVALUATE(LAN) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAN);
+ ATOMIC_LOAD_AND_UPDATE_WORD32(__atomic_fetch_and);
+ return length;
}
EVALUATE(LAO) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAO);
+ ATOMIC_LOAD_AND_UPDATE_WORD32(__atomic_fetch_or);
+ return length;
}
EVALUATE(LAX) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAX);
+ ATOMIC_LOAD_AND_UPDATE_WORD32(__atomic_fetch_xor);
+ return length;
}
EVALUATE(LAA) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAA);
+ ATOMIC_LOAD_AND_UPDATE_WORD32(__atomic_fetch_add);
+ return length;
}
EVALUATE(LAAL) {
@@ -9224,10 +9235,21 @@ EVALUATE(LAAL) {
return 0;
}
+#undef ATOMIC_LOAD_AND_UPDATE_WORD32
+
EVALUATE(BRXHG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(BRXHG);
+ DECODE_RIE_E_INSTRUCTION(r1, r3, i2);
+ int64_t r1_val = (r1 == 0) ? 0 : get_register(r1);
+ int64_t r3_val = (r3 == 0) ? 0 : get_register(r3);
+ intptr_t branch_address = get_pc() + (2 * i2);
+ r1_val += r3_val;
+ int64_t compare_val = r3 % 2 == 0 ? get_register(r3 + 1) : r3_val;
+ if (r1_val > compare_val) {
+ set_pc(branch_address);
+ }
+ set_register(r1, r1_val);
+ return length;
}
EVALUATE(BRXLG) {
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 176693d2aa..e5062a7e2c 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -71,6 +71,7 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
// We use kMaxUInt32 as sentinel value, so check that we don't hit that.
DCHECK_NE(kMaxUInt32, pc_offset);
unsigned len = length();
+ CHECK_GT(len, 0);
// If pc == kMaxUInt32, then this entry covers all call sites in the function.
if (len == 1 && GetPcOffset(0) == kMaxUInt32) return GetEntry(0);
for (unsigned i = 0; i < len; i++) {
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index e232b47148..a99d735af6 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -21,10 +21,10 @@ void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter) {
#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
if (FLAG_perf_prof_unwinding_info) {
- OFStream os(stdout);
- os << "Warning: The --perf-prof-unwinding-info flag can be passed at "
- "mksnapshot time to get better results."
- << std::endl;
+ StdoutStream{}
+ << "Warning: The --perf-prof-unwinding-info flag can be passed at "
+ "mksnapshot time to get better results."
+ << std::endl;
}
#endif
CHECK(interpreter->IsDispatchTableInitialized());
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 1c84703147..60950a93bb 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -5,6 +5,7 @@
#ifndef V8_SIGNATURE_H_
#define V8_SIGNATURE_H_
+#include "src/base/functional.h"
#include "src/base/iterator.h"
#include "src/machine-type.h"
#include "src/zone/zone.h"
@@ -46,16 +47,13 @@ class Signature : public ZoneObject {
return {reps_, reps_ + return_count_ + parameter_count_};
}
- bool Equals(const Signature* that) const {
- if (this == that) return true;
- if (this->parameter_count() != that->parameter_count()) return false;
- if (this->return_count() != that->return_count()) return false;
- size_t size = this->return_count() + this->parameter_count();
- for (size_t i = 0; i < size; i++) {
- if (this->reps_[i] != that->reps_[i]) return false;
- }
- return true;
+ bool operator==(const Signature& other) const {
+ if (this == &other) return true;
+ if (parameter_count() != other.parameter_count()) return false;
+ if (return_count() != other.return_count()) return false;
+ return std::equal(all().begin(), all().end(), other.all().begin());
}
+ bool operator!=(const Signature& other) const { return !(*this == other); }
// For incrementally building signatures.
class Builder {
@@ -101,6 +99,13 @@ class Signature : public ZoneObject {
typedef Signature<MachineType> MachineSignature;
+template <typename T>
+size_t hash_value(const Signature<T>& sig) {
+ size_t hash = base::hash_combine(sig.parameter_count(), sig.return_count());
+ for (const T& t : sig.all()) hash = base::hash_combine(hash, t);
+ return hash;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index 8e3ad8c20c..0e32844ba0 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -73,6 +73,20 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
#endif
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ // Printing is deferred until this point (after all eager builtins have
+ // been deserialized), since builtins may refer to not-yet-deserialized ones.
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ if (!IsLazyDeserializationEnabled() || !Builtins::IsLazy(i)) {
+ Code* code = builtins->builtin(i);
+ const char* name = Builtins::name(i);
+ code->PrintBuiltinCode(isolate(), name);
+ }
+ }
+ }
+#endif
+
// Deserialize bytecode handlers.
Interpreter* interpreter = isolate()->interpreter();
@@ -113,12 +127,8 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
-
- DCHECK(isolate()->builtins()->is_initialized());
- code->Disassemble(Builtins::name(builtin_id), os);
- os << std::flush;
+ const char* name = Builtins::name(builtin_id);
+ code->PrintBuiltinCode(isolate(), name);
}
#endif // ENABLE_DISASSEMBLER
@@ -129,19 +139,7 @@ Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
- Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
-
- code->Disassemble(Bytecodes::ToString(bytecode), os);
- os << std::flush;
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
+ return DeserializeHandlerRaw(bytecode, operand_scale);
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
@@ -196,13 +194,15 @@ Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
- const char* handler_name =
- isolate()->interpreter()->LookupNameOfBytecodeHandler(code);
- if (handler_name == nullptr) {
- handler_name = "UnknownBytecodeHadler";
- }
+ std::string name = Bytecodes::ToString(bytecode, operand_scale);
PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
- AbstractCode::cast(code), handler_name));
+ AbstractCode::cast(code), name.c_str()));
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ code->PrintBuiltinCode(isolate(), name.c_str());
+ }
+#endif // ENABLE_DISASSEMBLER
+
return code;
}
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.cc b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
index af33e1ee49..a50fc23dd8 100644
--- a/deps/v8/src/snapshot/builtin-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
@@ -18,12 +18,11 @@ SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
// system. Instead of worrying about chunk indices and offsets, we simply
// need to generate unique offsets here.
- const uint32_t virtual_chunk_index = 0;
- const auto ref = SerializerReference::BackReference(
- CODE_SPACE, virtual_chunk_index, virtual_chunk_offset_);
+ const auto ref =
+ SerializerReference::BuiltinReference(next_builtin_reference_index_);
- virtual_chunk_size_ += size;
- virtual_chunk_offset_ += kObjectAlignment; // Needs to be aligned.
+ allocated_bytes_ += size;
+ next_builtin_reference_index_++;
return ref;
}
@@ -31,11 +30,8 @@ SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
#ifdef DEBUG
bool BuiltinSerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
- DCHECK(reference.is_back_reference());
- AllocationSpace space = reference.space();
- DCHECK_EQ(space, CODE_SPACE);
- DCHECK_EQ(reference.chunk_index(), 0);
- return reference.chunk_offset() < virtual_chunk_offset_;
+ DCHECK(reference.is_builtin_reference());
+ return reference.builtin_index() < next_builtin_reference_index_;
}
#endif // DEBUG
@@ -55,7 +51,7 @@ void BuiltinSerializerAllocator::OutputStatistics() {
PrintF("\n");
for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
- uint32_t space_size = (space == CODE_SPACE) ? virtual_chunk_size_ : 0;
+ uint32_t space_size = (space == CODE_SPACE) ? allocated_bytes_ : 0;
PrintF("%16d", space_size);
}
PrintF("\n");
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.h b/deps/v8/src/snapshot/builtin-serializer-allocator.h
index a2c9a036e4..5a92843685 100644
--- a/deps/v8/src/snapshot/builtin-serializer-allocator.h
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.h
@@ -38,10 +38,8 @@ class BuiltinSerializerAllocator final {
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
- // We need to track a faked offset to create back-references. The size is
- // kept simply to display statistics.
- uint32_t virtual_chunk_size_ = 0;
- uint32_t virtual_chunk_offset_ = 0;
+ uint32_t allocated_bytes_ = 0;
+ uint32_t next_builtin_reference_index_ = 0;
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializerAllocator)
};
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 0ff35abc5d..e91799cdad 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -31,6 +31,11 @@ ScriptData::ScriptData(const byte* data, int length)
}
}
+CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash)
+ : Serializer(isolate), source_hash_(source_hash) {
+ allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
+}
+
// static
ScriptCompiler::CachedData* CodeSerializer::Serialize(
Handle<SharedFunctionInfo> info) {
@@ -52,13 +57,14 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
// context independent.
if (script->ContainsAsmModule()) return nullptr;
- if (isolate->debug()->is_loaded()) return nullptr;
+ if (isolate->debug()->is_active()) return nullptr;
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
- CodeSerializer cs(isolate, SerializedCodeData::SourceHash(source));
+ CodeSerializer cs(isolate, SerializedCodeData::SourceHash(
+ source, script->origin_options()));
DisallowHeapAllocation no_gc;
cs.reference_map()->AddAttachedReference(*source);
ScriptData* script_data = cs.SerializeSharedFunctionInfo(info);
@@ -158,29 +164,27 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
UNREACHABLE();
}
+ ReadOnlyRoots roots(isolate());
if (ElideObject(obj)) {
- return SerializeObject(isolate()->heap()->undefined_value(), how_to_code,
- where_to_point, skip);
+ return SerializeObject(roots.undefined_value(), how_to_code, where_to_point,
+ skip);
}
if (obj->IsScript()) {
Script* script_obj = Script::cast(obj);
DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
- // Wrapper object is a context-dependent JSValue. Reset it here.
- script_obj->set_wrapper(isolate()->heap()->undefined_value());
// We want to differentiate between undefined and uninitialized_symbol for
// context_data for now. It is hack to allow debugging for scripts that are
// included as a part of custom snapshot. (see debug::Script::IsEmbedded())
Object* context_data = script_obj->context_data();
- if (context_data != isolate()->heap()->undefined_value() &&
- context_data != isolate()->heap()->uninitialized_symbol()) {
- script_obj->set_context_data(isolate()->heap()->undefined_value());
+ if (context_data != roots.undefined_value() &&
+ context_data != roots.uninitialized_symbol()) {
+ script_obj->set_context_data(roots.undefined_value());
}
// We don't want to serialize host options to avoid serializing unnecessary
// object graph.
FixedArray* host_options = script_obj->host_defined_options();
- script_obj->set_host_defined_options(
- isolate()->heap()->empty_fixed_array());
+ script_obj->set_host_defined_options(roots.empty_fixed_array());
SerializeGeneric(obj, how_to_code, where_to_point);
script_obj->set_host_defined_options(host_options);
script_obj->set_context_data(context_data);
@@ -192,16 +196,34 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
- // Clear debug info.
- Object* debug_info = sfi->debug_info();
- sfi->set_debug_info(Smi::kZero);
+
+ DebugInfo* debug_info = nullptr;
+ BytecodeArray* debug_bytecode_array = nullptr;
+ if (sfi->HasDebugInfo()) {
+ // Clear debug info.
+ debug_info = sfi->GetDebugInfo();
+ if (debug_info->HasInstrumentedBytecodeArray()) {
+ debug_bytecode_array = debug_info->DebugBytecodeArray();
+ sfi->SetDebugBytecodeArray(debug_info->OriginalBytecodeArray());
+ }
+ sfi->set_function_identifier_or_debug_info(
+ debug_info->function_identifier());
+ }
+ DCHECK(!sfi->HasDebugInfo());
// Mark SFI to indicate whether the code is cached.
bool was_deserialized = sfi->deserialized();
sfi->set_deserialized(sfi->is_compiled());
SerializeGeneric(obj, how_to_code, where_to_point);
sfi->set_deserialized(was_deserialized);
- sfi->set_debug_info(debug_info);
+
+ // Restore debug info
+ if (debug_info != nullptr) {
+ sfi->set_function_identifier_or_debug_info(debug_info);
+ if (debug_bytecode_array != nullptr) {
+ sfi->SetDebugBytecodeArray(debug_bytecode_array);
+ }
+ }
return;
}
@@ -234,7 +256,7 @@ void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
WhereToPoint where_to_point) {
// We only arrive here if we have not encountered this code stub before.
- DCHECK(!reference_map()->Lookup(code_stub).is_valid());
+ DCHECK(!reference_map()->LookupReference(code_stub).is_valid());
uint32_t stub_key = code_stub->stub_key();
DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
@@ -251,21 +273,23 @@ void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
}
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
+ Isolate* isolate, ScriptData* cached_data, Handle<String> source,
+ ScriptOriginOptions origin_options) {
base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
+ if (FLAG_profile_deserialization || FLAG_log_function_events) timer.Start();
HandleScope scope(isolate);
SerializedCodeData::SanityCheckResult sanity_check_result =
SerializedCodeData::CHECK_SUCCESS;
const SerializedCodeData scd = SerializedCodeData::FromCachedData(
- isolate, cached_data, SerializedCodeData::SourceHash(source),
+ isolate, cached_data,
+ SerializedCodeData::SourceHash(source, origin_options),
&sanity_check_result);
if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
DCHECK(cached_data->rejected());
- source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(
+ isolate->counters()->code_cache_reject_reason()->AddSample(
sanity_check_result);
return MaybeHandle<SharedFunctionInfo>();
}
@@ -287,15 +311,24 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
}
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- String* name = isolate->heap()->empty_string();
+ bool log_code_creation = isolate->logger()->is_listening_to_code_events() ||
+ isolate->is_profiling();
+ if (log_code_creation || FLAG_log_function_events) {
+ String* name = ReadOnlyRoots(isolate).empty_string();
if (result->script()->IsScript()) {
Script* script = Script::cast(result->script());
if (script->name()->IsString()) name = String::cast(script->name());
+ if (FLAG_log_function_events) {
+ LOG(isolate, FunctionEvent("deserialize", script->id(),
+ timer.Elapsed().InMillisecondsF(),
+ result->StartPosition(),
+ result->EndPosition(), name));
+ }
+ }
+ if (log_code_creation) {
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
+ result->abstract_code(), *result, name));
}
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
- result->abstract_code(), *result, name));
}
if (isolate->NeedsSourcePositionsForProfiling()) {
@@ -427,8 +460,15 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
return CHECK_SUCCESS;
}
-uint32_t SerializedCodeData::SourceHash(Handle<String> source) {
- return source->length();
+uint32_t SerializedCodeData::SourceHash(Handle<String> source,
+ ScriptOriginOptions origin_options) {
+ const uint32_t source_length = source->length();
+
+ static constexpr uint32_t kModuleFlagMask = (1 << 31);
+ const uint32_t is_module = origin_options.IsModule() ? kModuleFlagMask : 0;
+ DCHECK_EQ(0, source_length & kModuleFlagMask);
+
+ return source_length | is_module;
}
// Return ScriptData object and relinquish ownership over it to the caller.
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index f6b51bf9b1..c4878811ec 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -50,15 +50,15 @@ class CodeSerializer : public Serializer<> {
ScriptData* SerializeSharedFunctionInfo(Handle<SharedFunctionInfo> info);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source);
+ Isolate* isolate, ScriptData* cached_data, Handle<String> source,
+ ScriptOriginOptions origin_options);
const std::vector<uint32_t>* stub_keys() const { return &stub_keys_; }
uint32_t source_hash() const { return source_hash_; }
protected:
- explicit CodeSerializer(Isolate* isolate, uint32_t source_hash)
- : Serializer(isolate), source_hash_(source_hash) {}
+ CodeSerializer(Isolate* isolate, uint32_t source_hash);
~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
virtual void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
@@ -148,7 +148,8 @@ class SerializedCodeData : public SerializedData {
Vector<const uint32_t> CodeStubKeys() const;
- static uint32_t SourceHash(Handle<String> source);
+ static uint32_t SourceHash(Handle<String> source,
+ ScriptOriginOptions origin_options);
private:
explicit SerializedCodeData(ScriptData* data);
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
index 4704ae6643..610b87c771 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -71,9 +71,9 @@ Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
// aligned object when the filler maps have not been deserialized yet.
// We require filler maps as padding to align the object.
Heap* heap = isolate()->heap();
- DCHECK(heap->free_space_map()->IsMap());
- DCHECK(heap->one_pointer_filler_map()->IsMap());
- DCHECK(heap->two_pointer_filler_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap).free_space_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap).one_pointer_filler_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap).two_pointer_filler_map()->IsMap());
obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
address = obj->address();
next_alignment_ = kWordAligned;
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/default-serializer-allocator.cc
index c00f059704..d0efc2bd65 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-serializer-allocator.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/default-serializer-allocator.h"
#include "src/heap/heap-inl.h"
+#include "src/snapshot/references.h"
#include "src/snapshot/serializer.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -19,18 +20,34 @@ DefaultSerializerAllocator::DefaultSerializerAllocator(
}
}
+void DefaultSerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
+ custom_chunk_size_ = chunk_size;
+}
+
+static uint32_t PageSizeOfSpace(int space) {
+ return static_cast<uint32_t>(
+ MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
+}
+
+uint32_t DefaultSerializerAllocator::TargetChunkSize(int space) {
+ if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
+ DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
+ return custom_chunk_size_;
+}
+
SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
uint32_t size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
- DCHECK(size > 0 && size <= MaxChunkSizeInSpace(space));
+ DCHECK(size > 0 && size <= PageSizeOfSpace(space));
// Maps are allocated through AllocateMap.
DCHECK_NE(MAP_SPACE, space);
- uint32_t new_chunk_size = pending_chunk_[space] + size;
- if (new_chunk_size > MaxChunkSizeInSpace(space)) {
- // The new chunk size would not fit onto a single page. Complete the
- // current chunk and start a new one.
+ uint32_t old_chunk_size = pending_chunk_[space];
+ uint32_t new_chunk_size = old_chunk_size + size;
+ // Start a new chunk if the new size exceeds the target chunk size.
+ // We may exceed the target chunk size if the single object size does.
+ if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
serializer_->PutNextChunk(space);
completed_chunks_[space].push_back(pending_chunk_[space]);
pending_chunk_[space] = 0;
@@ -136,13 +153,5 @@ void DefaultSerializerAllocator::OutputStatistics() {
PrintF("%16d\n", large_objects_total_size_);
}
-// static
-uint32_t DefaultSerializerAllocator::MaxChunkSizeInSpace(int space) {
- DCHECK(0 <= space && space < kNumberOfPreallocatedSpaces);
-
- return static_cast<uint32_t>(
- MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.h b/deps/v8/src/snapshot/default-serializer-allocator.h
index b01532752a..e410eab565 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.h
+++ b/deps/v8/src/snapshot/default-serializer-allocator.h
@@ -23,6 +23,8 @@ class DefaultSerializerAllocator final {
SerializerReference AllocateLargeObject(uint32_t size);
SerializerReference AllocateOffHeapBackingStore();
+ void UseCustomChunkSize(uint32_t chunk_size);
+
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
@@ -33,13 +35,15 @@ class DefaultSerializerAllocator final {
void OutputStatistics();
private:
+ // We try to not exceed this size for every chunk. We will not succeed for
+ // larger objects though.
+ uint32_t TargetChunkSize(int space);
+
static constexpr int kNumberOfPreallocatedSpaces =
SerializerDeserializer::kNumberOfPreallocatedSpaces;
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
- static uint32_t MaxChunkSizeInSpace(int space);
-
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
// page. So we track the chunk size in pending_chunk_ of a space, but
@@ -61,6 +65,8 @@ class DefaultSerializerAllocator final {
// from index 0.
uint32_t seen_backing_stores_index_ = 1;
+ uint32_t custom_chunk_size_ = 0;
+
// The current serializer.
Serializer<DefaultSerializerAllocator>* const serializer_;
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 2090443c81..dcb37ce63c 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -45,7 +45,7 @@ bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
template <class AllocatorT>
void Deserializer<AllocatorT>::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
- for (const auto& item : to_rehash_) item->RehashBasedOnMap();
+ for (const auto& item : to_rehash_) item->RehashBasedOnMap(isolate());
}
template <class AllocatorT>
@@ -161,14 +161,18 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
if (canonical != nullptr) return canonical;
- new_internalized_strings_.push_back(handle(string));
+ new_internalized_strings_.push_back(handle(string, isolate_));
return string;
}
} else if (obj->IsScript()) {
- new_scripts_.push_back(handle(Script::cast(obj)));
+ new_scripts_.push_back(handle(Script::cast(obj), isolate_));
} else {
DCHECK(CanBeDeferred(obj));
}
+ } else if (obj->IsScript()) {
+ LOG(isolate_, ScriptEvent(Logger::ScriptEventType::kDeserialize,
+ Script::cast(obj)->id()));
+ LOG(isolate_, ScriptDetails(Script::cast(obj)));
}
if (obj->IsAllocationSite()) {
@@ -179,7 +183,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
// as a (weak) root. If this root is relocated correctly, this becomes
// unnecessary.
if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
- site->set_weak_next(isolate_->heap()->undefined_value());
+ site->set_weak_next(ReadOnlyRoots(isolate_).undefined_value());
} else {
site->set_weak_next(isolate_->heap()->allocation_sites_list());
}
@@ -200,7 +204,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
#endif
} else if (obj->IsExternalString()) {
- if (obj->map() == isolate_->heap()->native_source_string_map()) {
+ if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
DCHECK(string->is_short());
string->set_resource(
@@ -273,33 +277,37 @@ int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
template <class AllocatorT>
HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
HeapObject* obj;
- SerializerReference back_reference =
- SerializerReference::FromBitfield(source_.GetInt());
-
switch (space) {
case LO_SPACE:
- obj = allocator()->GetLargeObject(back_reference.large_object_index());
+ obj = allocator()->GetLargeObject(source_.GetInt());
break;
case MAP_SPACE:
- obj = allocator()->GetMap(back_reference.map_index());
+ obj = allocator()->GetMap(source_.GetInt());
break;
- case RO_SPACE:
+ case RO_SPACE: {
+ uint32_t chunk_index = source_.GetInt();
+ uint32_t chunk_offset = source_.GetInt();
if (isolate()->heap()->deserialization_complete()) {
PagedSpace* read_only_space = isolate()->heap()->read_only_space();
- Page* page = read_only_space->FirstPage();
- for (uint32_t i = 0; i < back_reference.chunk_index(); ++i) {
+ Page* page = read_only_space->first_page();
+ for (uint32_t i = 0; i < chunk_index; ++i) {
page = page->next_page();
}
- Address address = page->OffsetToAddress(back_reference.chunk_offset());
+ Address address = page->OffsetToAddress(chunk_offset);
obj = HeapObject::FromAddress(address);
- break;
+ } else {
+ obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
+ chunk_index, chunk_offset);
}
- V8_FALLTHROUGH;
- default:
+ break;
+ }
+ default: {
+ uint32_t chunk_index = source_.GetInt();
+ uint32_t chunk_offset = source_.GetInt();
obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
- back_reference.chunk_index(),
- back_reference.chunk_offset());
+ chunk_index, chunk_offset);
break;
+ }
}
if (deserializing_user_code() && obj->IsThinString()) {
@@ -530,7 +538,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
}
case kOffHeapTarget: {
-#ifdef V8_EMBEDDED_BUILTINS
+ DCHECK(FLAG_embedded_builtins);
int skip = source_.GetInt();
int builtin_index = source_.GetInt();
DCHECK(Builtins::IsBuiltinId(builtin_index));
@@ -558,9 +566,6 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
UnalignedCopy(current, &o);
current++;
}
-#else
- UNREACHABLE();
-#endif
break;
}
@@ -613,7 +618,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
case kVariableRepeat: {
int repeats = source_.GetInt();
MaybeObject* object = current[-1];
- DCHECK(!isolate->heap()->InNewSpace(object));
+ DCHECK(!Heap::InNewSpace(object));
DCHECK(!allocator()->next_reference_is_weak());
for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
break;
@@ -679,7 +684,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
MaybeObject* object =
MaybeObject::FromObject(isolate->heap()->root(root_index));
- DCHECK(!isolate->heap()->InNewSpace(object));
+ DCHECK(!Heap::InNewSpace(object));
DCHECK(!allocator()->next_reference_is_weak());
UnalignedCopy(current++, &object);
break;
@@ -704,7 +709,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
}
UnalignedCopy(current, &hot_maybe_object);
- if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
+ if (write_barrier_needed && Heap::InNewSpace(hot_object)) {
Address current_address = reinterpret_cast<Address>(current);
isolate->heap()->RecordWrite(
HeapObject::FromAddress(current_object_address),
@@ -732,7 +737,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
MaybeObject* object;
DCHECK(!allocator()->next_reference_is_weak());
UnalignedCopy(&object, current - 1);
- DCHECK(!isolate->heap()->InNewSpace(object));
+ DCHECK(!Heap::InNewSpace(object));
for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
break;
}
@@ -814,16 +819,16 @@ MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
int id = source_.GetInt();
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
new_object = isolate->heap()->root(root_index);
- emit_write_barrier = isolate->heap()->InNewSpace(new_object);
+ emit_write_barrier = Heap::InNewSpace(new_object);
hot_objects_.Add(HeapObject::cast(new_object));
} else if (where == kPartialSnapshotCache) {
int cache_index = source_.GetInt();
new_object = isolate->partial_snapshot_cache()->at(cache_index);
- emit_write_barrier = isolate->heap()->InNewSpace(new_object);
+ emit_write_barrier = Heap::InNewSpace(new_object);
} else if (where == kAttachedReference) {
int index = source_.GetInt();
new_object = *attached_objects_[index];
- emit_write_barrier = isolate->heap()->InNewSpace(new_object);
+ emit_write_barrier = Heap::InNewSpace(new_object);
} else {
DCHECK_EQ(where, kBuiltin);
int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
diff --git a/deps/v8/src/snapshot/embedded-empty.cc b/deps/v8/src/snapshot/embedded-empty.cc
index 3294a6788e..77e83b73ce 100644
--- a/deps/v8/src/snapshot/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded-empty.cc
@@ -9,7 +9,6 @@
namespace v8 {
namespace internal {
-#ifdef V8_EMBEDDED_BUILTINS
const uint8_t* DefaultEmbeddedBlob() { return nullptr; }
uint32_t DefaultEmbeddedBlobSize() { return 0; }
@@ -18,7 +17,5 @@ const uint8_t* TrustedEmbeddedBlob() { return nullptr; }
uint32_t TrustedEmbeddedBlobSize() { return 0; }
#endif
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index c862d63090..9516108749 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -7,7 +7,7 @@
#include <stdio.h>
#include "include/libplatform/libplatform.h"
-#include "src/assembler.h"
+#include "src/assembler-arch.h"
#include "src/base/platform/platform.h"
#include "src/flags.h"
#include "src/msan.h"
@@ -22,7 +22,6 @@ class SnapshotWriter {
SnapshotWriter()
: snapshot_cpp_path_(nullptr), snapshot_blob_path_(nullptr) {}
-#ifdef V8_EMBEDDED_BUILTINS
void SetEmbeddedFile(const char* embedded_cpp_file) {
embedded_cpp_path_ = embedded_cpp_file;
}
@@ -30,7 +29,6 @@ class SnapshotWriter {
void SetEmbeddedVariant(const char* embedded_variant) {
embedded_variant_ = embedded_variant;
}
-#endif
void SetSnapshotFile(const char* snapshot_cpp_file) {
snapshot_cpp_path_ = snapshot_cpp_file;
@@ -51,11 +49,9 @@ class SnapshotWriter {
MaybeWriteStartupBlob(blob_vector);
}
-#ifdef V8_EMBEDDED_BUILTINS
void WriteEmbedded(const i::EmbeddedData* blob) const {
MaybeWriteEmbeddedFile(blob);
}
-#endif
private:
void MaybeWriteStartupBlob(const i::Vector<const i::byte>& blob) const {
@@ -120,7 +116,6 @@ class SnapshotWriter {
fprintf(fp, "\n");
}
-#ifdef V8_EMBEDDED_BUILTINS
void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
if (embedded_cpp_path_ == nullptr) return;
@@ -207,7 +202,6 @@ class SnapshotWriter {
if (current_line_length != 0) fprintf(fp, "\\n\"\n");
fprintf(fp, ");\n");
}
-#endif
static FILE* GetFileDescriptorOrDie(const char* filename) {
FILE* fp = v8::base::OS::FOpen(filename, "wb");
@@ -218,10 +212,8 @@ class SnapshotWriter {
return fp;
}
-#ifdef V8_EMBEDDED_BUILTINS
const char* embedded_cpp_path_ = nullptr;
const char* embedded_variant_ = "Default";
-#endif
const char* snapshot_cpp_path_;
const char* snapshot_blob_path_;
};
@@ -349,14 +341,12 @@ v8::StartupData WarmUpSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
return result;
}
-#ifdef V8_EMBEDDED_BUILTINS
void WriteEmbeddedFile(v8::SnapshotCreator* creator, SnapshotWriter* writer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(creator->GetIsolate());
isolate->PrepareEmbeddedBlobForSerialization();
i::EmbeddedData embedded_blob = i::EmbeddedData::FromBlob();
writer->WriteEmbedded(&embedded_blob);
}
-#endif // V8_EMBEDDED_BUILTINS
} // namespace
int main(int argc, char** argv) {
@@ -385,11 +375,11 @@ int main(int argc, char** argv) {
SnapshotWriter writer;
if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
-#ifdef V8_EMBEDDED_BUILTINS
- if (i::FLAG_embedded_src) writer.SetEmbeddedFile(i::FLAG_embedded_src);
- if (i::FLAG_embedded_variant)
- writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
-#endif
+ if (i::FLAG_embedded_builtins) {
+ if (i::FLAG_embedded_src) writer.SetEmbeddedFile(i::FLAG_embedded_src);
+ if (i::FLAG_embedded_variant)
+ writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
+ }
std::unique_ptr<char> embed_script(
GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding"));
@@ -398,14 +388,26 @@ int main(int argc, char** argv) {
v8::StartupData blob;
{
- v8::SnapshotCreator snapshot_creator;
-#ifdef V8_EMBEDDED_BUILTINS
- // This process is a bit tricky since we might go on to make a second
- // snapshot if a warmup script is passed. In that case, create the first
- // snapshot without off-heap trampolines and only move code off-heap for
- // the warmed-up snapshot.
- if (!warmup_script) WriteEmbeddedFile(&snapshot_creator, &writer);
-#endif
+ v8::Isolate* isolate = v8::Isolate::Allocate();
+ if (i::FLAG_embedded_builtins) {
+ // Set code range such that relative jumps for builtins to
+ // builtin calls in the snapshot are possible.
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ size_t code_range_size =
+ i::kMaximalCodeRangeSize == 0
+ ? i::kMaxPCRelativeCodeRangeInMB
+ : std::min(i::kMaximalCodeRangeSize / i::MB,
+ i::kMaxPCRelativeCodeRangeInMB);
+ i_isolate->heap()->ConfigureHeap(0, 0, code_range_size);
+ }
+ v8::SnapshotCreator snapshot_creator(isolate);
+ if (i::FLAG_embedded_builtins) {
+ // This process is a bit tricky since we might go on to make a second
+ // snapshot if a warmup script is passed. In that case, create the first
+ // snapshot without off-heap trampolines and only move code off-heap for
+ // the warmed-up snapshot.
+ if (!warmup_script) WriteEmbeddedFile(&snapshot_creator, &writer);
+ }
blob = CreateSnapshotDataBlob(&snapshot_creator, embed_script.get());
}
@@ -413,9 +415,9 @@ int main(int argc, char** argv) {
CHECK(blob.raw_size > 0 && blob.data != nullptr);
v8::StartupData cold = blob;
v8::SnapshotCreator snapshot_creator(nullptr, &cold);
-#ifdef V8_EMBEDDED_BUILTINS
- WriteEmbeddedFile(&snapshot_creator, &writer);
-#endif
+ if (i::FLAG_embedded_builtins) {
+ WriteEmbeddedFile(&snapshot_creator, &writer);
+ }
blob = WarmUpSnapshotDataBlob(&snapshot_creator, warmup_script.get());
delete[] cold.data;
}
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 1769f9ca24..2fb86867d0 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -9,7 +9,6 @@
#include "src/isolate.h"
#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
-#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -33,38 +32,6 @@ ObjectDeserializer::DeserializeSharedFunctionInfo(
: MaybeHandle<SharedFunctionInfo>();
}
-MaybeHandle<WasmCompiledModule>
-ObjectDeserializer::DeserializeWasmCompiledModule(
- Isolate* isolate, const SerializedCodeData* data,
- Vector<const byte> wire_bytes) {
- ObjectDeserializer d(data);
-
- d.AddAttachedObject(isolate->native_context());
-
- MaybeHandle<String> maybe_wire_bytes_as_string =
- isolate->factory()->NewStringFromOneByte(wire_bytes, TENURED);
- Handle<String> wire_bytes_as_string;
- if (!maybe_wire_bytes_as_string.ToHandle(&wire_bytes_as_string)) {
- return MaybeHandle<WasmCompiledModule>();
- }
- d.AddAttachedObject(wire_bytes_as_string);
-
- Vector<const uint32_t> code_stub_keys = data->CodeStubKeys();
- for (int i = 0; i < code_stub_keys.length(); i++) {
- d.AddAttachedObject(
- CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked());
- }
-
- Handle<HeapObject> result;
- if (!d.Deserialize(isolate).ToHandle(&result))
- return MaybeHandle<WasmCompiledModule>();
-
- if (!result->IsWasmCompiledModule()) return MaybeHandle<WasmCompiledModule>();
-
- // Cast without type checks, as the module wrapper is not there yet.
- return handle(static_cast<WasmCompiledModule*>(*result), isolate);
-}
-
MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
Initialize(isolate);
@@ -79,7 +46,7 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
- result = Handle<HeapObject>(HeapObject::cast(root));
+ result = handle(HeapObject::cast(root), isolate);
Rehash();
allocator()->RegisterDeserializedObjectsForBlackAllocation();
}
@@ -114,9 +81,12 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
for (Handle<Script> script : new_scripts()) {
// Assign a new script id to avoid collision.
script->set_id(isolate()->heap()->NextScriptId());
+ LOG(isolate(),
+ ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
+ LOG(isolate(), ScriptDetails(*script));
// Add script to list.
Handle<Object> list =
- FixedArrayOfWeakCells::Add(factory->script_list(), script);
+ FixedArrayOfWeakCells::Add(isolate(), factory->script_list(), script);
heap->SetRootScriptList(*list);
}
}
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index 8f236f5f20..1e8bf1b649 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -12,7 +12,6 @@ namespace internal {
class SerializedCodeData;
class SharedFunctionInfo;
-class WasmCompiledModule;
// Deserializes the object graph rooted at a given object.
class ObjectDeserializer final : public Deserializer<> {
@@ -20,10 +19,6 @@ class ObjectDeserializer final : public Deserializer<> {
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
- static MaybeHandle<WasmCompiledModule> DeserializeWasmCompiledModule(
- Isolate* isolate, const SerializedCodeData* data,
- Vector<const byte> wire_bytes);
-
private:
explicit ObjectDeserializer(const SerializedCodeData* data)
: Deserializer(data, true) {}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 5624ba9887..5161629fa4 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -20,6 +20,7 @@ PartialSerializer::PartialSerializer(
can_be_rehashed_(true),
context_(nullptr) {
InitializeCodeAddressMap();
+ allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
PartialSerializer::~PartialSerializer() {
@@ -36,11 +37,11 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
context_->set(Context::NEXT_CONTEXT_LINK,
- isolate()->heap()->undefined_value());
- DCHECK(!context_->global_object()->IsUndefined(context_->GetIsolate()));
+ ReadOnlyRoots(isolate()).undefined_value());
+ DCHECK(!context_->global_object()->IsUndefined());
// Reset math random cache to get fresh random numbers.
context_->set_math_random_index(Smi::kZero);
- context_->set_math_random_cache(isolate()->heap()->undefined_value());
+ context_->set_math_random_cache(ReadOnlyRoots(isolate()).undefined_value());
VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
reinterpret_cast<Object**>(o));
@@ -124,8 +125,8 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
o->IsTemplateInfo() ||
- o->map() ==
- startup_serializer_->isolate()->heap()->fixed_cow_array_map();
+ o->map() == ReadOnlyRoots(startup_serializer_->isolate())
+ .fixed_cow_array_map();
}
void PartialSerializer::SerializeEmbedderFields() {
@@ -139,7 +140,7 @@ void PartialSerializer::SerializeEmbedderFields() {
HandleScope scope(isolate());
Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
embedder_field_holders_.pop_back();
- SerializerReference reference = reference_map()->Lookup(*obj);
+ SerializerReference reference = reference_map()->LookupReference(*obj);
DCHECK(reference.is_back_reference());
int embedder_fields_count = obj->GetEmbedderFieldCount();
for (int i = 0; i < embedder_fields_count; i++) {
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
new file mode 100644
index 0000000000..6beb2065c1
--- /dev/null
+++ b/deps/v8/src/snapshot/references.h
@@ -0,0 +1,197 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_REFERENCES_H_
+#define V8_SNAPSHOT_REFERENCES_H_
+
+#include "src/assert-scope.h"
+#include "src/base/hashmap.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class SerializerReference {
+ private:
+ enum SpecialValueType {
+ kInvalidValue,
+ kAttachedReference,
+ kOffHeapBackingStore,
+ kBuiltinReference,
+ };
+
+ static const int kSpecialValueSpace = LAST_SPACE + 1;
+ STATIC_ASSERT(kSpecialValueSpace < (1 << kSpaceTagSize));
+
+ SerializerReference(SpecialValueType type, uint32_t value)
+ : bitfield_(SpaceBits::encode(kSpecialValueSpace) |
+ SpecialValueTypeBits::encode(type)),
+ value_(value) {}
+
+ public:
+ SerializerReference() : SerializerReference(kInvalidValue, 0) {}
+
+ SerializerReference(uint32_t space, uint32_t chunk_index,
+ uint32_t chunk_offset)
+ : bitfield_(SpaceBits::encode(space) |
+ ChunkIndexBits::encode(chunk_index)),
+ value_(chunk_offset) {}
+
+ static SerializerReference BackReference(AllocationSpace space,
+ uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK(IsAligned(chunk_offset, kObjectAlignment));
+ DCHECK_LT(space, LO_SPACE);
+ return SerializerReference(space, chunk_index, chunk_offset);
+ }
+
+ static SerializerReference MapReference(uint32_t index) {
+ return SerializerReference(MAP_SPACE, 0, index);
+ }
+
+ static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
+ return SerializerReference(kOffHeapBackingStore, index);
+ }
+
+ static SerializerReference LargeObjectReference(uint32_t index) {
+ return SerializerReference(LO_SPACE, 0, index);
+ }
+
+ static SerializerReference AttachedReference(uint32_t index) {
+ return SerializerReference(kAttachedReference, index);
+ }
+
+ static SerializerReference BuiltinReference(uint32_t index) {
+ return SerializerReference(kBuiltinReference, index);
+ }
+
+ bool is_valid() const {
+ return SpaceBits::decode(bitfield_) != kSpecialValueSpace ||
+ SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
+ }
+
+ bool is_back_reference() const {
+ return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+ }
+
+ AllocationSpace space() const {
+ DCHECK(is_back_reference());
+ return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
+ }
+
+ uint32_t chunk_offset() const {
+ DCHECK(is_back_reference());
+ return value_;
+ }
+
+ uint32_t chunk_index() const {
+ DCHECK(space() != MAP_SPACE && space() != LO_SPACE);
+ return ChunkIndexBits::decode(bitfield_);
+ }
+
+ uint32_t map_index() const {
+ DCHECK_EQ(MAP_SPACE, SpaceBits::decode(bitfield_));
+ return value_;
+ }
+
+ bool is_off_heap_backing_store_reference() const {
+ return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
+ }
+
+ uint32_t off_heap_backing_store_index() const {
+ DCHECK(is_off_heap_backing_store_reference());
+ return value_;
+ }
+
+ uint32_t large_object_index() const {
+ DCHECK_EQ(LO_SPACE, SpaceBits::decode(bitfield_));
+ return value_;
+ }
+
+ bool is_attached_reference() const {
+ return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
+ }
+
+ uint32_t attached_reference_index() const {
+ DCHECK(is_attached_reference());
+ return value_;
+ }
+
+ bool is_builtin_reference() const {
+ return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
+ }
+
+ uint32_t builtin_index() const {
+ DCHECK(is_builtin_reference());
+ return value_;
+ }
+
+ private:
+ class SpaceBits : public BitField<int, 0, kSpaceTagSize> {};
+ class ChunkIndexBits
+ : public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
+ class SpecialValueTypeBits
+ : public BitField<SpecialValueType, SpaceBits::kNext,
+ 32 - kSpaceTagSize> {};
+
+ // We use two fields to store a reference.
+ // In case of a normal back reference, the bitfield_ stores the space and
+ // the chunk index. In case of special references, it uses a special value
+ // for space and stores the special value type.
+ uint32_t bitfield_;
+ // value_ stores either chunk offset or special value.
+ uint32_t value_;
+
+ friend class SerializerReferenceMap;
+};
+
+class SerializerReferenceMap
+ : public base::TemplateHashMapImpl<uintptr_t, SerializerReference,
+ base::KeyEqualityMatcher<intptr_t>,
+ base::DefaultAllocationPolicy> {
+ public:
+ typedef base::TemplateHashMapEntry<uintptr_t, SerializerReference> Entry;
+
+ SerializerReferenceMap() : no_allocation_(), attached_reference_index_(0) {}
+
+ SerializerReference LookupReference(void* value) const {
+ uintptr_t key = Key(value);
+ Entry* entry = Lookup(key, Hash(key));
+ if (entry == nullptr) return SerializerReference();
+ return entry->value;
+ }
+
+ void Add(void* obj, SerializerReference reference) {
+ DCHECK(reference.is_valid());
+ DCHECK(!LookupReference(obj).is_valid());
+ uintptr_t key = Key(obj);
+ LookupOrInsert(key, Hash(key))->value = reference;
+ }
+
+ SerializerReference AddAttachedReference(void* attached_reference) {
+ SerializerReference reference =
+ SerializerReference::AttachedReference(attached_reference_index_++);
+ Add(attached_reference, reference);
+ return reference;
+ }
+
+ private:
+ static inline uintptr_t Key(void* value) {
+ return reinterpret_cast<uintptr_t>(value);
+ }
+
+ static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
+
+ DisallowHeapAllocation no_allocation_;
+ int attached_reference_index_;
+ DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_REFERENCES_H_
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 566046abd2..f4db5513bb 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
+#include "src/snapshot/references.h"
#include "src/utils.h"
#include "src/visitors.h"
@@ -105,8 +106,12 @@ class SerializerDeserializer : public RootVisitor {
// No reservation for large object space necessary.
// We also handle map space differenly.
STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
+
+ // We do not support young generation large objects.
+ STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
+ STATIC_ASSERT(LAST_SPACE - 1 == LO_SPACE);
static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
- static const int kNumberOfSpaces = LAST_SPACE + 1;
+ static const int kNumberOfSpaces = LO_SPACE + 1;
protected:
static bool CanBeDeferred(HeapObject* o);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 5a0828c7d7..67644b83f3 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -177,7 +177,7 @@ bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
- SerializerReference reference = reference_map_.Lookup(obj);
+ SerializerReference reference = reference_map_.LookupReference(obj);
if (!reference.is_valid()) return false;
// Encode the location of an already deserialized object in order to write
// its location into a later object. We can encode the location as an
@@ -264,8 +264,7 @@ void Serializer<AllocatorT>::PutRoot(
kNumberOfRootArrayConstants - 1);
if (how_to_code == kPlain && where_to_point == kStartOfObject &&
- root_index < kNumberOfRootArrayConstants &&
- !isolate()->heap()->InNewSpace(object)) {
+ root_index < kNumberOfRootArrayConstants && !Heap::InNewSpace(object)) {
if (skip == 0) {
sink_.Put(kRootArrayConstants + root_index, "RootConstant");
} else {
@@ -291,7 +290,21 @@ template <class AllocatorT>
void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
- sink_.PutInt(reference.back_reference(), "BackRefValue");
+ switch (reference.space()) {
+ case MAP_SPACE:
+ sink_.PutInt(reference.map_index(), "BackRefMapIndex");
+ break;
+
+ case LO_SPACE:
+ sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
+ break;
+
+ default:
+ sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
+ sink_.PutInt(reference.chunk_offset(), "BackRefChunkOffset");
+ break;
+ }
+
hot_objects_.Add(object);
}
@@ -406,7 +419,7 @@ template <class AllocatorT>
int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
- serializer_->reference_map()->Lookup(backing_store);
+ serializer_->reference_map()->LookupReference(backing_store);
// Serialize the off-heap backing store.
if (!reference.is_valid()) {
@@ -486,7 +499,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
// for native native source code strings, we replace the resource field
// with the native source id.
// For the rest we serialize them to look like ordinary sequential strings.
- if (object_->map() != heap->native_source_string_map()) {
+ if (object_->map() != ReadOnlyRoots(heap).native_source_string_map()) {
ExternalString* string = ExternalString::cast(object_);
Address resource = string->resource_as_address();
ExternalReferenceEncoder::Value reference;
@@ -518,9 +531,9 @@ void Serializer<
AllocatorT>::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
- Isolate* isolate = serializer_->isolate();
+ ReadOnlyRoots roots(serializer_->isolate());
DCHECK(object_->IsExternalString());
- DCHECK(object_->map() != isolate->heap()->native_source_string_map());
+ DCHECK(object_->map() != roots.native_source_string_map());
ExternalString* string = ExternalString::cast(object_);
int length = string->length();
Map* map;
@@ -530,15 +543,14 @@ void Serializer<
// Find the map and size for the imaginary sequential string.
bool internalized = object_->IsInternalizedString();
if (object_->IsExternalOneByteString()) {
- map = internalized ? isolate->heap()->one_byte_internalized_string_map()
- : isolate->heap()->one_byte_string_map();
+ map = internalized ? roots.one_byte_internalized_string_map()
+ : roots.one_byte_string_map();
allocation_size = SeqOneByteString::SizeFor(length);
content_size = length * kCharSize;
resource = reinterpret_cast<const byte*>(
ExternalOneByteString::cast(string)->resource()->data());
} else {
- map = internalized ? isolate->heap()->internalized_string_map()
- : isolate->heap()->string_map();
+ map = internalized ? roots.internalized_string_map() : roots.string_map();
allocation_size = SeqTwoByteString::SizeFor(length);
content_size = length * kShortSize;
resource = reinterpret_cast<const byte*>(
@@ -576,12 +588,13 @@ void Serializer<
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
public:
- explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
+ explicit UnlinkWeakNextScope(Heap* heap, HeapObject* object)
+ : object_(nullptr) {
if (object->IsAllocationSite()) {
object_ = object;
next_ = AllocationSite::cast(object)->weak_next();
AllocationSite::cast(object)->set_weak_next(
- object->GetHeap()->undefined_value());
+ ReadOnlyRoots(heap).undefined_value());
}
}
@@ -634,7 +647,7 @@ void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
if (object_->IsScript()) {
// Clear cached line ends.
- Object* undefined = serializer_->isolate()->heap()->undefined_value();
+ Object* undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
Script::cast(object_)->set_line_ends(undefined);
}
@@ -647,6 +660,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
Map* map = object_->map();
AllocationSpace space =
MemoryChunk::FromAddress(object_->address())->owner()->identity();
+ DCHECK(space != NEW_LO_SPACE);
SerializePrologue(space, size, map);
// Serialize the rest of the object.
@@ -677,7 +691,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
int size = object_->Size();
Map* map = object_->map();
SerializerReference back_reference =
- serializer_->reference_map()->Lookup(object_);
+ serializer_->reference_map()->LookupReference(object_);
DCHECK(back_reference.is_back_reference());
// Serialize the rest of the object.
@@ -695,7 +709,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
int size) {
- UnlinkWeakNextScope unlink_weak_next(object_);
+ UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
if (object_->IsCode()) {
// For code objects, output raw bytes first.
OutputCode(size);
@@ -742,7 +756,7 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
Heap::RootIsImmortalImmovable(root_index) &&
*current == current[-1]) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
- DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
+ DCHECK(!Heap::InNewSpace(current_contents));
int repeat_count = 1;
while (&current[repeat_count] < end - 1 &&
current[repeat_count] == *current) {
@@ -857,10 +871,10 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitOffHeapTarget(
Code* host, RelocInfo* rinfo) {
-#ifdef V8_EMBEDDED_BUILTINS
+ DCHECK(FLAG_embedded_builtins);
{
STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
- CHECK(Builtins::IsEmbeddedBuiltin(host));
+ CHECK(Builtins::IsIsolateIndependentBuiltin(host));
Address addr = rinfo->target_off_heap_target();
CHECK_NE(kNullAddress, addr);
CHECK_NOT_NULL(
@@ -872,9 +886,6 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitOffHeapTarget(
sink_->PutInt(skip, "SkipB4OffHeapTarget");
sink_->PutInt(host->builtin_index(), "builtin index");
bytes_processed_so_far_ += rinfo->target_address_size();
-#else
- UNREACHABLE();
-#endif
}
namespace {
@@ -993,7 +1004,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
// To make snapshots reproducible, we make a copy of the code object
// and wipe all pointers in the copy, which we then serialize.
code = serializer_->CopyCode(code);
- int mode_mask = RelocInfo::kCodeTargetMask |
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index f1061a6c2f..6a5d1a4aac 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -20,7 +20,7 @@ namespace internal {
class CodeAddressMap : public CodeEventLogger {
public:
- explicit CodeAddressMap(Isolate* isolate) : isolate_(isolate) {
+ explicit CodeAddressMap(Isolate* isolate) : CodeEventLogger(isolate) {
isolate->logger()->AddCodeEventListener(this);
}
@@ -28,8 +28,8 @@ class CodeAddressMap : public CodeEventLogger {
isolate_->logger()->RemoveCodeEventListener(this);
}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {
- address_to_name_map_.Move(from->address(), to->address());
+ void CodeMoveEvent(AbstractCode* from, Address to) override {
+ address_to_name_map_.Move(from->address(), to);
}
void CodeDisableOptEvent(AbstractCode* code,
@@ -125,7 +125,6 @@ class CodeAddressMap : public CodeEventLogger {
}
NameMap address_to_name_map_;
- Isolate* isolate_;
};
template <class AllocatorT = DefaultSerializerAllocator>
@@ -141,7 +140,7 @@ class Serializer : public SerializerDeserializer {
const std::vector<byte>* Payload() const { return sink_.data(); }
bool ReferenceMapContains(HeapObject* o) {
- return reference_map()->Lookup(o).is_valid();
+ return reference_map()->LookupReference(o).is_valid();
}
Isolate* isolate() const { return isolate_; }
@@ -219,7 +218,7 @@ class Serializer : public SerializerDeserializer {
Code* CopyCode(Code* code);
void QueueDeferredObject(HeapObject* obj) {
- DCHECK(reference_map_.Lookup(obj).is_back_reference());
+ DCHECK(reference_map_.LookupReference(obj).is_back_reference());
deferred_objects_.push_back(obj);
}
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index d2fcc3087b..5da7bb0f49 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -7,6 +7,7 @@
#include "src/snapshot/snapshot.h"
#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/base/platform/platform.h"
#include "src/callable.h"
#include "src/interface-descriptors.h"
@@ -259,8 +260,9 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
reinterpret_cast<const char*>(startup_snapshot->RawData().start()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
- payload_length);
+ PrintF("Snapshot blob consists of:\n%10d bytes in %d chunks for startup\n",
+ payload_length,
+ static_cast<uint32_t>(startup_snapshot->Reservations().size()));
}
payload_offset += payload_length;
@@ -285,7 +287,8 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
reinterpret_cast<const char*>(context_snapshot->RawData().start()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF("%10d bytes for context #%d\n", payload_length, i);
+ PrintF("%10d bytes in %d chunks for context #%d\n", payload_length,
+ static_cast<uint32_t>(context_snapshot->Reservations().size()), i);
}
payload_offset += payload_length;
}
@@ -295,7 +298,6 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
return result;
}
-#ifdef V8_EMBEDDED_BUILTINS
namespace {
bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
@@ -327,6 +329,48 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
return false;
}
+
+void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
+ static const int kRelocMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+
+ Code* code = isolate->builtins()->builtin(i);
+ RelocIterator on_heap_it(code, kRelocMask);
+ RelocIterator off_heap_it(blob, code, kRelocMask);
+
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_ARM)
+ // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
+ // independent builtins in the snapshot. This fixes up the relative jumps
+ // to the right offsets in the snapshot.
+ while (!on_heap_it.done()) {
+ DCHECK(!off_heap_it.done());
+
+ RelocInfo* rinfo = on_heap_it.rinfo();
+ DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK(Builtins::IsIsolateIndependentBuiltin(target));
+
+ off_heap_it.rinfo()->set_target_address(
+ blob->InstructionStartOfBuiltin(target->builtin_index()));
+
+ on_heap_it.next();
+ off_heap_it.next();
+ }
+ DCHECK(off_heap_it.done());
+#else
+ // Architectures other than x64 and arm/arm64 do not use pc-relative calls
+ // and thus must not contain embedded code targets. Instead, we use an
+ // indirection through the root register.
+ CHECK(on_heap_it.done());
+ CHECK(off_heap_it.done());
+#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
+ }
+}
} // namespace
// static
@@ -347,7 +391,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
// Sanity-check that the given builtin is isolate-independent and does not
// use the trampoline register in its calling convention.
- if (!code->IsProcessIndependent()) {
+ if (!code->IsIsolateIndependent(isolate)) {
saw_unsafe_builtin = true;
fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
}
@@ -401,6 +445,9 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
EmbeddedData d(blob, blob_size);
+ // Fix up call targets that point to other embedded builtins.
+ FinalizeEmbeddedCodeTargets(isolate, &d);
+
// Hash the blob and store the result.
STATIC_ASSERT(HashSize() == kSizetSize);
const size_t hash = d.CreateHash();
@@ -409,6 +456,8 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
DCHECK_EQ(hash, d.CreateHash());
DCHECK_EQ(hash, d.Hash());
+ if (FLAG_serialization_statistics) d.PrintStatistics();
+
return d;
}
@@ -440,7 +489,6 @@ size_t EmbeddedData::CreateHash() const {
STATIC_ASSERT(HashSize() == kSizetSize);
return base::hash_range(data_ + HashSize(), data_ + size_);
}
-#endif
uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
CHECK_LT(kNumberOfContextsOffset, data->raw_size);
@@ -448,6 +496,48 @@ uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
return num_contexts;
}
+void EmbeddedData::PrintStatistics() const {
+ DCHECK(FLAG_serialization_statistics);
+
+ constexpr int kCount = Builtins::builtin_count;
+
+ int embedded_count = 0;
+ int instruction_size = 0;
+ int sizes[kCount];
+ for (int i = 0; i < kCount; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+ const int size = InstructionSizeOfBuiltin(i);
+ instruction_size += size;
+ sizes[embedded_count] = size;
+ embedded_count++;
+ }
+
+ // Sort for percentiles.
+ std::sort(&sizes[0], &sizes[embedded_count]);
+
+ const int k50th = embedded_count * 0.5;
+ const int k75th = embedded_count * 0.75;
+ const int k90th = embedded_count * 0.90;
+ const int k99th = embedded_count * 0.99;
+
+ const int metadata_size =
+ static_cast<int>(HashSize() + OffsetsSize() + LengthsSize());
+
+ PrintF("EmbeddedData:\n");
+ PrintF(" Total size: %d\n",
+ static_cast<int>(size()));
+ PrintF(" Metadata size: %d\n", metadata_size);
+ PrintF(" Instruction size: %d\n", instruction_size);
+ PrintF(" Padding: %d\n",
+ static_cast<int>(size() - metadata_size - instruction_size));
+ PrintF(" Embedded builtin count: %d\n", embedded_count);
+ PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
+ PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
+ PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
+ PrintF(" Instruction size (99th percentile): %d\n", sizes[k99th]);
+ PrintF("\n");
+}
+
uint32_t Snapshot::ExtractContextOffset(const v8::StartupData* data,
uint32_t index) {
// Extract the offset of the context at a given index from the StartupData,
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index bbf5cd92e9..26f1cdb44b 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -79,7 +79,6 @@ class BuiltinSnapshotData final : public SnapshotData {
// ... list of builtins offsets
};
-#ifdef V8_EMBEDDED_BUILTINS
class EmbeddedData final {
public:
static EmbeddedData FromIsolate(Isolate* isolate);
@@ -140,10 +139,11 @@ class EmbeddedData final {
}
const uint8_t* RawData() const { return data_ + RawDataOffset(); }
+ void PrintStatistics() const;
+
const uint8_t* data_;
uint32_t size_;
};
-#endif
class Snapshot : public AllStatic {
public:
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 16d731493d..8fbb073703 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -6,6 +6,8 @@
#include "src/api.h"
#include "src/assembler-inl.h"
+#include "src/code-stubs.h"
+#include "src/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/snapshot.h"
@@ -51,12 +53,13 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
FlushICacheForNewIsolate();
}
- isolate->heap()->set_native_contexts_list(isolate->heap()->undefined_value());
+ isolate->heap()->set_native_contexts_list(
+ ReadOnlyRoots(isolate).undefined_value());
// The allocation site list is build during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
if (isolate->heap()->allocation_sites_list() == Smi::kZero) {
isolate->heap()->set_allocation_sites_list(
- isolate->heap()->undefined_value());
+ ReadOnlyRoots(isolate).undefined_value());
}
// Issue code events for newly deserialized code objects.
@@ -95,7 +98,13 @@ void StartupDeserializer::PrintDisassembledCodeObjects() {
for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
if (obj->IsCode()) {
- Code::cast(obj)->Disassemble(nullptr, os);
+ Code* code = Code::cast(obj);
+ // Printing of builtins and bytecode handlers is handled during their
+ // deserialization.
+ if (code->kind() != Code::BUILTIN &&
+ code->kind() != Code::BYTECODE_HANDLER) {
+ code->PrintBuiltinCode(isolate(), nullptr);
+ }
}
}
}
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 15835c0bdd..34c23a6077 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/api.h"
+#include "src/code-tracer.h"
#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/v8threads.h"
@@ -65,12 +66,12 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
call_handler_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
- isolate()->heap()->uninitialized_symbol());
+ ReadOnlyRoots(isolate()).uninitialized_symbol());
} else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
if (!shared->IsSubjectToDebugging() && shared->HasInferredName()) {
- shared->set_inferred_name(isolate()->heap()->empty_string());
+ shared->set_inferred_name(ReadOnlyRoots(isolate()).empty_string());
}
}
@@ -86,7 +87,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// This comes right after serialization of the partial snapshot, where we
// add entries to the partial snapshot cache of the startup snapshot. Add
// one entry with 'undefined' to terminate the partial snapshot cache.
- Object* undefined = isolate()->heap()->undefined_value();
+ Object* undefined = ReadOnlyRoots(isolate()).undefined_value();
VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &undefined);
isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index 1057aba08c..6ae24533df 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -111,6 +111,22 @@ Vector<const byte> VectorFromByteArray(ByteArray* byte_array) {
byte_array->length());
}
+#ifdef ENABLE_SLOW_DCHECKS
+void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries,
+ SourcePositionTableIterator& encoded) {
+ // Brute force testing: Record all positions and decode
+ // the entire table to verify they are identical.
+ auto raw = raw_entries.begin();
+ for (; !encoded.done(); encoded.Advance(), raw++) {
+ DCHECK(raw != raw_entries.end());
+ DCHECK_EQ(encoded.code_offset(), raw->code_offset);
+ DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
+ DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+ }
+ DCHECK(raw == raw_entries.end());
+}
+#endif
+
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
@@ -143,21 +159,30 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
Handle<ByteArray> table = isolate->factory()->NewByteArray(
static_cast<int>(bytes_.size()), TENURED);
+ MemCopy(table->GetDataStartAddress(), bytes_.data(), bytes_.size());
- MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
+#ifdef ENABLE_SLOW_DCHECKS
+ // Brute force testing: Record all positions and decode
+ // the entire table to verify they are identical.
+ SourcePositionTableIterator it(*table);
+ CheckTableEquals(raw_entries_, it);
+ // No additional source positions after creating the table.
+ mode_ = OMIT_SOURCE_POSITIONS;
+#endif
+ return table;
+}
+
+OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
+ if (bytes_.empty()) return OwnedVector<byte>();
+ DCHECK(!Omit());
+
+ OwnedVector<byte> table = OwnedVector<byte>::Of(bytes_);
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
- auto raw = raw_entries_.begin();
- for (SourcePositionTableIterator encoded(*table); !encoded.done();
- encoded.Advance(), raw++) {
- DCHECK(raw != raw_entries_.end());
- DCHECK_EQ(encoded.code_offset(), raw->code_offset);
- DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
- DCHECK_EQ(encoded.is_statement(), raw->is_statement);
- }
- DCHECK(raw == raw_entries_.end());
+ SourcePositionTableIterator it(table.as_vector());
+ CheckTableEquals(raw_entries_, it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 652f5aa34a..60853bc938 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -42,6 +42,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
bool is_statement);
Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ OwnedVector<byte> ToSourcePositionTableVector();
private:
void AddEntry(const PositionTableEntry& entry);
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index 8c515562aa..2df5380a24 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -56,20 +56,21 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
Handle<Code> code) const {
+ Isolate* isolate = code->GetIsolate();
Handle<DeoptimizationData> deopt_data(
- DeoptimizationData::cast(code->deoptimization_data()));
+ DeoptimizationData::cast(code->deoptimization_data()), isolate);
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
InliningPosition inl =
deopt_data->InliningPositions()->get(pos.InliningId());
Handle<SharedFunctionInfo> function(
- deopt_data->GetInlinedFunction(inl.inlined_function_id));
+ deopt_data->GetInlinedFunction(inl.inlined_function_id), isolate);
stack.push_back(SourcePositionInfo(pos, function));
pos = inl.position;
}
Handle<SharedFunctionInfo> function(
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()), isolate);
stack.push_back(SourcePositionInfo(pos, function));
return stack;
}
@@ -125,7 +126,7 @@ SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
: position(pos),
script(f.is_null() || !f->script()->IsScript()
? Handle<Script>::null()
- : handle(Script::cast(f->script()))) {
+ : handle(Script::cast(f->script()), f->GetIsolate())) {
if (!script.is_null()) {
Script::PositionInfo info;
if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index e26d21331f..e16575419f 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -39,15 +39,13 @@ class SplayTree {
: root_(nullptr), allocator_(allocator) {}
~SplayTree();
- INLINE(void* operator new(size_t size,
- AllocationPolicy allocator = AllocationPolicy())) {
+ V8_INLINE void* operator new(
+ size_t size, AllocationPolicy allocator = AllocationPolicy()) {
return allocator.New(static_cast<int>(size));
}
- INLINE(void operator delete(void* p)) {
- AllocationPolicy::Delete(p);
- }
+ V8_INLINE void operator delete(void* p) { AllocationPolicy::Delete(p); }
// Please the MSVC compiler. We should never have to execute this.
- INLINE(void operator delete(void* p, AllocationPolicy policy)) {
+ V8_INLINE void operator delete(void* p, AllocationPolicy policy) {
UNREACHABLE();
}
@@ -102,15 +100,15 @@ class SplayTree {
Node(const Key& key, const Value& value)
: key_(key), value_(value), left_(nullptr), right_(nullptr) {}
- INLINE(void* operator new(size_t size, AllocationPolicy allocator)) {
+ V8_INLINE void* operator new(size_t size, AllocationPolicy allocator) {
return allocator.New(static_cast<int>(size));
}
- INLINE(void operator delete(void* p)) {
+ V8_INLINE void operator delete(void* p) {
return AllocationPolicy::Delete(p);
}
// Please the MSVC compiler. We should never have to execute
// this.
- INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
+ V8_INLINE void operator delete(void* p, AllocationPolicy allocator) {
UNREACHABLE();
}
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 6c5144d574..bee5db9508 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -52,7 +52,8 @@ IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
part_length_(kInitialPartLength),
current_index_(0) {
// Create an accumulator handle starting with the empty string.
- accumulator_ = Handle<String>::New(isolate->heap()->empty_string(), isolate);
+ accumulator_ =
+ Handle<String>::New(ReadOnlyRoots(isolate).empty_string(), isolate);
current_part_ =
factory()->NewRawOneByteString(part_length_).ToHandleChecked();
}
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 66776dfe67..aa11161620 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -138,7 +138,7 @@ class FixedArrayBuilder {
return (length >= required_length);
}
- void EnsureCapacity(int elements) {
+ void EnsureCapacity(Isolate* isolate, int elements) {
int length = array_->length();
int required_length = length_ + elements;
if (length < required_length) {
@@ -147,7 +147,7 @@ class FixedArrayBuilder {
new_length *= 2;
} while (new_length < required_length);
Handle<FixedArray> extended_array =
- array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
+ isolate->factory()->NewFixedArrayWithHoles(new_length);
array_->CopyTo(0, *extended_array, 0, length_);
array_ = extended_array;
}
@@ -218,9 +218,9 @@ class ReplacementStringBuilder {
}
}
-
- void EnsureCapacity(int elements) { array_builder_.EnsureCapacity(elements); }
-
+ void EnsureCapacity(int elements) {
+ array_builder_.EnsureCapacity(heap_->isolate(), elements);
+ }
void AddSubjectSlice(int from, int to) {
AddSubjectSlice(&array_builder_, from, to);
@@ -270,12 +270,12 @@ class IncrementalStringBuilder {
public:
explicit IncrementalStringBuilder(Isolate* isolate);
- INLINE(String::Encoding CurrentEncoding()) { return encoding_; }
+ V8_INLINE String::Encoding CurrentEncoding() { return encoding_; }
template <typename SrcChar, typename DestChar>
- INLINE(void Append(SrcChar c));
+ V8_INLINE void Append(SrcChar c);
- INLINE(void AppendCharacter(uint8_t c)) {
+ V8_INLINE void AppendCharacter(uint8_t c) {
if (encoding_ == String::ONE_BYTE_ENCODING) {
Append<uint8_t, uint8_t>(c);
} else {
@@ -283,7 +283,7 @@ class IncrementalStringBuilder {
}
}
- INLINE(void AppendCString(const char* s)) {
+ V8_INLINE void AppendCString(const char* s) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
if (encoding_ == String::ONE_BYTE_ENCODING) {
while (*u != '\0') Append<uint8_t, uint8_t>(*(u++));
@@ -292,7 +292,7 @@ class IncrementalStringBuilder {
}
}
- INLINE(void AppendCString(const uc16* s)) {
+ V8_INLINE void AppendCString(const uc16* s) {
if (encoding_ == String::ONE_BYTE_ENCODING) {
while (*s != '\0') Append<uc16, uint8_t>(*(s++));
} else {
@@ -300,7 +300,7 @@ class IncrementalStringBuilder {
}
}
- INLINE(bool CurrentPartCanFit(int length)) {
+ V8_INLINE bool CurrentPartCanFit(int length) {
return part_length_ - current_index_ > length;
}
@@ -308,7 +308,7 @@ class IncrementalStringBuilder {
// serialized without allocating a new string part. The worst case length of
// an escaped character is 6. Shifting the remaining string length right by 3
// is a more pessimistic estimate, but faster to calculate.
- INLINE(int EscapedLengthIfCurrentPartFits(int length)) {
+ V8_INLINE int EscapedLengthIfCurrentPartFits(int length) {
if (length > kMaxPartLength) return 0;
STATIC_ASSERT((kMaxPartLength << 3) <= String::kMaxLength);
// This shift will not overflow because length is already less than the
@@ -321,9 +321,11 @@ class IncrementalStringBuilder {
MaybeHandle<String> Finish();
- INLINE(bool HasOverflowed()) const { return overflowed_; }
+ V8_INLINE bool HasOverflowed() const { return overflowed_; }
- INLINE(int Length()) const { return accumulator_->length() + current_index_; }
+ V8_INLINE int Length() const {
+ return accumulator_->length() + current_index_;
+ }
// Change encoding to two-byte.
void ChangeEncoding() {
@@ -348,8 +350,8 @@ class IncrementalStringBuilder {
cursor_ = start_;
}
- INLINE(void Append(DestChar c)) { *(cursor_++) = c; }
- INLINE(void AppendCString(const char* s)) {
+ V8_INLINE void Append(DestChar c) { *(cursor_++) = c; }
+ V8_INLINE void AppendCString(const char* s) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
while (*u != '\0') Append(*(u++));
}
@@ -402,15 +404,15 @@ class IncrementalStringBuilder {
private:
Factory* factory() { return isolate_->factory(); }
- INLINE(Handle<String> accumulator()) { return accumulator_; }
+ V8_INLINE Handle<String> accumulator() { return accumulator_; }
- INLINE(void set_accumulator(Handle<String> string)) {
+ V8_INLINE void set_accumulator(Handle<String> string) {
*accumulator_.location() = *string;
}
- INLINE(Handle<String> current_part()) { return current_part_; }
+ V8_INLINE Handle<String> current_part() { return current_part_; }
- INLINE(void set_current_part(Handle<String> string)) {
+ V8_INLINE void set_current_part(Handle<String> string) {
*current_part_.location() = *string;
}
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
index c742c65164..caf0e082ba 100644
--- a/deps/v8/src/string-hasher-inl.h
+++ b/deps/v8/src/string-hasher-inl.h
@@ -12,9 +12,9 @@
namespace v8 {
namespace internal {
-StringHasher::StringHasher(int length, uint32_t seed)
+StringHasher::StringHasher(int length, uint64_t seed)
: length_(length),
- raw_running_hash_(seed),
+ raw_running_hash_(static_cast<uint32_t>(seed)),
array_index_(0),
is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
is_first_char_(true) {
@@ -113,16 +113,16 @@ inline void StringHasher::AddCharacters(const Char* chars, int length) {
template <typename schar>
uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
- uint32_t seed) {
+ uint64_t seed) {
StringHasher hasher(length, seed);
if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
return hasher.GetHashField();
}
-IteratingStringHasher::IteratingStringHasher(int len, uint32_t seed)
+IteratingStringHasher::IteratingStringHasher(int len, uint64_t seed)
: StringHasher(len, seed) {}
-uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
+uint32_t IteratingStringHasher::Hash(String* string, uint64_t seed) {
IteratingStringHasher hasher(string->length(), seed);
// Nothing to do.
if (hasher.has_trivial_hash()) return hasher.GetHashField();
diff --git a/deps/v8/src/string-hasher.h b/deps/v8/src/string-hasher.h
index 62bb9750c6..68cff519c2 100644
--- a/deps/v8/src/string-hasher.h
+++ b/deps/v8/src/string-hasher.h
@@ -18,14 +18,14 @@ class Vector;
class V8_EXPORT_PRIVATE StringHasher {
public:
- explicit inline StringHasher(int length, uint32_t seed);
+ explicit inline StringHasher(int length, uint64_t seed);
template <typename schar>
static inline uint32_t HashSequentialString(const schar* chars, int length,
- uint32_t seed);
+ uint64_t seed);
// Reads all the data, even for long strings and computes the utf16 length.
- static uint32_t ComputeUtf8Hash(Vector<const char> chars, uint32_t seed,
+ static uint32_t ComputeUtf8Hash(Vector<const char> chars, uint64_t seed,
int* utf16_length_out);
// Calculated hash value for a string consisting of 1 to
@@ -39,13 +39,13 @@ class V8_EXPORT_PRIVATE StringHasher {
static const int kZeroHash = 27;
// Reusable parts of the hashing algorithm.
- INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
- INLINE(static uint32_t GetHashCore(uint32_t running_hash));
- INLINE(static uint32_t ComputeRunningHash(uint32_t running_hash,
- const uc16* chars, int length));
- INLINE(static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
- const char* chars,
- int length));
+ V8_INLINE static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c);
+ V8_INLINE static uint32_t GetHashCore(uint32_t running_hash);
+ V8_INLINE static uint32_t ComputeRunningHash(uint32_t running_hash,
+ const uc16* chars, int length);
+ V8_INLINE static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
+ const char* chars,
+ int length);
protected:
// Returns the value to store in the hash field of a string with
@@ -74,22 +74,22 @@ class V8_EXPORT_PRIVATE StringHasher {
class IteratingStringHasher : public StringHasher {
public:
- static inline uint32_t Hash(String* string, uint32_t seed);
+ static inline uint32_t Hash(String* string, uint64_t seed);
inline void VisitOneByteString(const uint8_t* chars, int length);
inline void VisitTwoByteString(const uint16_t* chars, int length);
private:
- inline IteratingStringHasher(int len, uint32_t seed);
+ inline IteratingStringHasher(int len, uint64_t seed);
void VisitConsString(ConsString* cons_string);
DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
};
// Useful for std containers that require something ()'able.
struct SeededStringHasher {
- explicit SeededStringHasher(uint32_t hashseed) : hashseed_(hashseed) {}
+ explicit SeededStringHasher(uint64_t hashseed) : hashseed_(hashseed) {}
inline std::size_t operator()(const char* name) const;
- uint32_t hashseed_;
+ uint64_t hashseed_;
};
// Useful for std containers that require something ()'able.
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index aa7b847ce6..bf96879a29 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -567,9 +567,9 @@ int SearchString(Isolate* isolate,
// and pattern as vectors before calling SearchString. Used from the
// StringIndexOf builtin.
template <typename SubjectChar, typename PatternChar>
-int SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
- int subject_length, const PatternChar* pattern_ptr,
- int pattern_length, int start_index) {
+intptr_t SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
+ int subject_length, const PatternChar* pattern_ptr,
+ int pattern_length, int start_index) {
DisallowHeapAllocation no_gc;
Vector<const SubjectChar> subject(subject_ptr, subject_length);
Vector<const PatternChar> pattern(pattern_ptr, pattern_length);
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 272b2a354d..80e8b2837b 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -187,9 +187,10 @@ void StringStream::PrintObject(Object* o) {
return;
}
if (o->IsHeapObject() && object_print_mode_ == kPrintObjectVerbose) {
- HeapObject* ho = HeapObject::cast(o);
- DebugObjectCache* debug_object_cache = ho->GetIsolate()->
- string_stream_debug_object_cache();
+ // TODO(delphick): Consider whether we can get the isolate without using
+ // TLS.
+ DebugObjectCache* debug_object_cache =
+ Isolate::Current()->string_stream_debug_object_cache();
for (size_t i = 0; i < debug_object_cache->size(); i++) {
if ((*debug_object_cache)[i] == o) {
Add("#%d#", static_cast<int>(i));
@@ -294,12 +295,6 @@ void StringStream::PrintName(Object* name) {
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!js_object->GetHeap()->Contains(map) ||
- !map->IsHeapObject() ||
- !map->IsMap()) {
- Add("<Invalid map>\n");
- return;
- }
int real_size = map->NumberOfOwnDescriptors();
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < real_size; i++) {
@@ -335,10 +330,10 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Isolate* isolate = array->GetIsolate();
+ ReadOnlyRoots roots = array->GetReadOnlyRoots();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
- if (element->IsTheHole(isolate)) continue;
+ if (element->IsTheHole(roots)) continue;
for (int len = 1; len < 18; len++) {
Put(' ');
}
@@ -406,77 +401,20 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
}
}
-
-void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- if (!f->IsHeapObject()) return;
- HeapObject* obj = HeapObject::cast(f);
- Isolate* isolate = obj->GetIsolate();
- Heap* heap = isolate->heap();
- if (!heap->Contains(obj)) return;
- Map* map = obj->map();
- if (!map->IsHeapObject() ||
- !heap->Contains(map) ||
- !map->IsMap() ||
- !f->IsJSFunction()) {
- return;
- }
-
- JSFunction* fun = JSFunction::cast(f);
- Object* perhaps_context = fun->context();
- if (perhaps_context->IsHeapObject() &&
- heap->Contains(HeapObject::cast(perhaps_context)) &&
- perhaps_context->IsContext()) {
- Context* context = fun->context();
- if (!heap->Contains(context)) {
- Add("(Function context is outside heap)\n");
- return;
- }
- Object* token = context->native_context()->security_token();
- if (token != isolate->string_stream_current_security_token()) {
- Add("Security context: %o\n", token);
- isolate->set_string_stream_current_security_token(token);
- }
- } else {
- Add("(Function context is corrupt)\n");
+void StringStream::PrintSecurityTokenIfChanged(JSFunction* fun) {
+ Context* context = fun->context();
+ Object* token = context->native_context()->security_token();
+ Isolate* isolate = fun->GetIsolate();
+ if (token != isolate->string_stream_current_security_token()) {
+ Add("Security context: %o\n", token);
+ isolate->set_string_stream_current_security_token(token);
}
}
-
-void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- Heap* heap = HeapObject::cast(f)->GetHeap();
- if (!heap->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!heap->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsInternalizedString()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
+void StringStream::PrintFunction(JSFunction* fun, Object* receiver,
+ Code** code) {
+ PrintPrototype(fun, receiver);
+ *code = fun->code();
}
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index c9be46f046..aa4edffab4 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -144,9 +144,9 @@ class StringStream final {
void PrintByteArray(ByteArray* ba);
void PrintUsingMap(JSObject* js_object);
void PrintPrototype(JSFunction* fun, Object* receiver);
- void PrintSecurityTokenIfChanged(Object* function);
+ void PrintSecurityTokenIfChanged(JSFunction* function);
// NOTE: Returns the code in the output parameter.
- void PrintFunction(Object* function, Object* receiver, Code** code);
+ void PrintFunction(JSFunction* function, Object* receiver, Code** code);
// Reset the stream.
void Reset() {
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 57d8444017..53fc289351 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -182,7 +182,8 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
if ((*script->GetScriptName())->IsString()) {
Local<String> script_name =
Local<String>::Cast(script->GetScriptName());
- temp_file_name = new char[script_name->Utf8Length() + 1];
+ temp_file_name =
+ new char[script_name->Utf8Length(event->isolate) + 1];
script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
}
diff --git a/deps/v8/src/torque/Torque.g4 b/deps/v8/src/torque/Torque.g4
index 16c4fe9b45..1497aaf9c1 100644
--- a/deps/v8/src/torque/Torque.g4
+++ b/deps/v8/src/torque/Torque.g4
@@ -14,11 +14,8 @@ BUILTIN: 'builtin';
RUNTIME: 'runtime';
MODULE: 'module';
JAVASCRIPT: 'javascript';
-IMPLICIT: 'implicit';
DEFERRED: 'deferred';
IF: 'if';
-CAST_KEYWORD: 'cast';
-CONVERT_KEYWORD: 'convert';
FOR: 'for';
WHILE: 'while';
RETURN: 'return';
@@ -28,15 +25,16 @@ BREAK: 'break';
GOTO: 'goto';
OTHERWISE: 'otherwise';
TRY: 'try';
-CATCH: 'catch';
LABEL: 'label';
LABELS: 'labels';
TAIL: 'tail';
ISNT: 'isnt';
IS: 'is';
LET: 'let';
+CONST: 'const';
EXTERN: 'extern';
-ASSERT: 'assert';
+ASSERT_TOKEN: 'assert';
+CHECK_TOKEN: 'check';
UNREACHABLE_TOKEN: 'unreachable';
DEBUG_TOKEN: 'debug';
@@ -84,7 +82,7 @@ NOT: '!';
STRING_LITERAL : ('"' ( ESCAPE | ~('"' | '\\' | '\n' | '\r') ) * '"')
| ('\'' ( ESCAPE | ~('\'' | '\\' | '\n' | '\r') ) * '\'');
-fragment ESCAPE : '\\' ( '\'' | '\\' | '"' );
+fragment ESCAPE : '\\' ( '\'' | '\\' | '"' | 'n' | 'r' );
IDENTIFIER : [A-Za-z][0-9A-Za-z_]* ;
@@ -115,11 +113,15 @@ DECIMAL_LITERAL
: MINUS? DECIMAL_INTEGER_LITERAL '.' DECIMAL_DIGIT* EXPONENT_PART?
| MINUS? '.' DECIMAL_DIGIT+ EXPONENT_PART?
| MINUS? DECIMAL_INTEGER_LITERAL EXPONENT_PART?
+ | MINUS? '0x' [0-9a-fA-F]+
;
type : CONSTEXPR? IDENTIFIER
| BUILTIN '(' typeList ')' '=>' type
+ | type BIT_OR type
+ | '(' type ')'
;
+
typeList : (type (',' type)*)?;
genericSpecializationTypeList: '<' typeList '>';
@@ -188,9 +190,11 @@ unaryExpression
| op=(PLUS | MINUS | BIT_NOT | NOT) unaryExpression;
locationExpression
- : IDENTIFIER genericSpecializationTypeList?
+ : IDENTIFIER
| locationExpression '.' IDENTIFIER
- | locationExpression '[' expression ']';
+ | primaryExpression '.' IDENTIFIER
+ | locationExpression '[' expression ']'
+ | primaryExpression '[' expression ']';
incrementDecrement
: INCREMENT locationExpression
@@ -204,16 +208,24 @@ assignment
| locationExpression ((ASSIGNMENT | ASSIGNMENT_OPERATOR) expression)?;
assignmentExpression
- : primaryExpression
+ : functionPointerExpression
| assignment;
+structExpression
+ : IDENTIFIER '{' (expression (',' expression)*)? '}';
+
+functionPointerExpression
+ : primaryExpression
+ | IDENTIFIER genericSpecializationTypeList?
+ ;
+
primaryExpression
: helperCall
+ | structExpression
| DECIMAL_LITERAL
| STRING_LITERAL
- | CAST_KEYWORD '<' type '>' '(' expression ')' OTHERWISE IDENTIFIER
- | CONVERT_KEYWORD '<' type '>' '(' expression ')'
- | ('(' expression ')');
+ | ('(' expression ')')
+ ;
forInitialization : variableDeclarationWithInitialization?;
forLoop: FOR '(' forInitialization ';' expression ';' assignment ')' statementBlock;
@@ -228,7 +240,7 @@ argumentList: '(' argument? (',' argument)* ')';
helperCall: (MIN | MAX | IDENTIFIER) genericSpecializationTypeList? argumentList optionalOtherwise;
labelReference: IDENTIFIER;
-variableDeclaration: LET IDENTIFIER ':' type;
+variableDeclaration: (LET | CONST) IDENTIFIER ':' type;
variableDeclarationWithInitialization: variableDeclaration (ASSIGNMENT expression)?;
helperCallStatement: (TAIL)? helperCall;
expressionStatement: assignment;
@@ -238,10 +250,10 @@ returnStatement: RETURN expression?;
breakStatement: BREAK;
continueStatement: CONTINUE;
gotoStatement: GOTO labelReference argumentList?;
-handlerWithStatement: (CATCH IDENTIFIER | LABEL labelDeclaration) statementBlock;
-tryCatch: TRY statementBlock handlerWithStatement+;
+handlerWithStatement: LABEL labelDeclaration statementBlock;
+tryLabelStatement: TRY statementBlock handlerWithStatement+;
-diagnosticStatement: (ASSERT '(' expression ')') | UNREACHABLE_TOKEN | DEBUG_TOKEN;
+diagnosticStatement: ((ASSERT_TOKEN | CHECK_TOKEN) '(' expression ')') | UNREACHABLE_TOKEN | DEBUG_TOKEN;
statement : variableDeclarationWithInitialization ';'
| helperCallStatement ';'
@@ -255,7 +267,7 @@ statement : variableDeclarationWithInitialization ';'
| whileLoop
| forOfLoop
| forLoop
- | tryCatch
+ | tryLabelStatement
;
statementList : statement*;
@@ -266,27 +278,36 @@ statementBlock
helperBody : statementScope;
+fieldDeclaration: IDENTIFIER ':' type ';';
+fieldListDeclaration: fieldDeclaration*;
+
extendsDeclaration: 'extends' IDENTIFIER;
generatesDeclaration: 'generates' STRING_LITERAL;
constexprDeclaration: 'constexpr' STRING_LITERAL;
typeDeclaration : 'type' IDENTIFIER extendsDeclaration? generatesDeclaration? constexprDeclaration?';';
+typeAliasDeclaration : 'type' IDENTIFIER '=' type ';';
externalBuiltin : EXTERN JAVASCRIPT? BUILTIN IDENTIFIER optionalGenericTypeList '(' typeList ')' optionalType ';';
-externalMacro : EXTERN (IMPLICIT? 'operator' STRING_LITERAL)? MACRO IDENTIFIER optionalGenericTypeList typeListMaybeVarArgs optionalType optionalLabelList ';';
+externalMacro : EXTERN ('operator' STRING_LITERAL)? MACRO IDENTIFIER optionalGenericTypeList typeListMaybeVarArgs optionalType optionalLabelList ';';
externalRuntime : EXTERN RUNTIME IDENTIFIER typeListMaybeVarArgs optionalType ';';
-builtinDeclaration : JAVASCRIPT? BUILTIN IDENTIFIER optionalGenericTypeList parameterList optionalType helperBody;
+builtinDeclaration : JAVASCRIPT? BUILTIN IDENTIFIER optionalGenericTypeList parameterList optionalType (helperBody | ';');
genericSpecialization: IDENTIFIER genericSpecializationTypeList parameterList optionalType optionalLabelList helperBody;
-macroDeclaration : MACRO IDENTIFIER optionalGenericTypeList parameterList optionalType optionalLabelList helperBody;
-constDeclaration : 'const' IDENTIFIER ':' type '=' STRING_LITERAL ';';
+macroDeclaration : ('operator' STRING_LITERAL)? MACRO IDENTIFIER optionalGenericTypeList parameterList optionalType optionalLabelList (helperBody | ';');
+externConstDeclaration : CONST IDENTIFIER ':' type generatesDeclaration ';';
+constDeclaration: CONST IDENTIFIER ':' type ASSIGNMENT expression ';';
+structDeclaration : 'struct' IDENTIFIER '{' fieldListDeclaration '}';
declaration
- : typeDeclaration
+ : structDeclaration
+ | typeDeclaration
+ | typeAliasDeclaration
| builtinDeclaration
| genericSpecialization
| macroDeclaration
| externalMacro
| externalBuiltin
| externalRuntime
+ | externConstDeclaration
| constDeclaration;
moduleDeclaration : MODULE IDENTIFIER '{' declaration* '}';
diff --git a/deps/v8/src/torque/Torque.interp b/deps/v8/src/torque/Torque.interp
new file mode 100644
index 0000000000..0ffb78795f
--- /dev/null
+++ b/deps/v8/src/torque/Torque.interp
@@ -0,0 +1,249 @@
+token literal names:
+null
+'('
+')'
+'=>'
+','
+':'
+'type'
+'?'
+'||'
+'&&'
+'.'
+'['
+']'
+'{'
+'}'
+';'
+'of'
+'else'
+'extends'
+'generates'
+'operator'
+'struct'
+'macro'
+'builtin'
+'runtime'
+'module'
+'javascript'
+'deferred'
+'if'
+'for'
+'while'
+'return'
+'constexpr'
+'continue'
+'break'
+'goto'
+'otherwise'
+'try'
+'label'
+'labels'
+'tail'
+'isnt'
+'is'
+'let'
+'const'
+'extern'
+'assert'
+'check'
+'unreachable'
+'debug'
+'='
+null
+'=='
+'+'
+'-'
+'*'
+'/'
+'%'
+'|'
+'&'
+'~'
+'max'
+'min'
+'!='
+'<'
+'<='
+'>'
+'>='
+'<<'
+'>>'
+'>>>'
+'...'
+null
+'++'
+'--'
+'!'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+MACRO
+BUILTIN
+RUNTIME
+MODULE
+JAVASCRIPT
+DEFERRED
+IF
+FOR
+WHILE
+RETURN
+CONSTEXPR
+CONTINUE
+BREAK
+GOTO
+OTHERWISE
+TRY
+LABEL
+LABELS
+TAIL
+ISNT
+IS
+LET
+CONST
+EXTERN
+ASSERT_TOKEN
+CHECK_TOKEN
+UNREACHABLE_TOKEN
+DEBUG_TOKEN
+ASSIGNMENT
+ASSIGNMENT_OPERATOR
+EQUAL
+PLUS
+MINUS
+MULTIPLY
+DIVIDE
+MODULO
+BIT_OR
+BIT_AND
+BIT_NOT
+MAX
+MIN
+NOT_EQUAL
+LESS_THAN
+LESS_THAN_EQUAL
+GREATER_THAN
+GREATER_THAN_EQUAL
+SHIFT_LEFT
+SHIFT_RIGHT
+SHIFT_RIGHT_ARITHMETIC
+VARARGS
+EQUALITY_OPERATOR
+INCREMENT
+DECREMENT
+NOT
+STRING_LITERAL
+IDENTIFIER
+WS
+BLOCK_COMMENT
+LINE_COMMENT
+DECIMAL_LITERAL
+
+rule names:
+type
+typeList
+genericSpecializationTypeList
+optionalGenericTypeList
+typeListMaybeVarArgs
+labelParameter
+optionalType
+optionalLabelList
+optionalOtherwise
+parameter
+parameterList
+labelDeclaration
+expression
+conditionalExpression
+logicalORExpression
+logicalANDExpression
+bitwiseExpression
+equalityExpression
+relationalExpression
+shiftExpression
+additiveExpression
+multiplicativeExpression
+unaryExpression
+locationExpression
+incrementDecrement
+assignment
+assignmentExpression
+structExpression
+functionPointerExpression
+primaryExpression
+forInitialization
+forLoop
+rangeSpecifier
+forOfRange
+forOfLoop
+argument
+argumentList
+helperCall
+labelReference
+variableDeclaration
+variableDeclarationWithInitialization
+helperCallStatement
+expressionStatement
+ifStatement
+whileLoop
+returnStatement
+breakStatement
+continueStatement
+gotoStatement
+handlerWithStatement
+tryLabelStatement
+diagnosticStatement
+statement
+statementList
+statementScope
+statementBlock
+helperBody
+fieldDeclaration
+fieldListDeclaration
+extendsDeclaration
+generatesDeclaration
+constexprDeclaration
+typeDeclaration
+typeAliasDeclaration
+externalBuiltin
+externalMacro
+externalRuntime
+builtinDeclaration
+genericSpecialization
+macroDeclaration
+externConstDeclaration
+constDeclaration
+structDeclaration
+declaration
+moduleDeclaration
+file
+
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 83, 821, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, 4, 76, 9, 76, 4, 77, 9, 77, 3, 2, 3, 2, 5, 2, 157, 10, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 5, 2, 171, 10, 2, 3, 2, 3, 2, 3, 2, 7, 2, 176, 10, 2, 12, 2, 14, 2, 179, 11, 2, 3, 3, 3, 3, 3, 3, 7, 3, 184, 10, 3, 12, 3, 14, 3, 187, 11, 3, 5, 3, 189, 10, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 203, 10, 5, 12, 5, 14, 5, 206, 11, 5, 3, 5, 5, 5, 209, 10, 5, 3, 6, 3, 6, 5, 6, 213, 10, 6, 3, 6, 3, 6, 7, 6, 217, 10, 6, 12, 6, 14, 6, 220, 11, 6, 3, 6, 3, 6, 5, 6, 224, 10, 6, 3, 6, 3, 6, 3, 6, 3, 6, 5, 6, 230, 10, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 237, 10, 7, 3, 8, 3, 8, 5, 8, 241, 10, 8, 3, 9, 3, 9, 3, 9, 3, 9, 7, 9, 247, 10, 9, 12, 9, 14, 9, 250, 11, 9, 5, 9, 252, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7, 10, 258, 10, 10, 12, 10, 14, 10, 261, 11, 10, 5, 10, 263, 10, 10, 3, 11, 3, 11, 3, 11, 5, 11, 268, 10, 11, 3, 12, 3, 12, 5, 
12, 272, 10, 12, 3, 12, 3, 12, 7, 12, 276, 10, 12, 12, 12, 14, 12, 279, 11, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 5, 12, 291, 10, 12, 3, 13, 3, 13, 5, 13, 295, 10, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 7, 15, 308, 10, 15, 12, 15, 14, 15, 311, 11, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 7, 16, 319, 10, 16, 12, 16, 14, 16, 322, 11, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 7, 17, 330, 10, 17, 12, 17, 14, 17, 333, 11, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 7, 18, 341, 10, 18, 12, 18, 14, 18, 344, 11, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 7, 19, 352, 10, 19, 12, 19, 14, 19, 355, 11, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 7, 20, 363, 10, 20, 12, 20, 14, 20, 366, 11, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 7, 21, 374, 10, 21, 12, 21, 14, 21, 377, 11, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 7, 22, 385, 10, 22, 12, 22, 14, 22, 388, 11, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 7, 23, 396, 10, 23, 12, 23, 14, 23, 399, 11, 23, 3, 24, 3, 24, 3, 24, 5, 24, 404, 10, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 5, 25, 417, 10, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 7, 25, 427, 10, 25, 12, 25, 14, 25, 430, 11, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 5, 26, 442, 10, 26, 3, 27, 3, 27, 3, 27, 3, 27, 5, 27, 448, 10, 27, 5, 27, 450, 10, 27, 3, 28, 3, 28, 5, 28, 454, 10, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 7, 29, 461, 10, 29, 12, 29, 14, 29, 464, 11, 29, 5, 29, 466, 10, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 5, 30, 473, 10, 30, 5, 30, 475, 10, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 5, 31, 485, 10, 31, 3, 32, 5, 32, 488, 10, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 5, 34, 502, 10, 34, 3, 34, 3, 34, 5, 34, 506, 10, 34, 3, 34, 3, 34, 3, 35, 5, 35, 511, 10, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 
36, 3, 36, 3, 36, 3, 37, 3, 37, 3, 38, 3, 38, 5, 38, 526, 10, 38, 3, 38, 3, 38, 7, 38, 530, 10, 38, 12, 38, 14, 38, 533, 11, 38, 3, 38, 3, 38, 3, 39, 3, 39, 5, 39, 539, 10, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 5, 42, 554, 10, 42, 3, 43, 5, 43, 557, 10, 43, 3, 43, 3, 43, 3, 44, 3, 44, 3, 45, 3, 45, 5, 45, 565, 10, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 5, 45, 573, 10, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 47, 3, 47, 5, 47, 583, 10, 47, 3, 48, 3, 48, 3, 49, 3, 49, 3, 50, 3, 50, 3, 50, 5, 50, 592, 10, 50, 3, 51, 3, 51, 3, 51, 3, 51, 3, 52, 3, 52, 3, 52, 6, 52, 601, 10, 52, 13, 52, 14, 52, 602, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 5, 53, 612, 10, 53, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 5, 54, 643, 10, 54, 3, 55, 7, 55, 646, 10, 55, 12, 55, 14, 55, 649, 11, 55, 3, 56, 5, 56, 652, 10, 56, 3, 56, 3, 56, 3, 56, 3, 56, 3, 57, 3, 57, 5, 57, 660, 10, 57, 3, 58, 3, 58, 3, 59, 3, 59, 3, 59, 3, 59, 3, 59, 3, 60, 7, 60, 670, 10, 60, 12, 60, 14, 60, 673, 11, 60, 3, 61, 3, 61, 3, 61, 3, 62, 3, 62, 3, 62, 3, 63, 3, 63, 3, 63, 3, 64, 3, 64, 3, 64, 5, 64, 687, 10, 64, 3, 64, 5, 64, 690, 10, 64, 3, 64, 5, 64, 693, 10, 64, 3, 64, 3, 64, 3, 65, 3, 65, 3, 65, 3, 65, 3, 65, 3, 65, 3, 66, 3, 66, 5, 66, 705, 10, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 67, 3, 67, 3, 67, 5, 67, 719, 10, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 69, 5, 69, 737, 10, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 5, 69, 746, 10, 69, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 71, 3, 71, 5, 71, 757, 10, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 5, 71, 767, 10, 71, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 73, 3, 73, 3, 73, 
3, 73, 3, 73, 3, 73, 3, 73, 3, 73, 3, 74, 3, 74, 3, 74, 3, 74, 3, 74, 3, 74, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 5, 75, 801, 10, 75, 3, 76, 3, 76, 3, 76, 3, 76, 7, 76, 807, 10, 76, 12, 76, 14, 76, 810, 11, 76, 3, 76, 3, 76, 3, 77, 3, 77, 7, 77, 816, 10, 77, 12, 77, 14, 77, 819, 11, 77, 3, 77, 2, 13, 2, 28, 30, 32, 34, 36, 38, 40, 42, 44, 48, 78, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 2, 13, 3, 2, 60, 61, 4, 2, 54, 54, 65, 65, 3, 2, 66, 69, 3, 2, 70, 72, 3, 2, 55, 56, 3, 2, 57, 59, 5, 2, 55, 56, 62, 62, 77, 77, 3, 2, 52, 53, 4, 2, 63, 64, 79, 79, 3, 2, 45, 46, 3, 2, 48, 49, 2, 849, 2, 170, 3, 2, 2, 2, 4, 188, 3, 2, 2, 2, 6, 190, 3, 2, 2, 2, 8, 208, 3, 2, 2, 2, 10, 229, 3, 2, 2, 2, 12, 231, 3, 2, 2, 2, 14, 240, 3, 2, 2, 2, 16, 251, 3, 2, 2, 2, 18, 262, 3, 2, 2, 2, 20, 264, 3, 2, 2, 2, 22, 290, 3, 2, 2, 2, 24, 292, 3, 2, 2, 2, 26, 296, 3, 2, 2, 2, 28, 298, 3, 2, 2, 2, 30, 312, 3, 2, 2, 2, 32, 323, 3, 2, 2, 2, 34, 334, 3, 2, 2, 2, 36, 345, 3, 2, 2, 2, 38, 356, 3, 2, 2, 2, 40, 367, 3, 2, 2, 2, 42, 378, 3, 2, 2, 2, 44, 389, 3, 2, 2, 2, 46, 403, 3, 2, 2, 2, 48, 416, 3, 2, 2, 2, 50, 441, 3, 2, 2, 2, 52, 449, 3, 2, 2, 2, 54, 453, 3, 2, 2, 2, 56, 455, 3, 2, 2, 2, 58, 474, 3, 2, 2, 2, 60, 484, 3, 2, 2, 2, 62, 487, 3, 2, 2, 2, 64, 489, 3, 2, 2, 2, 66, 499, 3, 2, 2, 2, 68, 510, 3, 2, 2, 2, 70, 512, 3, 2, 2, 2, 72, 521, 3, 2, 2, 2, 74, 523, 3, 2, 2, 2, 76, 536, 3, 2, 2, 2, 78, 543, 3, 2, 2, 2, 80, 545, 3, 2, 2, 2, 82, 550, 3, 2, 2, 2, 84, 556, 3, 2, 2, 2, 86, 560, 3, 2, 2, 2, 88, 562, 3, 2, 2, 2, 90, 574, 3, 2, 2, 2, 92, 580, 3, 2, 2, 2, 94, 584, 3, 2, 2, 2, 96, 586, 3, 2, 2, 2, 98, 588, 3, 2, 2, 2, 100, 593, 3, 2, 2, 2, 102, 597, 3, 2, 2, 2, 104, 611, 3, 
2, 2, 2, 106, 642, 3, 2, 2, 2, 108, 647, 3, 2, 2, 2, 110, 651, 3, 2, 2, 2, 112, 659, 3, 2, 2, 2, 114, 661, 3, 2, 2, 2, 116, 663, 3, 2, 2, 2, 118, 671, 3, 2, 2, 2, 120, 674, 3, 2, 2, 2, 122, 677, 3, 2, 2, 2, 124, 680, 3, 2, 2, 2, 126, 683, 3, 2, 2, 2, 128, 696, 3, 2, 2, 2, 130, 702, 3, 2, 2, 2, 132, 715, 3, 2, 2, 2, 134, 728, 3, 2, 2, 2, 136, 736, 3, 2, 2, 2, 138, 747, 3, 2, 2, 2, 140, 756, 3, 2, 2, 2, 142, 768, 3, 2, 2, 2, 144, 775, 3, 2, 2, 2, 146, 783, 3, 2, 2, 2, 148, 800, 3, 2, 2, 2, 150, 802, 3, 2, 2, 2, 152, 817, 3, 2, 2, 2, 154, 156, 8, 2, 1, 2, 155, 157, 7, 34, 2, 2, 156, 155, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 171, 7, 79, 2, 2, 159, 160, 7, 25, 2, 2, 160, 161, 7, 3, 2, 2, 161, 162, 5, 4, 3, 2, 162, 163, 7, 4, 2, 2, 163, 164, 7, 5, 2, 2, 164, 165, 5, 2, 2, 5, 165, 171, 3, 2, 2, 2, 166, 167, 7, 3, 2, 2, 167, 168, 5, 2, 2, 2, 168, 169, 7, 4, 2, 2, 169, 171, 3, 2, 2, 2, 170, 154, 3, 2, 2, 2, 170, 159, 3, 2, 2, 2, 170, 166, 3, 2, 2, 2, 171, 177, 3, 2, 2, 2, 172, 173, 12, 4, 2, 2, 173, 174, 7, 60, 2, 2, 174, 176, 5, 2, 2, 5, 175, 172, 3, 2, 2, 2, 176, 179, 3, 2, 2, 2, 177, 175, 3, 2, 2, 2, 177, 178, 3, 2, 2, 2, 178, 3, 3, 2, 2, 2, 179, 177, 3, 2, 2, 2, 180, 185, 5, 2, 2, 2, 181, 182, 7, 6, 2, 2, 182, 184, 5, 2, 2, 2, 183, 181, 3, 2, 2, 2, 184, 187, 3, 2, 2, 2, 185, 183, 3, 2, 2, 2, 185, 186, 3, 2, 2, 2, 186, 189, 3, 2, 2, 2, 187, 185, 3, 2, 2, 2, 188, 180, 3, 2, 2, 2, 188, 189, 3, 2, 2, 2, 189, 5, 3, 2, 2, 2, 190, 191, 7, 66, 2, 2, 191, 192, 5, 4, 3, 2, 192, 193, 7, 68, 2, 2, 193, 7, 3, 2, 2, 2, 194, 195, 7, 66, 2, 2, 195, 196, 7, 79, 2, 2, 196, 197, 7, 7, 2, 2, 197, 204, 7, 8, 2, 2, 198, 199, 7, 6, 2, 2, 199, 200, 7, 79, 2, 2, 200, 201, 7, 7, 2, 2, 201, 203, 7, 8, 2, 2, 202, 198, 3, 2, 2, 2, 203, 206, 3, 2, 2, 2, 204, 202, 3, 2, 2, 2, 204, 205, 3, 2, 2, 2, 205, 207, 3, 2, 2, 2, 206, 204, 3, 2, 2, 2, 207, 209, 7, 68, 2, 2, 208, 194, 3, 2, 2, 2, 208, 209, 3, 2, 2, 2, 209, 9, 3, 2, 2, 2, 210, 212, 7, 3, 2, 2, 211, 213, 5, 2, 2, 2, 212, 
211, 3, 2, 2, 2, 212, 213, 3, 2, 2, 2, 213, 218, 3, 2, 2, 2, 214, 215, 7, 6, 2, 2, 215, 217, 5, 2, 2, 2, 216, 214, 3, 2, 2, 2, 217, 220, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2, 2, 2, 219, 223, 3, 2, 2, 2, 220, 218, 3, 2, 2, 2, 221, 222, 7, 6, 2, 2, 222, 224, 7, 73, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 2, 2, 224, 225, 3, 2, 2, 2, 225, 230, 7, 4, 2, 2, 226, 227, 7, 3, 2, 2, 227, 228, 7, 73, 2, 2, 228, 230, 7, 4, 2, 2, 229, 210, 3, 2, 2, 2, 229, 226, 3, 2, 2, 2, 230, 11, 3, 2, 2, 2, 231, 236, 7, 79, 2, 2, 232, 233, 7, 3, 2, 2, 233, 234, 5, 4, 3, 2, 234, 235, 7, 4, 2, 2, 235, 237, 3, 2, 2, 2, 236, 232, 3, 2, 2, 2, 236, 237, 3, 2, 2, 2, 237, 13, 3, 2, 2, 2, 238, 239, 7, 7, 2, 2, 239, 241, 5, 2, 2, 2, 240, 238, 3, 2, 2, 2, 240, 241, 3, 2, 2, 2, 241, 15, 3, 2, 2, 2, 242, 243, 7, 41, 2, 2, 243, 248, 5, 12, 7, 2, 244, 245, 7, 6, 2, 2, 245, 247, 5, 12, 7, 2, 246, 244, 3, 2, 2, 2, 247, 250, 3, 2, 2, 2, 248, 246, 3, 2, 2, 2, 248, 249, 3, 2, 2, 2, 249, 252, 3, 2, 2, 2, 250, 248, 3, 2, 2, 2, 251, 242, 3, 2, 2, 2, 251, 252, 3, 2, 2, 2, 252, 17, 3, 2, 2, 2, 253, 254, 7, 38, 2, 2, 254, 259, 7, 79, 2, 2, 255, 256, 7, 6, 2, 2, 256, 258, 7, 79, 2, 2, 257, 255, 3, 2, 2, 2, 258, 261, 3, 2, 2, 2, 259, 257, 3, 2, 2, 2, 259, 260, 3, 2, 2, 2, 260, 263, 3, 2, 2, 2, 261, 259, 3, 2, 2, 2, 262, 253, 3, 2, 2, 2, 262, 263, 3, 2, 2, 2, 263, 19, 3, 2, 2, 2, 264, 265, 7, 79, 2, 2, 265, 267, 7, 7, 2, 2, 266, 268, 5, 2, 2, 2, 267, 266, 3, 2, 2, 2, 267, 268, 3, 2, 2, 2, 268, 21, 3, 2, 2, 2, 269, 271, 7, 3, 2, 2, 270, 272, 5, 20, 11, 2, 271, 270, 3, 2, 2, 2, 271, 272, 3, 2, 2, 2, 272, 277, 3, 2, 2, 2, 273, 274, 7, 6, 2, 2, 274, 276, 5, 20, 11, 2, 275, 273, 3, 2, 2, 2, 276, 279, 3, 2, 2, 2, 277, 275, 3, 2, 2, 2, 277, 278, 3, 2, 2, 2, 278, 280, 3, 2, 2, 2, 279, 277, 3, 2, 2, 2, 280, 291, 7, 4, 2, 2, 281, 282, 7, 3, 2, 2, 282, 283, 5, 20, 11, 2, 283, 284, 7, 6, 2, 2, 284, 285, 5, 20, 11, 2, 285, 286, 7, 6, 2, 2, 286, 287, 7, 73, 2, 2, 287, 288, 7, 79, 2, 2, 288, 289, 7, 4, 2, 2, 289, 291, 
3, 2, 2, 2, 290, 269, 3, 2, 2, 2, 290, 281, 3, 2, 2, 2, 291, 23, 3, 2, 2, 2, 292, 294, 7, 79, 2, 2, 293, 295, 5, 22, 12, 2, 294, 293, 3, 2, 2, 2, 294, 295, 3, 2, 2, 2, 295, 25, 3, 2, 2, 2, 296, 297, 5, 28, 15, 2, 297, 27, 3, 2, 2, 2, 298, 299, 8, 15, 1, 2, 299, 300, 5, 30, 16, 2, 300, 309, 3, 2, 2, 2, 301, 302, 12, 3, 2, 2, 302, 303, 7, 9, 2, 2, 303, 304, 5, 30, 16, 2, 304, 305, 7, 7, 2, 2, 305, 306, 5, 30, 16, 2, 306, 308, 3, 2, 2, 2, 307, 301, 3, 2, 2, 2, 308, 311, 3, 2, 2, 2, 309, 307, 3, 2, 2, 2, 309, 310, 3, 2, 2, 2, 310, 29, 3, 2, 2, 2, 311, 309, 3, 2, 2, 2, 312, 313, 8, 16, 1, 2, 313, 314, 5, 32, 17, 2, 314, 320, 3, 2, 2, 2, 315, 316, 12, 3, 2, 2, 316, 317, 7, 10, 2, 2, 317, 319, 5, 32, 17, 2, 318, 315, 3, 2, 2, 2, 319, 322, 3, 2, 2, 2, 320, 318, 3, 2, 2, 2, 320, 321, 3, 2, 2, 2, 321, 31, 3, 2, 2, 2, 322, 320, 3, 2, 2, 2, 323, 324, 8, 17, 1, 2, 324, 325, 5, 34, 18, 2, 325, 331, 3, 2, 2, 2, 326, 327, 12, 3, 2, 2, 327, 328, 7, 11, 2, 2, 328, 330, 5, 34, 18, 2, 329, 326, 3, 2, 2, 2, 330, 333, 3, 2, 2, 2, 331, 329, 3, 2, 2, 2, 331, 332, 3, 2, 2, 2, 332, 33, 3, 2, 2, 2, 333, 331, 3, 2, 2, 2, 334, 335, 8, 18, 1, 2, 335, 336, 5, 36, 19, 2, 336, 342, 3, 2, 2, 2, 337, 338, 12, 3, 2, 2, 338, 339, 9, 2, 2, 2, 339, 341, 5, 36, 19, 2, 340, 337, 3, 2, 2, 2, 341, 344, 3, 2, 2, 2, 342, 340, 3, 2, 2, 2, 342, 343, 3, 2, 2, 2, 343, 35, 3, 2, 2, 2, 344, 342, 3, 2, 2, 2, 345, 346, 8, 19, 1, 2, 346, 347, 5, 38, 20, 2, 347, 353, 3, 2, 2, 2, 348, 349, 12, 3, 2, 2, 349, 350, 9, 3, 2, 2, 350, 352, 5, 38, 20, 2, 351, 348, 3, 2, 2, 2, 352, 355, 3, 2, 2, 2, 353, 351, 3, 2, 2, 2, 353, 354, 3, 2, 2, 2, 354, 37, 3, 2, 2, 2, 355, 353, 3, 2, 2, 2, 356, 357, 8, 20, 1, 2, 357, 358, 5, 40, 21, 2, 358, 364, 3, 2, 2, 2, 359, 360, 12, 3, 2, 2, 360, 361, 9, 4, 2, 2, 361, 363, 5, 40, 21, 2, 362, 359, 3, 2, 2, 2, 363, 366, 3, 2, 2, 2, 364, 362, 3, 2, 2, 2, 364, 365, 3, 2, 2, 2, 365, 39, 3, 2, 2, 2, 366, 364, 3, 2, 2, 2, 367, 368, 8, 21, 1, 2, 368, 369, 5, 42, 22, 2, 369, 375, 3, 2, 2, 2, 370, 371, 
12, 3, 2, 2, 371, 372, 9, 5, 2, 2, 372, 374, 5, 42, 22, 2, 373, 370, 3, 2, 2, 2, 374, 377, 3, 2, 2, 2, 375, 373, 3, 2, 2, 2, 375, 376, 3, 2, 2, 2, 376, 41, 3, 2, 2, 2, 377, 375, 3, 2, 2, 2, 378, 379, 8, 22, 1, 2, 379, 380, 5, 44, 23, 2, 380, 386, 3, 2, 2, 2, 381, 382, 12, 3, 2, 2, 382, 383, 9, 6, 2, 2, 383, 385, 5, 44, 23, 2, 384, 381, 3, 2, 2, 2, 385, 388, 3, 2, 2, 2, 386, 384, 3, 2, 2, 2, 386, 387, 3, 2, 2, 2, 387, 43, 3, 2, 2, 2, 388, 386, 3, 2, 2, 2, 389, 390, 8, 23, 1, 2, 390, 391, 5, 46, 24, 2, 391, 397, 3, 2, 2, 2, 392, 393, 12, 3, 2, 2, 393, 394, 9, 7, 2, 2, 394, 396, 5, 46, 24, 2, 395, 392, 3, 2, 2, 2, 396, 399, 3, 2, 2, 2, 397, 395, 3, 2, 2, 2, 397, 398, 3, 2, 2, 2, 398, 45, 3, 2, 2, 2, 399, 397, 3, 2, 2, 2, 400, 404, 5, 54, 28, 2, 401, 402, 9, 8, 2, 2, 402, 404, 5, 46, 24, 2, 403, 400, 3, 2, 2, 2, 403, 401, 3, 2, 2, 2, 404, 47, 3, 2, 2, 2, 405, 406, 8, 25, 1, 2, 406, 417, 7, 79, 2, 2, 407, 408, 5, 60, 31, 2, 408, 409, 7, 12, 2, 2, 409, 410, 7, 79, 2, 2, 410, 417, 3, 2, 2, 2, 411, 412, 5, 60, 31, 2, 412, 413, 7, 13, 2, 2, 413, 414, 5, 26, 14, 2, 414, 415, 7, 14, 2, 2, 415, 417, 3, 2, 2, 2, 416, 405, 3, 2, 2, 2, 416, 407, 3, 2, 2, 2, 416, 411, 3, 2, 2, 2, 417, 428, 3, 2, 2, 2, 418, 419, 12, 6, 2, 2, 419, 420, 7, 12, 2, 2, 420, 427, 7, 79, 2, 2, 421, 422, 12, 4, 2, 2, 422, 423, 7, 13, 2, 2, 423, 424, 5, 26, 14, 2, 424, 425, 7, 14, 2, 2, 425, 427, 3, 2, 2, 2, 426, 418, 3, 2, 2, 2, 426, 421, 3, 2, 2, 2, 427, 430, 3, 2, 2, 2, 428, 426, 3, 2, 2, 2, 428, 429, 3, 2, 2, 2, 429, 49, 3, 2, 2, 2, 430, 428, 3, 2, 2, 2, 431, 432, 7, 75, 2, 2, 432, 442, 5, 48, 25, 2, 433, 434, 7, 76, 2, 2, 434, 442, 5, 48, 25, 2, 435, 436, 5, 48, 25, 2, 436, 437, 7, 75, 2, 2, 437, 442, 3, 2, 2, 2, 438, 439, 5, 48, 25, 2, 439, 440, 7, 76, 2, 2, 440, 442, 3, 2, 2, 2, 441, 431, 3, 2, 2, 2, 441, 433, 3, 2, 2, 2, 441, 435, 3, 2, 2, 2, 441, 438, 3, 2, 2, 2, 442, 51, 3, 2, 2, 2, 443, 450, 5, 50, 26, 2, 444, 447, 5, 48, 25, 2, 445, 446, 9, 9, 2, 2, 446, 448, 5, 26, 14, 2, 447, 445, 3, 2, 2, 2, 
447, 448, 3, 2, 2, 2, 448, 450, 3, 2, 2, 2, 449, 443, 3, 2, 2, 2, 449, 444, 3, 2, 2, 2, 450, 53, 3, 2, 2, 2, 451, 454, 5, 58, 30, 2, 452, 454, 5, 52, 27, 2, 453, 451, 3, 2, 2, 2, 453, 452, 3, 2, 2, 2, 454, 55, 3, 2, 2, 2, 455, 456, 7, 79, 2, 2, 456, 465, 7, 15, 2, 2, 457, 462, 5, 26, 14, 2, 458, 459, 7, 6, 2, 2, 459, 461, 5, 26, 14, 2, 460, 458, 3, 2, 2, 2, 461, 464, 3, 2, 2, 2, 462, 460, 3, 2, 2, 2, 462, 463, 3, 2, 2, 2, 463, 466, 3, 2, 2, 2, 464, 462, 3, 2, 2, 2, 465, 457, 3, 2, 2, 2, 465, 466, 3, 2, 2, 2, 466, 467, 3, 2, 2, 2, 467, 468, 7, 16, 2, 2, 468, 57, 3, 2, 2, 2, 469, 475, 5, 60, 31, 2, 470, 472, 7, 79, 2, 2, 471, 473, 5, 6, 4, 2, 472, 471, 3, 2, 2, 2, 472, 473, 3, 2, 2, 2, 473, 475, 3, 2, 2, 2, 474, 469, 3, 2, 2, 2, 474, 470, 3, 2, 2, 2, 475, 59, 3, 2, 2, 2, 476, 485, 5, 76, 39, 2, 477, 485, 5, 56, 29, 2, 478, 485, 7, 83, 2, 2, 479, 485, 7, 78, 2, 2, 480, 481, 7, 3, 2, 2, 481, 482, 5, 26, 14, 2, 482, 483, 7, 4, 2, 2, 483, 485, 3, 2, 2, 2, 484, 476, 3, 2, 2, 2, 484, 477, 3, 2, 2, 2, 484, 478, 3, 2, 2, 2, 484, 479, 3, 2, 2, 2, 484, 480, 3, 2, 2, 2, 485, 61, 3, 2, 2, 2, 486, 488, 5, 82, 42, 2, 487, 486, 3, 2, 2, 2, 487, 488, 3, 2, 2, 2, 488, 63, 3, 2, 2, 2, 489, 490, 7, 31, 2, 2, 490, 491, 7, 3, 2, 2, 491, 492, 5, 62, 32, 2, 492, 493, 7, 17, 2, 2, 493, 494, 5, 26, 14, 2, 494, 495, 7, 17, 2, 2, 495, 496, 5, 52, 27, 2, 496, 497, 7, 4, 2, 2, 497, 498, 5, 112, 57, 2, 498, 65, 3, 2, 2, 2, 499, 501, 7, 13, 2, 2, 500, 502, 5, 26, 14, 2, 501, 500, 3, 2, 2, 2, 501, 502, 3, 2, 2, 2, 502, 503, 3, 2, 2, 2, 503, 505, 7, 7, 2, 2, 504, 506, 5, 26, 14, 2, 505, 504, 3, 2, 2, 2, 505, 506, 3, 2, 2, 2, 506, 507, 3, 2, 2, 2, 507, 508, 7, 14, 2, 2, 508, 67, 3, 2, 2, 2, 509, 511, 5, 66, 34, 2, 510, 509, 3, 2, 2, 2, 510, 511, 3, 2, 2, 2, 511, 69, 3, 2, 2, 2, 512, 513, 7, 31, 2, 2, 513, 514, 7, 3, 2, 2, 514, 515, 5, 80, 41, 2, 515, 516, 7, 18, 2, 2, 516, 517, 5, 26, 14, 2, 517, 518, 5, 68, 35, 2, 518, 519, 7, 4, 2, 2, 519, 520, 5, 112, 57, 2, 520, 71, 3, 2, 2, 2, 521, 522, 5, 26, 
14, 2, 522, 73, 3, 2, 2, 2, 523, 525, 7, 3, 2, 2, 524, 526, 5, 72, 37, 2, 525, 524, 3, 2, 2, 2, 525, 526, 3, 2, 2, 2, 526, 531, 3, 2, 2, 2, 527, 528, 7, 6, 2, 2, 528, 530, 5, 72, 37, 2, 529, 527, 3, 2, 2, 2, 530, 533, 3, 2, 2, 2, 531, 529, 3, 2, 2, 2, 531, 532, 3, 2, 2, 2, 532, 534, 3, 2, 2, 2, 533, 531, 3, 2, 2, 2, 534, 535, 7, 4, 2, 2, 535, 75, 3, 2, 2, 2, 536, 538, 9, 10, 2, 2, 537, 539, 5, 6, 4, 2, 538, 537, 3, 2, 2, 2, 538, 539, 3, 2, 2, 2, 539, 540, 3, 2, 2, 2, 540, 541, 5, 74, 38, 2, 541, 542, 5, 18, 10, 2, 542, 77, 3, 2, 2, 2, 543, 544, 7, 79, 2, 2, 544, 79, 3, 2, 2, 2, 545, 546, 9, 11, 2, 2, 546, 547, 7, 79, 2, 2, 547, 548, 7, 7, 2, 2, 548, 549, 5, 2, 2, 2, 549, 81, 3, 2, 2, 2, 550, 553, 5, 80, 41, 2, 551, 552, 7, 52, 2, 2, 552, 554, 5, 26, 14, 2, 553, 551, 3, 2, 2, 2, 553, 554, 3, 2, 2, 2, 554, 83, 3, 2, 2, 2, 555, 557, 7, 42, 2, 2, 556, 555, 3, 2, 2, 2, 556, 557, 3, 2, 2, 2, 557, 558, 3, 2, 2, 2, 558, 559, 5, 76, 39, 2, 559, 85, 3, 2, 2, 2, 560, 561, 5, 52, 27, 2, 561, 87, 3, 2, 2, 2, 562, 564, 7, 30, 2, 2, 563, 565, 7, 34, 2, 2, 564, 563, 3, 2, 2, 2, 564, 565, 3, 2, 2, 2, 565, 566, 3, 2, 2, 2, 566, 567, 7, 3, 2, 2, 567, 568, 5, 26, 14, 2, 568, 569, 7, 4, 2, 2, 569, 572, 5, 112, 57, 2, 570, 571, 7, 19, 2, 2, 571, 573, 5, 112, 57, 2, 572, 570, 3, 2, 2, 2, 572, 573, 3, 2, 2, 2, 573, 89, 3, 2, 2, 2, 574, 575, 7, 32, 2, 2, 575, 576, 7, 3, 2, 2, 576, 577, 5, 26, 14, 2, 577, 578, 7, 4, 2, 2, 578, 579, 5, 112, 57, 2, 579, 91, 3, 2, 2, 2, 580, 582, 7, 33, 2, 2, 581, 583, 5, 26, 14, 2, 582, 581, 3, 2, 2, 2, 582, 583, 3, 2, 2, 2, 583, 93, 3, 2, 2, 2, 584, 585, 7, 36, 2, 2, 585, 95, 3, 2, 2, 2, 586, 587, 7, 35, 2, 2, 587, 97, 3, 2, 2, 2, 588, 589, 7, 37, 2, 2, 589, 591, 5, 78, 40, 2, 590, 592, 5, 74, 38, 2, 591, 590, 3, 2, 2, 2, 591, 592, 3, 2, 2, 2, 592, 99, 3, 2, 2, 2, 593, 594, 7, 40, 2, 2, 594, 595, 5, 24, 13, 2, 595, 596, 5, 112, 57, 2, 596, 101, 3, 2, 2, 2, 597, 598, 7, 39, 2, 2, 598, 600, 5, 112, 57, 2, 599, 601, 5, 100, 51, 2, 600, 599, 3, 2, 2, 2, 601, 
602, 3, 2, 2, 2, 602, 600, 3, 2, 2, 2, 602, 603, 3, 2, 2, 2, 603, 103, 3, 2, 2, 2, 604, 605, 9, 12, 2, 2, 605, 606, 7, 3, 2, 2, 606, 607, 5, 26, 14, 2, 607, 608, 7, 4, 2, 2, 608, 612, 3, 2, 2, 2, 609, 612, 7, 50, 2, 2, 610, 612, 7, 51, 2, 2, 611, 604, 3, 2, 2, 2, 611, 609, 3, 2, 2, 2, 611, 610, 3, 2, 2, 2, 612, 105, 3, 2, 2, 2, 613, 614, 5, 82, 42, 2, 614, 615, 7, 17, 2, 2, 615, 643, 3, 2, 2, 2, 616, 617, 5, 84, 43, 2, 617, 618, 7, 17, 2, 2, 618, 643, 3, 2, 2, 2, 619, 620, 5, 86, 44, 2, 620, 621, 7, 17, 2, 2, 621, 643, 3, 2, 2, 2, 622, 623, 5, 92, 47, 2, 623, 624, 7, 17, 2, 2, 624, 643, 3, 2, 2, 2, 625, 626, 5, 94, 48, 2, 626, 627, 7, 17, 2, 2, 627, 643, 3, 2, 2, 2, 628, 629, 5, 96, 49, 2, 629, 630, 7, 17, 2, 2, 630, 643, 3, 2, 2, 2, 631, 632, 5, 98, 50, 2, 632, 633, 7, 17, 2, 2, 633, 643, 3, 2, 2, 2, 634, 643, 5, 88, 45, 2, 635, 636, 5, 104, 53, 2, 636, 637, 7, 17, 2, 2, 637, 643, 3, 2, 2, 2, 638, 643, 5, 90, 46, 2, 639, 643, 5, 70, 36, 2, 640, 643, 5, 64, 33, 2, 641, 643, 5, 102, 52, 2, 642, 613, 3, 2, 2, 2, 642, 616, 3, 2, 2, 2, 642, 619, 3, 2, 2, 2, 642, 622, 3, 2, 2, 2, 642, 625, 3, 2, 2, 2, 642, 628, 3, 2, 2, 2, 642, 631, 3, 2, 2, 2, 642, 634, 3, 2, 2, 2, 642, 635, 3, 2, 2, 2, 642, 638, 3, 2, 2, 2, 642, 639, 3, 2, 2, 2, 642, 640, 3, 2, 2, 2, 642, 641, 3, 2, 2, 2, 643, 107, 3, 2, 2, 2, 644, 646, 5, 106, 54, 2, 645, 644, 3, 2, 2, 2, 646, 649, 3, 2, 2, 2, 647, 645, 3, 2, 2, 2, 647, 648, 3, 2, 2, 2, 648, 109, 3, 2, 2, 2, 649, 647, 3, 2, 2, 2, 650, 652, 7, 29, 2, 2, 651, 650, 3, 2, 2, 2, 651, 652, 3, 2, 2, 2, 652, 653, 3, 2, 2, 2, 653, 654, 7, 15, 2, 2, 654, 655, 5, 108, 55, 2, 655, 656, 7, 16, 2, 2, 656, 111, 3, 2, 2, 2, 657, 660, 5, 106, 54, 2, 658, 660, 5, 110, 56, 2, 659, 657, 3, 2, 2, 2, 659, 658, 3, 2, 2, 2, 660, 113, 3, 2, 2, 2, 661, 662, 5, 110, 56, 2, 662, 115, 3, 2, 2, 2, 663, 664, 7, 79, 2, 2, 664, 665, 7, 7, 2, 2, 665, 666, 5, 2, 2, 2, 666, 667, 7, 17, 2, 2, 667, 117, 3, 2, 2, 2, 668, 670, 5, 116, 59, 2, 669, 668, 3, 2, 2, 2, 670, 673, 3, 2, 2, 2, 671, 
669, 3, 2, 2, 2, 671, 672, 3, 2, 2, 2, 672, 119, 3, 2, 2, 2, 673, 671, 3, 2, 2, 2, 674, 675, 7, 20, 2, 2, 675, 676, 7, 79, 2, 2, 676, 121, 3, 2, 2, 2, 677, 678, 7, 21, 2, 2, 678, 679, 7, 78, 2, 2, 679, 123, 3, 2, 2, 2, 680, 681, 7, 34, 2, 2, 681, 682, 7, 78, 2, 2, 682, 125, 3, 2, 2, 2, 683, 684, 7, 8, 2, 2, 684, 686, 7, 79, 2, 2, 685, 687, 5, 120, 61, 2, 686, 685, 3, 2, 2, 2, 686, 687, 3, 2, 2, 2, 687, 689, 3, 2, 2, 2, 688, 690, 5, 122, 62, 2, 689, 688, 3, 2, 2, 2, 689, 690, 3, 2, 2, 2, 690, 692, 3, 2, 2, 2, 691, 693, 5, 124, 63, 2, 692, 691, 3, 2, 2, 2, 692, 693, 3, 2, 2, 2, 693, 694, 3, 2, 2, 2, 694, 695, 7, 17, 2, 2, 695, 127, 3, 2, 2, 2, 696, 697, 7, 8, 2, 2, 697, 698, 7, 79, 2, 2, 698, 699, 7, 52, 2, 2, 699, 700, 5, 2, 2, 2, 700, 701, 7, 17, 2, 2, 701, 129, 3, 2, 2, 2, 702, 704, 7, 47, 2, 2, 703, 705, 7, 28, 2, 2, 704, 703, 3, 2, 2, 2, 704, 705, 3, 2, 2, 2, 705, 706, 3, 2, 2, 2, 706, 707, 7, 25, 2, 2, 707, 708, 7, 79, 2, 2, 708, 709, 5, 8, 5, 2, 709, 710, 7, 3, 2, 2, 710, 711, 5, 4, 3, 2, 711, 712, 7, 4, 2, 2, 712, 713, 5, 14, 8, 2, 713, 714, 7, 17, 2, 2, 714, 131, 3, 2, 2, 2, 715, 718, 7, 47, 2, 2, 716, 717, 7, 22, 2, 2, 717, 719, 7, 78, 2, 2, 718, 716, 3, 2, 2, 2, 718, 719, 3, 2, 2, 2, 719, 720, 3, 2, 2, 2, 720, 721, 7, 24, 2, 2, 721, 722, 7, 79, 2, 2, 722, 723, 5, 8, 5, 2, 723, 724, 5, 10, 6, 2, 724, 725, 5, 14, 8, 2, 725, 726, 5, 16, 9, 2, 726, 727, 7, 17, 2, 2, 727, 133, 3, 2, 2, 2, 728, 729, 7, 47, 2, 2, 729, 730, 7, 26, 2, 2, 730, 731, 7, 79, 2, 2, 731, 732, 5, 10, 6, 2, 732, 733, 5, 14, 8, 2, 733, 734, 7, 17, 2, 2, 734, 135, 3, 2, 2, 2, 735, 737, 7, 28, 2, 2, 736, 735, 3, 2, 2, 2, 736, 737, 3, 2, 2, 2, 737, 738, 3, 2, 2, 2, 738, 739, 7, 25, 2, 2, 739, 740, 7, 79, 2, 2, 740, 741, 5, 8, 5, 2, 741, 742, 5, 22, 12, 2, 742, 745, 5, 14, 8, 2, 743, 746, 5, 114, 58, 2, 744, 746, 7, 17, 2, 2, 745, 743, 3, 2, 2, 2, 745, 744, 3, 2, 2, 2, 746, 137, 3, 2, 2, 2, 747, 748, 7, 79, 2, 2, 748, 749, 5, 6, 4, 2, 749, 750, 5, 22, 12, 2, 750, 751, 5, 14, 8, 2, 751, 752, 5, 
16, 9, 2, 752, 753, 5, 114, 58, 2, 753, 139, 3, 2, 2, 2, 754, 755, 7, 22, 2, 2, 755, 757, 7, 78, 2, 2, 756, 754, 3, 2, 2, 2, 756, 757, 3, 2, 2, 2, 757, 758, 3, 2, 2, 2, 758, 759, 7, 24, 2, 2, 759, 760, 7, 79, 2, 2, 760, 761, 5, 8, 5, 2, 761, 762, 5, 22, 12, 2, 762, 763, 5, 14, 8, 2, 763, 766, 5, 16, 9, 2, 764, 767, 5, 114, 58, 2, 765, 767, 7, 17, 2, 2, 766, 764, 3, 2, 2, 2, 766, 765, 3, 2, 2, 2, 767, 141, 3, 2, 2, 2, 768, 769, 7, 46, 2, 2, 769, 770, 7, 79, 2, 2, 770, 771, 7, 7, 2, 2, 771, 772, 5, 2, 2, 2, 772, 773, 5, 122, 62, 2, 773, 774, 7, 17, 2, 2, 774, 143, 3, 2, 2, 2, 775, 776, 7, 46, 2, 2, 776, 777, 7, 79, 2, 2, 777, 778, 7, 7, 2, 2, 778, 779, 5, 2, 2, 2, 779, 780, 7, 52, 2, 2, 780, 781, 5, 26, 14, 2, 781, 782, 7, 17, 2, 2, 782, 145, 3, 2, 2, 2, 783, 784, 7, 23, 2, 2, 784, 785, 7, 79, 2, 2, 785, 786, 7, 15, 2, 2, 786, 787, 5, 118, 60, 2, 787, 788, 7, 16, 2, 2, 788, 147, 3, 2, 2, 2, 789, 801, 5, 146, 74, 2, 790, 801, 5, 126, 64, 2, 791, 801, 5, 128, 65, 2, 792, 801, 5, 136, 69, 2, 793, 801, 5, 138, 70, 2, 794, 801, 5, 140, 71, 2, 795, 801, 5, 132, 67, 2, 796, 801, 5, 130, 66, 2, 797, 801, 5, 134, 68, 2, 798, 801, 5, 142, 72, 2, 799, 801, 5, 144, 73, 2, 800, 789, 3, 2, 2, 2, 800, 790, 3, 2, 2, 2, 800, 791, 3, 2, 2, 2, 800, 792, 3, 2, 2, 2, 800, 793, 3, 2, 2, 2, 800, 794, 3, 2, 2, 2, 800, 795, 3, 2, 2, 2, 800, 796, 3, 2, 2, 2, 800, 797, 3, 2, 2, 2, 800, 798, 3, 2, 2, 2, 800, 799, 3, 2, 2, 2, 801, 149, 3, 2, 2, 2, 802, 803, 7, 27, 2, 2, 803, 804, 7, 79, 2, 2, 804, 808, 7, 15, 2, 2, 805, 807, 5, 148, 75, 2, 806, 805, 3, 2, 2, 2, 807, 810, 3, 2, 2, 2, 808, 806, 3, 2, 2, 2, 808, 809, 3, 2, 2, 2, 809, 811, 3, 2, 2, 2, 810, 808, 3, 2, 2, 2, 811, 812, 7, 16, 2, 2, 812, 151, 3, 2, 2, 2, 813, 816, 5, 150, 76, 2, 814, 816, 5, 148, 75, 2, 815, 813, 3, 2, 2, 2, 815, 814, 3, 2, 2, 2, 816, 819, 3, 2, 2, 2, 817, 815, 3, 2, 2, 2, 817, 818, 3, 2, 2, 2, 818, 153, 3, 2, 2, 2, 819, 817, 3, 2, 2, 2, 79, 156, 170, 177, 185, 188, 204, 208, 212, 218, 223, 229, 236, 240, 248, 251, 259, 
262, 267, 271, 277, 290, 294, 309, 320, 331, 342, 353, 364, 375, 386, 397, 403, 416, 426, 428, 441, 447, 449, 453, 462, 465, 472, 474, 484, 487, 501, 505, 510, 525, 531, 538, 553, 556, 564, 572, 582, 591, 602, 611, 642, 647, 651, 659, 671, 686, 689, 692, 704, 718, 736, 745, 756, 766, 800, 808, 815, 817] \ No newline at end of file
diff --git a/deps/v8/src/torque/Torque.tokens b/deps/v8/src/torque/Torque.tokens
new file mode 100644
index 0000000000..63589b27b7
--- /dev/null
+++ b/deps/v8/src/torque/Torque.tokens
@@ -0,0 +1,154 @@
+T__0=1
+T__1=2
+T__2=3
+T__3=4
+T__4=5
+T__5=6
+T__6=7
+T__7=8
+T__8=9
+T__9=10
+T__10=11
+T__11=12
+T__12=13
+T__13=14
+T__14=15
+T__15=16
+T__16=17
+T__17=18
+T__18=19
+T__19=20
+T__20=21
+MACRO=22
+BUILTIN=23
+RUNTIME=24
+MODULE=25
+JAVASCRIPT=26
+DEFERRED=27
+IF=28
+FOR=29
+WHILE=30
+RETURN=31
+CONSTEXPR=32
+CONTINUE=33
+BREAK=34
+GOTO=35
+OTHERWISE=36
+TRY=37
+LABEL=38
+LABELS=39
+TAIL=40
+ISNT=41
+IS=42
+LET=43
+CONST=44
+EXTERN=45
+ASSERT_TOKEN=46
+CHECK_TOKEN=47
+UNREACHABLE_TOKEN=48
+DEBUG_TOKEN=49
+ASSIGNMENT=50
+ASSIGNMENT_OPERATOR=51
+EQUAL=52
+PLUS=53
+MINUS=54
+MULTIPLY=55
+DIVIDE=56
+MODULO=57
+BIT_OR=58
+BIT_AND=59
+BIT_NOT=60
+MAX=61
+MIN=62
+NOT_EQUAL=63
+LESS_THAN=64
+LESS_THAN_EQUAL=65
+GREATER_THAN=66
+GREATER_THAN_EQUAL=67
+SHIFT_LEFT=68
+SHIFT_RIGHT=69
+SHIFT_RIGHT_ARITHMETIC=70
+VARARGS=71
+EQUALITY_OPERATOR=72
+INCREMENT=73
+DECREMENT=74
+NOT=75
+STRING_LITERAL=76
+IDENTIFIER=77
+WS=78
+BLOCK_COMMENT=79
+LINE_COMMENT=80
+DECIMAL_LITERAL=81
+'('=1
+')'=2
+'=>'=3
+','=4
+':'=5
+'type'=6
+'?'=7
+'||'=8
+'&&'=9
+'.'=10
+'['=11
+']'=12
+'{'=13
+'}'=14
+';'=15
+'of'=16
+'else'=17
+'extends'=18
+'generates'=19
+'operator'=20
+'struct'=21
+'macro'=22
+'builtin'=23
+'runtime'=24
+'module'=25
+'javascript'=26
+'deferred'=27
+'if'=28
+'for'=29
+'while'=30
+'return'=31
+'constexpr'=32
+'continue'=33
+'break'=34
+'goto'=35
+'otherwise'=36
+'try'=37
+'label'=38
+'labels'=39
+'tail'=40
+'isnt'=41
+'is'=42
+'let'=43
+'const'=44
+'extern'=45
+'assert'=46
+'check'=47
+'unreachable'=48
+'debug'=49
+'='=50
+'=='=52
+'+'=53
+'-'=54
+'*'=55
+'/'=56
+'%'=57
+'|'=58
+'&'=59
+'~'=60
+'max'=61
+'min'=62
+'!='=63
+'<'=64
+'<='=65
+'>'=66
+'>='=67
+'<<'=68
+'>>'=69
+'>>>'=70
+'...'=71
+'++'=73
+'--'=74
+'!'=75
diff --git a/deps/v8/src/torque/TorqueBaseListener.h b/deps/v8/src/torque/TorqueBaseListener.h
index 037b0c9838..5b2e7613cc 100644
--- a/deps/v8/src/torque/TorqueBaseListener.h
+++ b/deps/v8/src/torque/TorqueBaseListener.h
@@ -141,6 +141,16 @@ class TorqueBaseListener : public TorqueListener {
void exitAssignmentExpression(
TorqueParser::AssignmentExpressionContext* /*ctx*/) override {}
+ void enterStructExpression(
+ TorqueParser::StructExpressionContext* /*ctx*/) override {}
+ void exitStructExpression(
+ TorqueParser::StructExpressionContext* /*ctx*/) override {}
+
+ void enterFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* /*ctx*/) override {}
+ void exitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* /*ctx*/) override {}
+
void enterPrimaryExpression(
TorqueParser::PrimaryExpressionContext* /*ctx*/) override {}
void exitPrimaryExpression(
@@ -232,8 +242,10 @@ class TorqueBaseListener : public TorqueListener {
void exitHandlerWithStatement(
TorqueParser::HandlerWithStatementContext* /*ctx*/) override {}
- void enterTryCatch(TorqueParser::TryCatchContext* /*ctx*/) override {}
- void exitTryCatch(TorqueParser::TryCatchContext* /*ctx*/) override {}
+ void enterTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* /*ctx*/) override {}
+ void exitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* /*ctx*/) override {}
void enterDiagnosticStatement(
TorqueParser::DiagnosticStatementContext* /*ctx*/) override {}
@@ -261,6 +273,16 @@ class TorqueBaseListener : public TorqueListener {
void enterHelperBody(TorqueParser::HelperBodyContext* /*ctx*/) override {}
void exitHelperBody(TorqueParser::HelperBodyContext* /*ctx*/) override {}
+ void enterFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* /*ctx*/) override {}
+ void exitFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* /*ctx*/) override {}
+
+ void enterFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* /*ctx*/) override {}
+ void exitFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* /*ctx*/) override {}
+
void enterExtendsDeclaration(
TorqueParser::ExtendsDeclarationContext* /*ctx*/) override {}
void exitExtendsDeclaration(
@@ -281,6 +303,11 @@ class TorqueBaseListener : public TorqueListener {
void exitTypeDeclaration(
TorqueParser::TypeDeclarationContext* /*ctx*/) override {}
+ void enterTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* /*ctx*/) override {}
+ void exitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* /*ctx*/) override {}
+
void enterExternalBuiltin(
TorqueParser::ExternalBuiltinContext* /*ctx*/) override {}
void exitExternalBuiltin(
@@ -311,11 +338,21 @@ class TorqueBaseListener : public TorqueListener {
void exitMacroDeclaration(
TorqueParser::MacroDeclarationContext* /*ctx*/) override {}
+ void enterExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* /*ctx*/) override {}
+ void exitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* /*ctx*/) override {}
+
void enterConstDeclaration(
TorqueParser::ConstDeclarationContext* /*ctx*/) override {}
void exitConstDeclaration(
TorqueParser::ConstDeclarationContext* /*ctx*/) override {}
+ void enterStructDeclaration(
+ TorqueParser::StructDeclarationContext* /*ctx*/) override {}
+ void exitStructDeclaration(
+ TorqueParser::StructDeclarationContext* /*ctx*/) override {}
+
void enterDeclaration(TorqueParser::DeclarationContext* /*ctx*/) override {}
void exitDeclaration(TorqueParser::DeclarationContext* /*ctx*/) override {}
diff --git a/deps/v8/src/torque/TorqueBaseVisitor.h b/deps/v8/src/torque/TorqueBaseVisitor.h
index 26433eb4f7..df84a2ead5 100644
--- a/deps/v8/src/torque/TorqueBaseVisitor.h
+++ b/deps/v8/src/torque/TorqueBaseVisitor.h
@@ -148,6 +148,16 @@ class TorqueBaseVisitor : public TorqueVisitor {
return visitChildren(ctx);
}
+ antlrcpp::Any visitStructExpression(
+ TorqueParser::StructExpressionContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
+ antlrcpp::Any visitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
antlrcpp::Any visitPrimaryExpression(
TorqueParser::PrimaryExpressionContext* ctx) override {
return visitChildren(ctx);
@@ -248,7 +258,8 @@ class TorqueBaseVisitor : public TorqueVisitor {
return visitChildren(ctx);
}
- antlrcpp::Any visitTryCatch(TorqueParser::TryCatchContext* ctx) override {
+ antlrcpp::Any visitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* ctx) override {
return visitChildren(ctx);
}
@@ -280,6 +291,16 @@ class TorqueBaseVisitor : public TorqueVisitor {
return visitChildren(ctx);
}
+ antlrcpp::Any visitFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
+ antlrcpp::Any visitFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
antlrcpp::Any visitExtendsDeclaration(
TorqueParser::ExtendsDeclarationContext* ctx) override {
return visitChildren(ctx);
@@ -300,6 +321,11 @@ class TorqueBaseVisitor : public TorqueVisitor {
return visitChildren(ctx);
}
+ antlrcpp::Any visitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
antlrcpp::Any visitExternalBuiltin(
TorqueParser::ExternalBuiltinContext* ctx) override {
return visitChildren(ctx);
@@ -330,11 +356,21 @@ class TorqueBaseVisitor : public TorqueVisitor {
return visitChildren(ctx);
}
+ antlrcpp::Any visitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
antlrcpp::Any visitConstDeclaration(
TorqueParser::ConstDeclarationContext* ctx) override {
return visitChildren(ctx);
}
+ antlrcpp::Any visitStructDeclaration(
+ TorqueParser::StructDeclarationContext* ctx) override {
+ return visitChildren(ctx);
+ }
+
antlrcpp::Any visitDeclaration(
TorqueParser::DeclarationContext* ctx) override {
return visitChildren(ctx);
diff --git a/deps/v8/src/torque/TorqueLexer.cpp b/deps/v8/src/torque/TorqueLexer.cpp
index 2417af711e..b48f0cbf78 100644
--- a/deps/v8/src/torque/TorqueLexer.cpp
+++ b/deps/v8/src/torque/TorqueLexer.cpp
@@ -75,11 +75,8 @@ std::vector<std::string> TorqueLexer::_ruleNames = {u8"T__0",
u8"RUNTIME",
u8"MODULE",
u8"JAVASCRIPT",
- u8"IMPLICIT",
u8"DEFERRED",
u8"IF",
- u8"CAST_KEYWORD",
- u8"CONVERT_KEYWORD",
u8"FOR",
u8"WHILE",
u8"RETURN",
@@ -89,15 +86,16 @@ std::vector<std::string> TorqueLexer::_ruleNames = {u8"T__0",
u8"GOTO",
u8"OTHERWISE",
u8"TRY",
- u8"CATCH",
u8"LABEL",
u8"LABELS",
u8"TAIL",
u8"ISNT",
u8"IS",
u8"LET",
+ u8"CONST",
u8"EXTERN",
- u8"ASSERT",
+ u8"ASSERT_TOKEN",
+ u8"CHECK_TOKEN",
u8"UNREACHABLE_TOKEN",
u8"DEBUG_TOKEN",
u8"ASSIGNMENT",
@@ -155,25 +153,22 @@ std::vector<std::string> TorqueLexer::_literalNames = {"",
u8"'.'",
u8"'['",
u8"']'",
+ u8"'{'",
+ u8"'}'",
u8"';'",
u8"'of'",
u8"'else'",
- u8"'{'",
- u8"'}'",
u8"'extends'",
u8"'generates'",
u8"'operator'",
- u8"'const'",
+ u8"'struct'",
u8"'macro'",
u8"'builtin'",
u8"'runtime'",
u8"'module'",
u8"'javascript'",
- u8"'implicit'",
u8"'deferred'",
u8"'if'",
- u8"'cast'",
- u8"'convert'",
u8"'for'",
u8"'while'",
u8"'return'",
@@ -183,15 +178,16 @@ std::vector<std::string> TorqueLexer::_literalNames = {"",
u8"'goto'",
u8"'otherwise'",
u8"'try'",
- u8"'catch'",
u8"'label'",
u8"'labels'",
u8"'tail'",
u8"'isnt'",
u8"'is'",
u8"'let'",
+ u8"'const'",
u8"'extern'",
u8"'assert'",
+ u8"'check'",
u8"'unreachable'",
u8"'debug'",
u8"'='",
@@ -249,11 +245,8 @@ std::vector<std::string> TorqueLexer::_symbolicNames = {
u8"RUNTIME",
u8"MODULE",
u8"JAVASCRIPT",
- u8"IMPLICIT",
u8"DEFERRED",
u8"IF",
- u8"CAST_KEYWORD",
- u8"CONVERT_KEYWORD",
u8"FOR",
u8"WHILE",
u8"RETURN",
@@ -263,15 +256,16 @@ std::vector<std::string> TorqueLexer::_symbolicNames = {
u8"GOTO",
u8"OTHERWISE",
u8"TRY",
- u8"CATCH",
u8"LABEL",
u8"LABELS",
u8"TAIL",
u8"ISNT",
u8"IS",
u8"LET",
+ u8"CONST",
u8"EXTERN",
- u8"ASSERT",
+ u8"ASSERT_TOKEN",
+ u8"CHECK_TOKEN",
u8"UNREACHABLE_TOKEN",
u8"DEBUG_TOKEN",
u8"ASSIGNMENT",
@@ -329,7 +323,7 @@ TorqueLexer::Initializer::Initializer() {
_serializedATN = {
0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
- 0x2, 0x55, 0x2a1, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2,
+ 0x2, 0x53, 0x299, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2,
0x4, 0x3, 0x9, 0x3, 0x4, 0x4, 0x9, 0x4, 0x4,
0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7,
0x9, 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9,
@@ -367,621 +361,618 @@ TorqueLexer::Initializer::Initializer() {
0x9, 0x4f, 0x4, 0x50, 0x9, 0x50, 0x4, 0x51, 0x9,
0x51, 0x4, 0x52, 0x9, 0x52, 0x4, 0x53, 0x9, 0x53,
0x4, 0x54, 0x9, 0x54, 0x4, 0x55, 0x9, 0x55, 0x4,
- 0x56, 0x9, 0x56, 0x4, 0x57, 0x9, 0x57, 0x4, 0x58,
- 0x9, 0x58, 0x3, 0x2, 0x3, 0x2, 0x3, 0x3, 0x3,
- 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5,
- 0x3, 0x5, 0x3, 0x6, 0x3, 0x6, 0x3, 0x7, 0x3,
- 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x8,
- 0x3, 0x8, 0x3, 0x9, 0x3, 0x9, 0x3, 0x9, 0x3,
- 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xb, 0x3, 0xb,
- 0x3, 0xc, 0x3, 0xc, 0x3, 0xd, 0x3, 0xd, 0x3,
- 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf,
- 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3,
- 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x12, 0x3, 0x12,
+ 0x56, 0x9, 0x56, 0x3, 0x2, 0x3, 0x2, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3,
+ 0x5, 0x3, 0x5, 0x3, 0x6, 0x3, 0x6, 0x3, 0x7,
+ 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3,
+ 0x8, 0x3, 0x8, 0x3, 0x9, 0x3, 0x9, 0x3, 0x9,
+ 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xb, 0x3,
+ 0xb, 0x3, 0xc, 0x3, 0xc, 0x3, 0xd, 0x3, 0xd,
+ 0x3, 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3,
+ 0x10, 0x3, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11,
+ 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3,
+ 0x12, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13,
0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3,
- 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x14,
- 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3,
0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14,
+ 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3,
+ 0x14, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15,
0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3,
- 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15,
- 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3,
- 0x16, 0x3, 0x16, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
- 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x18, 0x3,
- 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18,
- 0x3, 0x18, 0x3, 0x18, 0x3, 0x19, 0x3, 0x19, 0x3,
- 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19,
- 0x3, 0x19, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3,
- 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1b,
+ 0x15, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16,
+ 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x17, 0x3,
+ 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
+ 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3,
+ 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x19,
+ 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
+ 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x1a, 0x3, 0x1a,
+ 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3,
+ 0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b,
0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3,
- 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b,
- 0x3, 0x1b, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3,
- 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c,
- 0x3, 0x1c, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3,
- 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d,
+ 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1c, 0x3, 0x1c,
+ 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3,
+ 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1d, 0x3, 0x1d,
0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1e, 0x3,
- 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
- 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3,
- 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x21,
- 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x22, 0x3,
+ 0x1e, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
+ 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x20, 0x3, 0x20, 0x3,
+ 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20,
+ 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3,
+ 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21,
+ 0x3, 0x21, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3,
0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22,
- 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3,
+ 0x3, 0x22, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3,
0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x24, 0x3, 0x24,
- 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3,
- 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x25,
+ 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x25, 0x3,
+ 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25,
0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3,
- 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x26,
- 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3,
- 0x26, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27,
- 0x3, 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3,
- 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28,
- 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3, 0x29, 0x3,
- 0x29, 0x3, 0x29, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a,
- 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2b, 0x3,
- 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b,
- 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3,
- 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d,
- 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2e, 0x3,
- 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2f,
- 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x30, 0x3, 0x30, 0x3,
- 0x30, 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31,
+ 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x27,
+ 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3,
+ 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28,
+ 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3,
+ 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a,
+ 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3,
+ 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c,
+ 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d, 0x3,
+ 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2e,
+ 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3,
+ 0x2e, 0x3, 0x2e, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f,
+ 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3,
+ 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30,
+ 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3,
+ 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31,
0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3,
0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32,
- 0x3, 0x32, 0x3, 0x32, 0x3, 0x33, 0x3, 0x33, 0x3,
- 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33,
- 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3,
- 0x33, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x35, 0x3, 0x35, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x5, 0x36, 0x1e6,
- 0xa, 0x36, 0x3, 0x37, 0x3, 0x37, 0x3, 0x37, 0x3,
- 0x38, 0x3, 0x38, 0x3, 0x39, 0x3, 0x39, 0x3, 0x3a,
- 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3c, 0x3,
- 0x3c, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3e, 0x3, 0x3e,
- 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x40, 0x3, 0x40, 0x3,
- 0x40, 0x3, 0x40, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41,
- 0x3, 0x41, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3,
- 0x43, 0x3, 0x43, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44,
- 0x3, 0x45, 0x3, 0x45, 0x3, 0x46, 0x3, 0x46, 0x3,
- 0x46, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x48,
- 0x3, 0x48, 0x3, 0x48, 0x3, 0x49, 0x3, 0x49, 0x3,
- 0x49, 0x3, 0x49, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a,
- 0x3, 0x4a, 0x3, 0x4b, 0x3, 0x4b, 0x5, 0x4b, 0x220,
- 0xa, 0x4b, 0x3, 0x4c, 0x3, 0x4c, 0x3, 0x4c, 0x3,
- 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4e, 0x3, 0x4e,
- 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x4f, 0x7, 0x4f, 0x22d,
- 0xa, 0x4f, 0xc, 0x4f, 0xe, 0x4f, 0x230, 0xb, 0x4f,
- 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x4f, 0x7,
- 0x4f, 0x236, 0xa, 0x4f, 0xc, 0x4f, 0xe, 0x4f, 0x239,
- 0xb, 0x4f, 0x3, 0x4f, 0x5, 0x4f, 0x23c, 0xa, 0x4f,
- 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x51, 0x3,
- 0x51, 0x7, 0x51, 0x243, 0xa, 0x51, 0xc, 0x51, 0xe,
- 0x51, 0x246, 0xb, 0x51, 0x3, 0x52, 0x6, 0x52, 0x249,
- 0xa, 0x52, 0xd, 0x52, 0xe, 0x52, 0x24a, 0x3, 0x52,
- 0x3, 0x52, 0x3, 0x53, 0x3, 0x53, 0x3, 0x53, 0x3,
- 0x53, 0x7, 0x53, 0x253, 0xa, 0x53, 0xc, 0x53, 0xe,
- 0x53, 0x256, 0xb, 0x53, 0x3, 0x53, 0x3, 0x53, 0x3,
- 0x53, 0x5, 0x53, 0x25b, 0xa, 0x53, 0x3, 0x53, 0x3,
- 0x53, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54,
- 0x7, 0x54, 0x263, 0xa, 0x54, 0xc, 0x54, 0xe, 0x54,
- 0x266, 0xb, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x55,
- 0x3, 0x55, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56, 0x7,
- 0x56, 0x26f, 0xa, 0x56, 0xc, 0x56, 0xe, 0x56, 0x272,
- 0xb, 0x56, 0x5, 0x56, 0x274, 0xa, 0x56, 0x3, 0x57,
- 0x3, 0x57, 0x5, 0x57, 0x278, 0xa, 0x57, 0x3, 0x57,
- 0x6, 0x57, 0x27b, 0xa, 0x57, 0xd, 0x57, 0xe, 0x57,
- 0x27c, 0x3, 0x58, 0x5, 0x58, 0x280, 0xa, 0x58, 0x3,
- 0x58, 0x3, 0x58, 0x3, 0x58, 0x7, 0x58, 0x285, 0xa,
- 0x58, 0xc, 0x58, 0xe, 0x58, 0x288, 0xb, 0x58, 0x3,
- 0x58, 0x5, 0x58, 0x28b, 0xa, 0x58, 0x3, 0x58, 0x5,
- 0x58, 0x28e, 0xa, 0x58, 0x3, 0x58, 0x3, 0x58, 0x6,
- 0x58, 0x292, 0xa, 0x58, 0xd, 0x58, 0xe, 0x58, 0x293,
- 0x3, 0x58, 0x5, 0x58, 0x297, 0xa, 0x58, 0x3, 0x58,
- 0x5, 0x58, 0x29a, 0xa, 0x58, 0x3, 0x58, 0x3, 0x58,
- 0x5, 0x58, 0x29e, 0xa, 0x58, 0x5, 0x58, 0x2a0, 0xa,
- 0x58, 0x3, 0x254, 0x2, 0x59, 0x3, 0x3, 0x5, 0x4,
- 0x7, 0x5, 0x9, 0x6, 0xb, 0x7, 0xd, 0x8, 0xf,
- 0x9, 0x11, 0xa, 0x13, 0xb, 0x15, 0xc, 0x17, 0xd,
- 0x19, 0xe, 0x1b, 0xf, 0x1d, 0x10, 0x1f, 0x11, 0x21,
- 0x12, 0x23, 0x13, 0x25, 0x14, 0x27, 0x15, 0x29, 0x16,
- 0x2b, 0x17, 0x2d, 0x18, 0x2f, 0x19, 0x31, 0x1a, 0x33,
- 0x1b, 0x35, 0x1c, 0x37, 0x1d, 0x39, 0x1e, 0x3b, 0x1f,
- 0x3d, 0x20, 0x3f, 0x21, 0x41, 0x22, 0x43, 0x23, 0x45,
- 0x24, 0x47, 0x25, 0x49, 0x26, 0x4b, 0x27, 0x4d, 0x28,
- 0x4f, 0x29, 0x51, 0x2a, 0x53, 0x2b, 0x55, 0x2c, 0x57,
- 0x2d, 0x59, 0x2e, 0x5b, 0x2f, 0x5d, 0x30, 0x5f, 0x31,
- 0x61, 0x32, 0x63, 0x33, 0x65, 0x34, 0x67, 0x35, 0x69,
- 0x36, 0x6b, 0x37, 0x6d, 0x38, 0x6f, 0x39, 0x71, 0x3a,
- 0x73, 0x3b, 0x75, 0x3c, 0x77, 0x3d, 0x79, 0x3e, 0x7b,
- 0x3f, 0x7d, 0x40, 0x7f, 0x41, 0x81, 0x42, 0x83, 0x43,
- 0x85, 0x44, 0x87, 0x45, 0x89, 0x46, 0x8b, 0x47, 0x8d,
- 0x48, 0x8f, 0x49, 0x91, 0x4a, 0x93, 0x4b, 0x95, 0x4c,
- 0x97, 0x4d, 0x99, 0x4e, 0x9b, 0x4f, 0x9d, 0x50, 0x9f,
- 0x2, 0xa1, 0x51, 0xa3, 0x52, 0xa5, 0x53, 0xa7, 0x54,
- 0xa9, 0x2, 0xab, 0x2, 0xad, 0x2, 0xaf, 0x55, 0x3,
- 0x2, 0xd, 0x6, 0x2, 0xc, 0xc, 0xf, 0xf, 0x24,
- 0x24, 0x5e, 0x5e, 0x6, 0x2, 0xc, 0xc, 0xf, 0xf,
- 0x29, 0x29, 0x5e, 0x5e, 0x5, 0x2, 0x24, 0x24, 0x29,
- 0x29, 0x5e, 0x5e, 0x4, 0x2, 0x43, 0x5c, 0x63, 0x7c,
- 0x6, 0x2, 0x32, 0x3b, 0x43, 0x5c, 0x61, 0x61, 0x63,
- 0x7c, 0x5, 0x2, 0xb, 0xc, 0xe, 0xf, 0x22, 0x22,
- 0x4, 0x2, 0xc, 0xc, 0xf, 0xf, 0x3, 0x2, 0x32,
- 0x3b, 0x3, 0x2, 0x33, 0x3b, 0x4, 0x2, 0x47, 0x47,
- 0x67, 0x67, 0x4, 0x2, 0x2d, 0x2d, 0x2f, 0x2f, 0x2,
- 0x2bf, 0x2, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x7, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x9, 0x3, 0x2, 0x2, 0x2, 0x2, 0xb,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0xd, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0xf, 0x3, 0x2, 0x2, 0x2, 0x2, 0x11,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x13, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x15, 0x3, 0x2, 0x2, 0x2, 0x2, 0x17,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x19, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x1b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x1d,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x1f, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x21, 0x3, 0x2, 0x2, 0x2, 0x2, 0x23,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x25, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x27, 0x3, 0x2, 0x2, 0x2, 0x2, 0x29,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x2b, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2f,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x31, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x33, 0x3, 0x2, 0x2, 0x2, 0x2, 0x35,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x37, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x39, 0x3, 0x2, 0x2, 0x2, 0x2, 0x3b,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x3d, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x3f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x41,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x43, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x45, 0x3, 0x2, 0x2, 0x2, 0x2, 0x47,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x49, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x4b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x4d,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x4f, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x51, 0x3, 0x2, 0x2, 0x2, 0x2, 0x53,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x55, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x57, 0x3, 0x2, 0x2, 0x2, 0x2, 0x59,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x5b, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x5d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5f,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x61, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x63, 0x3, 0x2, 0x2, 0x2, 0x2, 0x65,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x67, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x69, 0x3, 0x2, 0x2, 0x2, 0x2, 0x6b,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x6d, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x6f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x71,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x73, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x75, 0x3, 0x2, 0x2, 0x2, 0x2, 0x77,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x79, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x7b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x7d,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x7f, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x81, 0x3, 0x2, 0x2, 0x2, 0x2, 0x83,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x85, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x87, 0x3, 0x2, 0x2, 0x2, 0x2, 0x89,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x8b, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x8d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x8f,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x91, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x93, 0x3, 0x2, 0x2, 0x2, 0x2, 0x95,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x97, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x99, 0x3, 0x2, 0x2, 0x2, 0x2, 0x9b,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x9d, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0xa1, 0x3, 0x2, 0x2, 0x2, 0x2, 0xa3,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0xa5, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0xa7, 0x3, 0x2, 0x2, 0x2, 0x2, 0xaf,
- 0x3, 0x2, 0x2, 0x2, 0x3, 0xb1, 0x3, 0x2, 0x2,
- 0x2, 0x5, 0xb3, 0x3, 0x2, 0x2, 0x2, 0x7, 0xb5,
- 0x3, 0x2, 0x2, 0x2, 0x9, 0xb8, 0x3, 0x2, 0x2,
- 0x2, 0xb, 0xba, 0x3, 0x2, 0x2, 0x2, 0xd, 0xbc,
- 0x3, 0x2, 0x2, 0x2, 0xf, 0xc1, 0x3, 0x2, 0x2,
- 0x2, 0x11, 0xc3, 0x3, 0x2, 0x2, 0x2, 0x13, 0xc6,
- 0x3, 0x2, 0x2, 0x2, 0x15, 0xc9, 0x3, 0x2, 0x2,
- 0x2, 0x17, 0xcb, 0x3, 0x2, 0x2, 0x2, 0x19, 0xcd,
- 0x3, 0x2, 0x2, 0x2, 0x1b, 0xcf, 0x3, 0x2, 0x2,
- 0x2, 0x1d, 0xd1, 0x3, 0x2, 0x2, 0x2, 0x1f, 0xd4,
- 0x3, 0x2, 0x2, 0x2, 0x21, 0xd9, 0x3, 0x2, 0x2,
- 0x2, 0x23, 0xdb, 0x3, 0x2, 0x2, 0x2, 0x25, 0xdd,
- 0x3, 0x2, 0x2, 0x2, 0x27, 0xe5, 0x3, 0x2, 0x2,
- 0x2, 0x29, 0xef, 0x3, 0x2, 0x2, 0x2, 0x2b, 0xf8,
- 0x3, 0x2, 0x2, 0x2, 0x2d, 0xfe, 0x3, 0x2, 0x2,
- 0x2, 0x2f, 0x104, 0x3, 0x2, 0x2, 0x2, 0x31, 0x10c,
- 0x3, 0x2, 0x2, 0x2, 0x33, 0x114, 0x3, 0x2, 0x2,
- 0x2, 0x35, 0x11b, 0x3, 0x2, 0x2, 0x2, 0x37, 0x126,
- 0x3, 0x2, 0x2, 0x2, 0x39, 0x12f, 0x3, 0x2, 0x2,
- 0x2, 0x3b, 0x138, 0x3, 0x2, 0x2, 0x2, 0x3d, 0x13b,
- 0x3, 0x2, 0x2, 0x2, 0x3f, 0x140, 0x3, 0x2, 0x2,
- 0x2, 0x41, 0x148, 0x3, 0x2, 0x2, 0x2, 0x43, 0x14c,
- 0x3, 0x2, 0x2, 0x2, 0x45, 0x152, 0x3, 0x2, 0x2,
- 0x2, 0x47, 0x159, 0x3, 0x2, 0x2, 0x2, 0x49, 0x163,
- 0x3, 0x2, 0x2, 0x2, 0x4b, 0x16c, 0x3, 0x2, 0x2,
- 0x2, 0x4d, 0x172, 0x3, 0x2, 0x2, 0x2, 0x4f, 0x177,
- 0x3, 0x2, 0x2, 0x2, 0x51, 0x181, 0x3, 0x2, 0x2,
- 0x2, 0x53, 0x185, 0x3, 0x2, 0x2, 0x2, 0x55, 0x18b,
- 0x3, 0x2, 0x2, 0x2, 0x57, 0x191, 0x3, 0x2, 0x2,
- 0x2, 0x59, 0x198, 0x3, 0x2, 0x2, 0x2, 0x5b, 0x19d,
- 0x3, 0x2, 0x2, 0x2, 0x5d, 0x1a2, 0x3, 0x2, 0x2,
- 0x2, 0x5f, 0x1a5, 0x3, 0x2, 0x2, 0x2, 0x61, 0x1a9,
- 0x3, 0x2, 0x2, 0x2, 0x63, 0x1b0, 0x3, 0x2, 0x2,
- 0x2, 0x65, 0x1b7, 0x3, 0x2, 0x2, 0x2, 0x67, 0x1c3,
- 0x3, 0x2, 0x2, 0x2, 0x69, 0x1c9, 0x3, 0x2, 0x2,
- 0x2, 0x6b, 0x1e5, 0x3, 0x2, 0x2, 0x2, 0x6d, 0x1e7,
- 0x3, 0x2, 0x2, 0x2, 0x6f, 0x1ea, 0x3, 0x2, 0x2,
- 0x2, 0x71, 0x1ec, 0x3, 0x2, 0x2, 0x2, 0x73, 0x1ee,
- 0x3, 0x2, 0x2, 0x2, 0x75, 0x1f0, 0x3, 0x2, 0x2,
- 0x2, 0x77, 0x1f2, 0x3, 0x2, 0x2, 0x2, 0x79, 0x1f4,
- 0x3, 0x2, 0x2, 0x2, 0x7b, 0x1f6, 0x3, 0x2, 0x2,
- 0x2, 0x7d, 0x1f8, 0x3, 0x2, 0x2, 0x2, 0x7f, 0x1fa,
- 0x3, 0x2, 0x2, 0x2, 0x81, 0x1fe, 0x3, 0x2, 0x2,
- 0x2, 0x83, 0x202, 0x3, 0x2, 0x2, 0x2, 0x85, 0x205,
- 0x3, 0x2, 0x2, 0x2, 0x87, 0x207, 0x3, 0x2, 0x2,
- 0x2, 0x89, 0x20a, 0x3, 0x2, 0x2, 0x2, 0x8b, 0x20c,
- 0x3, 0x2, 0x2, 0x2, 0x8d, 0x20f, 0x3, 0x2, 0x2,
- 0x2, 0x8f, 0x212, 0x3, 0x2, 0x2, 0x2, 0x91, 0x215,
- 0x3, 0x2, 0x2, 0x2, 0x93, 0x219, 0x3, 0x2, 0x2,
- 0x2, 0x95, 0x21f, 0x3, 0x2, 0x2, 0x2, 0x97, 0x221,
- 0x3, 0x2, 0x2, 0x2, 0x99, 0x224, 0x3, 0x2, 0x2,
- 0x2, 0x9b, 0x227, 0x3, 0x2, 0x2, 0x2, 0x9d, 0x23b,
- 0x3, 0x2, 0x2, 0x2, 0x9f, 0x23d, 0x3, 0x2, 0x2,
- 0x2, 0xa1, 0x240, 0x3, 0x2, 0x2, 0x2, 0xa3, 0x248,
- 0x3, 0x2, 0x2, 0x2, 0xa5, 0x24e, 0x3, 0x2, 0x2,
- 0x2, 0xa7, 0x25e, 0x3, 0x2, 0x2, 0x2, 0xa9, 0x269,
- 0x3, 0x2, 0x2, 0x2, 0xab, 0x273, 0x3, 0x2, 0x2,
- 0x2, 0xad, 0x275, 0x3, 0x2, 0x2, 0x2, 0xaf, 0x29f,
- 0x3, 0x2, 0x2, 0x2, 0xb1, 0xb2, 0x7, 0x2a, 0x2,
- 0x2, 0xb2, 0x4, 0x3, 0x2, 0x2, 0x2, 0xb3, 0xb4,
- 0x7, 0x2b, 0x2, 0x2, 0xb4, 0x6, 0x3, 0x2, 0x2,
- 0x2, 0xb5, 0xb6, 0x7, 0x3f, 0x2, 0x2, 0xb6, 0xb7,
- 0x7, 0x40, 0x2, 0x2, 0xb7, 0x8, 0x3, 0x2, 0x2,
- 0x2, 0xb8, 0xb9, 0x7, 0x2e, 0x2, 0x2, 0xb9, 0xa,
- 0x3, 0x2, 0x2, 0x2, 0xba, 0xbb, 0x7, 0x3c, 0x2,
- 0x2, 0xbb, 0xc, 0x3, 0x2, 0x2, 0x2, 0xbc, 0xbd,
- 0x7, 0x76, 0x2, 0x2, 0xbd, 0xbe, 0x7, 0x7b, 0x2,
- 0x2, 0xbe, 0xbf, 0x7, 0x72, 0x2, 0x2, 0xbf, 0xc0,
- 0x7, 0x67, 0x2, 0x2, 0xc0, 0xe, 0x3, 0x2, 0x2,
- 0x2, 0xc1, 0xc2, 0x7, 0x41, 0x2, 0x2, 0xc2, 0x10,
- 0x3, 0x2, 0x2, 0x2, 0xc3, 0xc4, 0x7, 0x7e, 0x2,
- 0x2, 0xc4, 0xc5, 0x7, 0x7e, 0x2, 0x2, 0xc5, 0x12,
- 0x3, 0x2, 0x2, 0x2, 0xc6, 0xc7, 0x7, 0x28, 0x2,
- 0x2, 0xc7, 0xc8, 0x7, 0x28, 0x2, 0x2, 0xc8, 0x14,
- 0x3, 0x2, 0x2, 0x2, 0xc9, 0xca, 0x7, 0x30, 0x2,
- 0x2, 0xca, 0x16, 0x3, 0x2, 0x2, 0x2, 0xcb, 0xcc,
- 0x7, 0x5d, 0x2, 0x2, 0xcc, 0x18, 0x3, 0x2, 0x2,
- 0x2, 0xcd, 0xce, 0x7, 0x5f, 0x2, 0x2, 0xce, 0x1a,
- 0x3, 0x2, 0x2, 0x2, 0xcf, 0xd0, 0x7, 0x3d, 0x2,
- 0x2, 0xd0, 0x1c, 0x3, 0x2, 0x2, 0x2, 0xd1, 0xd2,
- 0x7, 0x71, 0x2, 0x2, 0xd2, 0xd3, 0x7, 0x68, 0x2,
- 0x2, 0xd3, 0x1e, 0x3, 0x2, 0x2, 0x2, 0xd4, 0xd5,
- 0x7, 0x67, 0x2, 0x2, 0xd5, 0xd6, 0x7, 0x6e, 0x2,
- 0x2, 0xd6, 0xd7, 0x7, 0x75, 0x2, 0x2, 0xd7, 0xd8,
- 0x7, 0x67, 0x2, 0x2, 0xd8, 0x20, 0x3, 0x2, 0x2,
- 0x2, 0xd9, 0xda, 0x7, 0x7d, 0x2, 0x2, 0xda, 0x22,
- 0x3, 0x2, 0x2, 0x2, 0xdb, 0xdc, 0x7, 0x7f, 0x2,
- 0x2, 0xdc, 0x24, 0x3, 0x2, 0x2, 0x2, 0xdd, 0xde,
- 0x7, 0x67, 0x2, 0x2, 0xde, 0xdf, 0x7, 0x7a, 0x2,
- 0x2, 0xdf, 0xe0, 0x7, 0x76, 0x2, 0x2, 0xe0, 0xe1,
- 0x7, 0x67, 0x2, 0x2, 0xe1, 0xe2, 0x7, 0x70, 0x2,
- 0x2, 0xe2, 0xe3, 0x7, 0x66, 0x2, 0x2, 0xe3, 0xe4,
- 0x7, 0x75, 0x2, 0x2, 0xe4, 0x26, 0x3, 0x2, 0x2,
- 0x2, 0xe5, 0xe6, 0x7, 0x69, 0x2, 0x2, 0xe6, 0xe7,
- 0x7, 0x67, 0x2, 0x2, 0xe7, 0xe8, 0x7, 0x70, 0x2,
- 0x2, 0xe8, 0xe9, 0x7, 0x67, 0x2, 0x2, 0xe9, 0xea,
- 0x7, 0x74, 0x2, 0x2, 0xea, 0xeb, 0x7, 0x63, 0x2,
- 0x2, 0xeb, 0xec, 0x7, 0x76, 0x2, 0x2, 0xec, 0xed,
- 0x7, 0x67, 0x2, 0x2, 0xed, 0xee, 0x7, 0x75, 0x2,
- 0x2, 0xee, 0x28, 0x3, 0x2, 0x2, 0x2, 0xef, 0xf0,
- 0x7, 0x71, 0x2, 0x2, 0xf0, 0xf1, 0x7, 0x72, 0x2,
- 0x2, 0xf1, 0xf2, 0x7, 0x67, 0x2, 0x2, 0xf2, 0xf3,
- 0x7, 0x74, 0x2, 0x2, 0xf3, 0xf4, 0x7, 0x63, 0x2,
- 0x2, 0xf4, 0xf5, 0x7, 0x76, 0x2, 0x2, 0xf5, 0xf6,
- 0x7, 0x71, 0x2, 0x2, 0xf6, 0xf7, 0x7, 0x74, 0x2,
- 0x2, 0xf7, 0x2a, 0x3, 0x2, 0x2, 0x2, 0xf8, 0xf9,
- 0x7, 0x65, 0x2, 0x2, 0xf9, 0xfa, 0x7, 0x71, 0x2,
- 0x2, 0xfa, 0xfb, 0x7, 0x70, 0x2, 0x2, 0xfb, 0xfc,
- 0x7, 0x75, 0x2, 0x2, 0xfc, 0xfd, 0x7, 0x76, 0x2,
- 0x2, 0xfd, 0x2c, 0x3, 0x2, 0x2, 0x2, 0xfe, 0xff,
- 0x7, 0x6f, 0x2, 0x2, 0xff, 0x100, 0x7, 0x63, 0x2,
- 0x2, 0x100, 0x101, 0x7, 0x65, 0x2, 0x2, 0x101, 0x102,
- 0x7, 0x74, 0x2, 0x2, 0x102, 0x103, 0x7, 0x71, 0x2,
- 0x2, 0x103, 0x2e, 0x3, 0x2, 0x2, 0x2, 0x104, 0x105,
- 0x7, 0x64, 0x2, 0x2, 0x105, 0x106, 0x7, 0x77, 0x2,
- 0x2, 0x106, 0x107, 0x7, 0x6b, 0x2, 0x2, 0x107, 0x108,
- 0x7, 0x6e, 0x2, 0x2, 0x108, 0x109, 0x7, 0x76, 0x2,
- 0x2, 0x109, 0x10a, 0x7, 0x6b, 0x2, 0x2, 0x10a, 0x10b,
- 0x7, 0x70, 0x2, 0x2, 0x10b, 0x30, 0x3, 0x2, 0x2,
- 0x2, 0x10c, 0x10d, 0x7, 0x74, 0x2, 0x2, 0x10d, 0x10e,
- 0x7, 0x77, 0x2, 0x2, 0x10e, 0x10f, 0x7, 0x70, 0x2,
- 0x2, 0x10f, 0x110, 0x7, 0x76, 0x2, 0x2, 0x110, 0x111,
- 0x7, 0x6b, 0x2, 0x2, 0x111, 0x112, 0x7, 0x6f, 0x2,
- 0x2, 0x112, 0x113, 0x7, 0x67, 0x2, 0x2, 0x113, 0x32,
- 0x3, 0x2, 0x2, 0x2, 0x114, 0x115, 0x7, 0x6f, 0x2,
- 0x2, 0x115, 0x116, 0x7, 0x71, 0x2, 0x2, 0x116, 0x117,
- 0x7, 0x66, 0x2, 0x2, 0x117, 0x118, 0x7, 0x77, 0x2,
- 0x2, 0x118, 0x119, 0x7, 0x6e, 0x2, 0x2, 0x119, 0x11a,
- 0x7, 0x67, 0x2, 0x2, 0x11a, 0x34, 0x3, 0x2, 0x2,
- 0x2, 0x11b, 0x11c, 0x7, 0x6c, 0x2, 0x2, 0x11c, 0x11d,
- 0x7, 0x63, 0x2, 0x2, 0x11d, 0x11e, 0x7, 0x78, 0x2,
- 0x2, 0x11e, 0x11f, 0x7, 0x63, 0x2, 0x2, 0x11f, 0x120,
- 0x7, 0x75, 0x2, 0x2, 0x120, 0x121, 0x7, 0x65, 0x2,
- 0x2, 0x121, 0x122, 0x7, 0x74, 0x2, 0x2, 0x122, 0x123,
- 0x7, 0x6b, 0x2, 0x2, 0x123, 0x124, 0x7, 0x72, 0x2,
- 0x2, 0x124, 0x125, 0x7, 0x76, 0x2, 0x2, 0x125, 0x36,
- 0x3, 0x2, 0x2, 0x2, 0x126, 0x127, 0x7, 0x6b, 0x2,
- 0x2, 0x127, 0x128, 0x7, 0x6f, 0x2, 0x2, 0x128, 0x129,
- 0x7, 0x72, 0x2, 0x2, 0x129, 0x12a, 0x7, 0x6e, 0x2,
- 0x2, 0x12a, 0x12b, 0x7, 0x6b, 0x2, 0x2, 0x12b, 0x12c,
- 0x7, 0x65, 0x2, 0x2, 0x12c, 0x12d, 0x7, 0x6b, 0x2,
- 0x2, 0x12d, 0x12e, 0x7, 0x76, 0x2, 0x2, 0x12e, 0x38,
- 0x3, 0x2, 0x2, 0x2, 0x12f, 0x130, 0x7, 0x66, 0x2,
- 0x2, 0x130, 0x131, 0x7, 0x67, 0x2, 0x2, 0x131, 0x132,
- 0x7, 0x68, 0x2, 0x2, 0x132, 0x133, 0x7, 0x67, 0x2,
- 0x2, 0x133, 0x134, 0x7, 0x74, 0x2, 0x2, 0x134, 0x135,
- 0x7, 0x74, 0x2, 0x2, 0x135, 0x136, 0x7, 0x67, 0x2,
- 0x2, 0x136, 0x137, 0x7, 0x66, 0x2, 0x2, 0x137, 0x3a,
- 0x3, 0x2, 0x2, 0x2, 0x138, 0x139, 0x7, 0x6b, 0x2,
- 0x2, 0x139, 0x13a, 0x7, 0x68, 0x2, 0x2, 0x13a, 0x3c,
- 0x3, 0x2, 0x2, 0x2, 0x13b, 0x13c, 0x7, 0x65, 0x2,
- 0x2, 0x13c, 0x13d, 0x7, 0x63, 0x2, 0x2, 0x13d, 0x13e,
- 0x7, 0x75, 0x2, 0x2, 0x13e, 0x13f, 0x7, 0x76, 0x2,
- 0x2, 0x13f, 0x3e, 0x3, 0x2, 0x2, 0x2, 0x140, 0x141,
- 0x7, 0x65, 0x2, 0x2, 0x141, 0x142, 0x7, 0x71, 0x2,
- 0x2, 0x142, 0x143, 0x7, 0x70, 0x2, 0x2, 0x143, 0x144,
- 0x7, 0x78, 0x2, 0x2, 0x144, 0x145, 0x7, 0x67, 0x2,
- 0x2, 0x145, 0x146, 0x7, 0x74, 0x2, 0x2, 0x146, 0x147,
- 0x7, 0x76, 0x2, 0x2, 0x147, 0x40, 0x3, 0x2, 0x2,
- 0x2, 0x148, 0x149, 0x7, 0x68, 0x2, 0x2, 0x149, 0x14a,
- 0x7, 0x71, 0x2, 0x2, 0x14a, 0x14b, 0x7, 0x74, 0x2,
- 0x2, 0x14b, 0x42, 0x3, 0x2, 0x2, 0x2, 0x14c, 0x14d,
- 0x7, 0x79, 0x2, 0x2, 0x14d, 0x14e, 0x7, 0x6a, 0x2,
- 0x2, 0x14e, 0x14f, 0x7, 0x6b, 0x2, 0x2, 0x14f, 0x150,
- 0x7, 0x6e, 0x2, 0x2, 0x150, 0x151, 0x7, 0x67, 0x2,
- 0x2, 0x151, 0x44, 0x3, 0x2, 0x2, 0x2, 0x152, 0x153,
- 0x7, 0x74, 0x2, 0x2, 0x153, 0x154, 0x7, 0x67, 0x2,
- 0x2, 0x154, 0x155, 0x7, 0x76, 0x2, 0x2, 0x155, 0x156,
- 0x7, 0x77, 0x2, 0x2, 0x156, 0x157, 0x7, 0x74, 0x2,
- 0x2, 0x157, 0x158, 0x7, 0x70, 0x2, 0x2, 0x158, 0x46,
- 0x3, 0x2, 0x2, 0x2, 0x159, 0x15a, 0x7, 0x65, 0x2,
- 0x2, 0x15a, 0x15b, 0x7, 0x71, 0x2, 0x2, 0x15b, 0x15c,
- 0x7, 0x70, 0x2, 0x2, 0x15c, 0x15d, 0x7, 0x75, 0x2,
- 0x2, 0x15d, 0x15e, 0x7, 0x76, 0x2, 0x2, 0x15e, 0x15f,
- 0x7, 0x67, 0x2, 0x2, 0x15f, 0x160, 0x7, 0x7a, 0x2,
- 0x2, 0x160, 0x161, 0x7, 0x72, 0x2, 0x2, 0x161, 0x162,
- 0x7, 0x74, 0x2, 0x2, 0x162, 0x48, 0x3, 0x2, 0x2,
- 0x2, 0x163, 0x164, 0x7, 0x65, 0x2, 0x2, 0x164, 0x165,
- 0x7, 0x71, 0x2, 0x2, 0x165, 0x166, 0x7, 0x70, 0x2,
- 0x2, 0x166, 0x167, 0x7, 0x76, 0x2, 0x2, 0x167, 0x168,
- 0x7, 0x6b, 0x2, 0x2, 0x168, 0x169, 0x7, 0x70, 0x2,
- 0x2, 0x169, 0x16a, 0x7, 0x77, 0x2, 0x2, 0x16a, 0x16b,
- 0x7, 0x67, 0x2, 0x2, 0x16b, 0x4a, 0x3, 0x2, 0x2,
- 0x2, 0x16c, 0x16d, 0x7, 0x64, 0x2, 0x2, 0x16d, 0x16e,
- 0x7, 0x74, 0x2, 0x2, 0x16e, 0x16f, 0x7, 0x67, 0x2,
- 0x2, 0x16f, 0x170, 0x7, 0x63, 0x2, 0x2, 0x170, 0x171,
- 0x7, 0x6d, 0x2, 0x2, 0x171, 0x4c, 0x3, 0x2, 0x2,
- 0x2, 0x172, 0x173, 0x7, 0x69, 0x2, 0x2, 0x173, 0x174,
- 0x7, 0x71, 0x2, 0x2, 0x174, 0x175, 0x7, 0x76, 0x2,
- 0x2, 0x175, 0x176, 0x7, 0x71, 0x2, 0x2, 0x176, 0x4e,
- 0x3, 0x2, 0x2, 0x2, 0x177, 0x178, 0x7, 0x71, 0x2,
- 0x2, 0x178, 0x179, 0x7, 0x76, 0x2, 0x2, 0x179, 0x17a,
- 0x7, 0x6a, 0x2, 0x2, 0x17a, 0x17b, 0x7, 0x67, 0x2,
- 0x2, 0x17b, 0x17c, 0x7, 0x74, 0x2, 0x2, 0x17c, 0x17d,
- 0x7, 0x79, 0x2, 0x2, 0x17d, 0x17e, 0x7, 0x6b, 0x2,
- 0x2, 0x17e, 0x17f, 0x7, 0x75, 0x2, 0x2, 0x17f, 0x180,
- 0x7, 0x67, 0x2, 0x2, 0x180, 0x50, 0x3, 0x2, 0x2,
- 0x2, 0x181, 0x182, 0x7, 0x76, 0x2, 0x2, 0x182, 0x183,
- 0x7, 0x74, 0x2, 0x2, 0x183, 0x184, 0x7, 0x7b, 0x2,
- 0x2, 0x184, 0x52, 0x3, 0x2, 0x2, 0x2, 0x185, 0x186,
- 0x7, 0x65, 0x2, 0x2, 0x186, 0x187, 0x7, 0x63, 0x2,
- 0x2, 0x187, 0x188, 0x7, 0x76, 0x2, 0x2, 0x188, 0x189,
- 0x7, 0x65, 0x2, 0x2, 0x189, 0x18a, 0x7, 0x6a, 0x2,
- 0x2, 0x18a, 0x54, 0x3, 0x2, 0x2, 0x2, 0x18b, 0x18c,
- 0x7, 0x6e, 0x2, 0x2, 0x18c, 0x18d, 0x7, 0x63, 0x2,
- 0x2, 0x18d, 0x18e, 0x7, 0x64, 0x2, 0x2, 0x18e, 0x18f,
- 0x7, 0x67, 0x2, 0x2, 0x18f, 0x190, 0x7, 0x6e, 0x2,
- 0x2, 0x190, 0x56, 0x3, 0x2, 0x2, 0x2, 0x191, 0x192,
- 0x7, 0x6e, 0x2, 0x2, 0x192, 0x193, 0x7, 0x63, 0x2,
- 0x2, 0x193, 0x194, 0x7, 0x64, 0x2, 0x2, 0x194, 0x195,
- 0x7, 0x67, 0x2, 0x2, 0x195, 0x196, 0x7, 0x6e, 0x2,
- 0x2, 0x196, 0x197, 0x7, 0x75, 0x2, 0x2, 0x197, 0x58,
- 0x3, 0x2, 0x2, 0x2, 0x198, 0x199, 0x7, 0x76, 0x2,
- 0x2, 0x199, 0x19a, 0x7, 0x63, 0x2, 0x2, 0x19a, 0x19b,
- 0x7, 0x6b, 0x2, 0x2, 0x19b, 0x19c, 0x7, 0x6e, 0x2,
- 0x2, 0x19c, 0x5a, 0x3, 0x2, 0x2, 0x2, 0x19d, 0x19e,
- 0x7, 0x6b, 0x2, 0x2, 0x19e, 0x19f, 0x7, 0x75, 0x2,
- 0x2, 0x19f, 0x1a0, 0x7, 0x70, 0x2, 0x2, 0x1a0, 0x1a1,
- 0x7, 0x76, 0x2, 0x2, 0x1a1, 0x5c, 0x3, 0x2, 0x2,
- 0x2, 0x1a2, 0x1a3, 0x7, 0x6b, 0x2, 0x2, 0x1a3, 0x1a4,
- 0x7, 0x75, 0x2, 0x2, 0x1a4, 0x5e, 0x3, 0x2, 0x2,
- 0x2, 0x1a5, 0x1a6, 0x7, 0x6e, 0x2, 0x2, 0x1a6, 0x1a7,
- 0x7, 0x67, 0x2, 0x2, 0x1a7, 0x1a8, 0x7, 0x76, 0x2,
- 0x2, 0x1a8, 0x60, 0x3, 0x2, 0x2, 0x2, 0x1a9, 0x1aa,
- 0x7, 0x67, 0x2, 0x2, 0x1aa, 0x1ab, 0x7, 0x7a, 0x2,
- 0x2, 0x1ab, 0x1ac, 0x7, 0x76, 0x2, 0x2, 0x1ac, 0x1ad,
- 0x7, 0x67, 0x2, 0x2, 0x1ad, 0x1ae, 0x7, 0x74, 0x2,
- 0x2, 0x1ae, 0x1af, 0x7, 0x70, 0x2, 0x2, 0x1af, 0x62,
- 0x3, 0x2, 0x2, 0x2, 0x1b0, 0x1b1, 0x7, 0x63, 0x2,
- 0x2, 0x1b1, 0x1b2, 0x7, 0x75, 0x2, 0x2, 0x1b2, 0x1b3,
- 0x7, 0x75, 0x2, 0x2, 0x1b3, 0x1b4, 0x7, 0x67, 0x2,
- 0x2, 0x1b4, 0x1b5, 0x7, 0x74, 0x2, 0x2, 0x1b5, 0x1b6,
- 0x7, 0x76, 0x2, 0x2, 0x1b6, 0x64, 0x3, 0x2, 0x2,
- 0x2, 0x1b7, 0x1b8, 0x7, 0x77, 0x2, 0x2, 0x1b8, 0x1b9,
- 0x7, 0x70, 0x2, 0x2, 0x1b9, 0x1ba, 0x7, 0x74, 0x2,
- 0x2, 0x1ba, 0x1bb, 0x7, 0x67, 0x2, 0x2, 0x1bb, 0x1bc,
- 0x7, 0x63, 0x2, 0x2, 0x1bc, 0x1bd, 0x7, 0x65, 0x2,
- 0x2, 0x1bd, 0x1be, 0x7, 0x6a, 0x2, 0x2, 0x1be, 0x1bf,
- 0x7, 0x63, 0x2, 0x2, 0x1bf, 0x1c0, 0x7, 0x64, 0x2,
- 0x2, 0x1c0, 0x1c1, 0x7, 0x6e, 0x2, 0x2, 0x1c1, 0x1c2,
- 0x7, 0x67, 0x2, 0x2, 0x1c2, 0x66, 0x3, 0x2, 0x2,
- 0x2, 0x1c3, 0x1c4, 0x7, 0x66, 0x2, 0x2, 0x1c4, 0x1c5,
- 0x7, 0x67, 0x2, 0x2, 0x1c5, 0x1c6, 0x7, 0x64, 0x2,
- 0x2, 0x1c6, 0x1c7, 0x7, 0x77, 0x2, 0x2, 0x1c7, 0x1c8,
- 0x7, 0x69, 0x2, 0x2, 0x1c8, 0x68, 0x3, 0x2, 0x2,
- 0x2, 0x1c9, 0x1ca, 0x7, 0x3f, 0x2, 0x2, 0x1ca, 0x6a,
- 0x3, 0x2, 0x2, 0x2, 0x1cb, 0x1cc, 0x7, 0x2c, 0x2,
- 0x2, 0x1cc, 0x1e6, 0x7, 0x3f, 0x2, 0x2, 0x1cd, 0x1ce,
- 0x7, 0x31, 0x2, 0x2, 0x1ce, 0x1e6, 0x7, 0x3f, 0x2,
- 0x2, 0x1cf, 0x1d0, 0x7, 0x27, 0x2, 0x2, 0x1d0, 0x1e6,
- 0x7, 0x3f, 0x2, 0x2, 0x1d1, 0x1d2, 0x7, 0x2d, 0x2,
- 0x2, 0x1d2, 0x1e6, 0x7, 0x3f, 0x2, 0x2, 0x1d3, 0x1d4,
- 0x7, 0x2f, 0x2, 0x2, 0x1d4, 0x1e6, 0x7, 0x3f, 0x2,
- 0x2, 0x1d5, 0x1d6, 0x7, 0x3e, 0x2, 0x2, 0x1d6, 0x1d7,
- 0x7, 0x3e, 0x2, 0x2, 0x1d7, 0x1e6, 0x7, 0x3f, 0x2,
- 0x2, 0x1d8, 0x1d9, 0x7, 0x40, 0x2, 0x2, 0x1d9, 0x1da,
- 0x7, 0x40, 0x2, 0x2, 0x1da, 0x1e6, 0x7, 0x3f, 0x2,
- 0x2, 0x1db, 0x1dc, 0x7, 0x40, 0x2, 0x2, 0x1dc, 0x1dd,
- 0x7, 0x40, 0x2, 0x2, 0x1dd, 0x1de, 0x7, 0x40, 0x2,
- 0x2, 0x1de, 0x1e6, 0x7, 0x3f, 0x2, 0x2, 0x1df, 0x1e0,
- 0x7, 0x28, 0x2, 0x2, 0x1e0, 0x1e6, 0x7, 0x3f, 0x2,
- 0x2, 0x1e1, 0x1e2, 0x7, 0x60, 0x2, 0x2, 0x1e2, 0x1e6,
- 0x7, 0x3f, 0x2, 0x2, 0x1e3, 0x1e4, 0x7, 0x7e, 0x2,
- 0x2, 0x1e4, 0x1e6, 0x7, 0x3f, 0x2, 0x2, 0x1e5, 0x1cb,
- 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1cd, 0x3, 0x2, 0x2,
- 0x2, 0x1e5, 0x1cf, 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1d1,
- 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1d3, 0x3, 0x2, 0x2,
- 0x2, 0x1e5, 0x1d5, 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1d8,
- 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1db, 0x3, 0x2, 0x2,
- 0x2, 0x1e5, 0x1df, 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1e1,
- 0x3, 0x2, 0x2, 0x2, 0x1e5, 0x1e3, 0x3, 0x2, 0x2,
- 0x2, 0x1e6, 0x6c, 0x3, 0x2, 0x2, 0x2, 0x1e7, 0x1e8,
- 0x7, 0x3f, 0x2, 0x2, 0x1e8, 0x1e9, 0x7, 0x3f, 0x2,
- 0x2, 0x1e9, 0x6e, 0x3, 0x2, 0x2, 0x2, 0x1ea, 0x1eb,
- 0x7, 0x2d, 0x2, 0x2, 0x1eb, 0x70, 0x3, 0x2, 0x2,
- 0x2, 0x1ec, 0x1ed, 0x7, 0x2f, 0x2, 0x2, 0x1ed, 0x72,
- 0x3, 0x2, 0x2, 0x2, 0x1ee, 0x1ef, 0x7, 0x2c, 0x2,
- 0x2, 0x1ef, 0x74, 0x3, 0x2, 0x2, 0x2, 0x1f0, 0x1f1,
- 0x7, 0x31, 0x2, 0x2, 0x1f1, 0x76, 0x3, 0x2, 0x2,
- 0x2, 0x1f2, 0x1f3, 0x7, 0x27, 0x2, 0x2, 0x1f3, 0x78,
- 0x3, 0x2, 0x2, 0x2, 0x1f4, 0x1f5, 0x7, 0x7e, 0x2,
- 0x2, 0x1f5, 0x7a, 0x3, 0x2, 0x2, 0x2, 0x1f6, 0x1f7,
- 0x7, 0x28, 0x2, 0x2, 0x1f7, 0x7c, 0x3, 0x2, 0x2,
- 0x2, 0x1f8, 0x1f9, 0x7, 0x80, 0x2, 0x2, 0x1f9, 0x7e,
- 0x3, 0x2, 0x2, 0x2, 0x1fa, 0x1fb, 0x7, 0x6f, 0x2,
- 0x2, 0x1fb, 0x1fc, 0x7, 0x63, 0x2, 0x2, 0x1fc, 0x1fd,
- 0x7, 0x7a, 0x2, 0x2, 0x1fd, 0x80, 0x3, 0x2, 0x2,
- 0x2, 0x1fe, 0x1ff, 0x7, 0x6f, 0x2, 0x2, 0x1ff, 0x200,
- 0x7, 0x6b, 0x2, 0x2, 0x200, 0x201, 0x7, 0x70, 0x2,
- 0x2, 0x201, 0x82, 0x3, 0x2, 0x2, 0x2, 0x202, 0x203,
- 0x7, 0x23, 0x2, 0x2, 0x203, 0x204, 0x7, 0x3f, 0x2,
- 0x2, 0x204, 0x84, 0x3, 0x2, 0x2, 0x2, 0x205, 0x206,
- 0x7, 0x3e, 0x2, 0x2, 0x206, 0x86, 0x3, 0x2, 0x2,
- 0x2, 0x207, 0x208, 0x7, 0x3e, 0x2, 0x2, 0x208, 0x209,
- 0x7, 0x3f, 0x2, 0x2, 0x209, 0x88, 0x3, 0x2, 0x2,
- 0x2, 0x20a, 0x20b, 0x7, 0x40, 0x2, 0x2, 0x20b, 0x8a,
- 0x3, 0x2, 0x2, 0x2, 0x20c, 0x20d, 0x7, 0x40, 0x2,
- 0x2, 0x20d, 0x20e, 0x7, 0x3f, 0x2, 0x2, 0x20e, 0x8c,
- 0x3, 0x2, 0x2, 0x2, 0x20f, 0x210, 0x7, 0x3e, 0x2,
- 0x2, 0x210, 0x211, 0x7, 0x3e, 0x2, 0x2, 0x211, 0x8e,
- 0x3, 0x2, 0x2, 0x2, 0x212, 0x213, 0x7, 0x40, 0x2,
- 0x2, 0x213, 0x214, 0x7, 0x40, 0x2, 0x2, 0x214, 0x90,
- 0x3, 0x2, 0x2, 0x2, 0x215, 0x216, 0x7, 0x40, 0x2,
- 0x2, 0x216, 0x217, 0x7, 0x40, 0x2, 0x2, 0x217, 0x218,
- 0x7, 0x40, 0x2, 0x2, 0x218, 0x92, 0x3, 0x2, 0x2,
- 0x2, 0x219, 0x21a, 0x7, 0x30, 0x2, 0x2, 0x21a, 0x21b,
- 0x7, 0x30, 0x2, 0x2, 0x21b, 0x21c, 0x7, 0x30, 0x2,
- 0x2, 0x21c, 0x94, 0x3, 0x2, 0x2, 0x2, 0x21d, 0x220,
- 0x5, 0x6d, 0x37, 0x2, 0x21e, 0x220, 0x5, 0x83, 0x42,
- 0x2, 0x21f, 0x21d, 0x3, 0x2, 0x2, 0x2, 0x21f, 0x21e,
- 0x3, 0x2, 0x2, 0x2, 0x220, 0x96, 0x3, 0x2, 0x2,
- 0x2, 0x221, 0x222, 0x7, 0x2d, 0x2, 0x2, 0x222, 0x223,
- 0x7, 0x2d, 0x2, 0x2, 0x223, 0x98, 0x3, 0x2, 0x2,
- 0x2, 0x224, 0x225, 0x7, 0x2f, 0x2, 0x2, 0x225, 0x226,
- 0x7, 0x2f, 0x2, 0x2, 0x226, 0x9a, 0x3, 0x2, 0x2,
- 0x2, 0x227, 0x228, 0x7, 0x23, 0x2, 0x2, 0x228, 0x9c,
- 0x3, 0x2, 0x2, 0x2, 0x229, 0x22e, 0x7, 0x24, 0x2,
- 0x2, 0x22a, 0x22d, 0x5, 0x9f, 0x50, 0x2, 0x22b, 0x22d,
- 0xa, 0x2, 0x2, 0x2, 0x22c, 0x22a, 0x3, 0x2, 0x2,
- 0x2, 0x22c, 0x22b, 0x3, 0x2, 0x2, 0x2, 0x22d, 0x230,
- 0x3, 0x2, 0x2, 0x2, 0x22e, 0x22c, 0x3, 0x2, 0x2,
- 0x2, 0x22e, 0x22f, 0x3, 0x2, 0x2, 0x2, 0x22f, 0x231,
- 0x3, 0x2, 0x2, 0x2, 0x230, 0x22e, 0x3, 0x2, 0x2,
- 0x2, 0x231, 0x23c, 0x7, 0x24, 0x2, 0x2, 0x232, 0x237,
- 0x7, 0x29, 0x2, 0x2, 0x233, 0x236, 0x5, 0x9f, 0x50,
- 0x2, 0x234, 0x236, 0xa, 0x3, 0x2, 0x2, 0x235, 0x233,
- 0x3, 0x2, 0x2, 0x2, 0x235, 0x234, 0x3, 0x2, 0x2,
- 0x2, 0x236, 0x239, 0x3, 0x2, 0x2, 0x2, 0x237, 0x235,
- 0x3, 0x2, 0x2, 0x2, 0x237, 0x238, 0x3, 0x2, 0x2,
- 0x2, 0x238, 0x23a, 0x3, 0x2, 0x2, 0x2, 0x239, 0x237,
- 0x3, 0x2, 0x2, 0x2, 0x23a, 0x23c, 0x7, 0x29, 0x2,
- 0x2, 0x23b, 0x229, 0x3, 0x2, 0x2, 0x2, 0x23b, 0x232,
- 0x3, 0x2, 0x2, 0x2, 0x23c, 0x9e, 0x3, 0x2, 0x2,
- 0x2, 0x23d, 0x23e, 0x7, 0x5e, 0x2, 0x2, 0x23e, 0x23f,
- 0x9, 0x4, 0x2, 0x2, 0x23f, 0xa0, 0x3, 0x2, 0x2,
- 0x2, 0x240, 0x244, 0x9, 0x5, 0x2, 0x2, 0x241, 0x243,
- 0x9, 0x6, 0x2, 0x2, 0x242, 0x241, 0x3, 0x2, 0x2,
- 0x2, 0x243, 0x246, 0x3, 0x2, 0x2, 0x2, 0x244, 0x242,
- 0x3, 0x2, 0x2, 0x2, 0x244, 0x245, 0x3, 0x2, 0x2,
- 0x2, 0x245, 0xa2, 0x3, 0x2, 0x2, 0x2, 0x246, 0x244,
- 0x3, 0x2, 0x2, 0x2, 0x247, 0x249, 0x9, 0x7, 0x2,
- 0x2, 0x248, 0x247, 0x3, 0x2, 0x2, 0x2, 0x249, 0x24a,
- 0x3, 0x2, 0x2, 0x2, 0x24a, 0x248, 0x3, 0x2, 0x2,
- 0x2, 0x24a, 0x24b, 0x3, 0x2, 0x2, 0x2, 0x24b, 0x24c,
- 0x3, 0x2, 0x2, 0x2, 0x24c, 0x24d, 0x8, 0x52, 0x2,
- 0x2, 0x24d, 0xa4, 0x3, 0x2, 0x2, 0x2, 0x24e, 0x24f,
- 0x7, 0x31, 0x2, 0x2, 0x24f, 0x250, 0x7, 0x2c, 0x2,
- 0x2, 0x250, 0x254, 0x3, 0x2, 0x2, 0x2, 0x251, 0x253,
- 0xb, 0x2, 0x2, 0x2, 0x252, 0x251, 0x3, 0x2, 0x2,
- 0x2, 0x253, 0x256, 0x3, 0x2, 0x2, 0x2, 0x254, 0x255,
- 0x3, 0x2, 0x2, 0x2, 0x254, 0x252, 0x3, 0x2, 0x2,
- 0x2, 0x255, 0x25a, 0x3, 0x2, 0x2, 0x2, 0x256, 0x254,
- 0x3, 0x2, 0x2, 0x2, 0x257, 0x258, 0x7, 0x2c, 0x2,
- 0x2, 0x258, 0x25b, 0x7, 0x31, 0x2, 0x2, 0x259, 0x25b,
- 0x7, 0x2, 0x2, 0x3, 0x25a, 0x257, 0x3, 0x2, 0x2,
- 0x2, 0x25a, 0x259, 0x3, 0x2, 0x2, 0x2, 0x25b, 0x25c,
- 0x3, 0x2, 0x2, 0x2, 0x25c, 0x25d, 0x8, 0x53, 0x2,
- 0x2, 0x25d, 0xa6, 0x3, 0x2, 0x2, 0x2, 0x25e, 0x25f,
- 0x7, 0x31, 0x2, 0x2, 0x25f, 0x260, 0x7, 0x31, 0x2,
- 0x2, 0x260, 0x264, 0x3, 0x2, 0x2, 0x2, 0x261, 0x263,
- 0xa, 0x8, 0x2, 0x2, 0x262, 0x261, 0x3, 0x2, 0x2,
- 0x2, 0x263, 0x266, 0x3, 0x2, 0x2, 0x2, 0x264, 0x262,
- 0x3, 0x2, 0x2, 0x2, 0x264, 0x265, 0x3, 0x2, 0x2,
- 0x2, 0x265, 0x267, 0x3, 0x2, 0x2, 0x2, 0x266, 0x264,
- 0x3, 0x2, 0x2, 0x2, 0x267, 0x268, 0x8, 0x54, 0x2,
- 0x2, 0x268, 0xa8, 0x3, 0x2, 0x2, 0x2, 0x269, 0x26a,
- 0x9, 0x9, 0x2, 0x2, 0x26a, 0xaa, 0x3, 0x2, 0x2,
- 0x2, 0x26b, 0x274, 0x7, 0x32, 0x2, 0x2, 0x26c, 0x270,
- 0x9, 0xa, 0x2, 0x2, 0x26d, 0x26f, 0x5, 0xa9, 0x55,
- 0x2, 0x26e, 0x26d, 0x3, 0x2, 0x2, 0x2, 0x26f, 0x272,
- 0x3, 0x2, 0x2, 0x2, 0x270, 0x26e, 0x3, 0x2, 0x2,
- 0x2, 0x270, 0x271, 0x3, 0x2, 0x2, 0x2, 0x271, 0x274,
- 0x3, 0x2, 0x2, 0x2, 0x272, 0x270, 0x3, 0x2, 0x2,
- 0x2, 0x273, 0x26b, 0x3, 0x2, 0x2, 0x2, 0x273, 0x26c,
- 0x3, 0x2, 0x2, 0x2, 0x274, 0xac, 0x3, 0x2, 0x2,
- 0x2, 0x275, 0x277, 0x9, 0xb, 0x2, 0x2, 0x276, 0x278,
- 0x9, 0xc, 0x2, 0x2, 0x277, 0x276, 0x3, 0x2, 0x2,
- 0x2, 0x277, 0x278, 0x3, 0x2, 0x2, 0x2, 0x278, 0x27a,
- 0x3, 0x2, 0x2, 0x2, 0x279, 0x27b, 0x5, 0xa9, 0x55,
- 0x2, 0x27a, 0x279, 0x3, 0x2, 0x2, 0x2, 0x27b, 0x27c,
- 0x3, 0x2, 0x2, 0x2, 0x27c, 0x27a, 0x3, 0x2, 0x2,
- 0x2, 0x27c, 0x27d, 0x3, 0x2, 0x2, 0x2, 0x27d, 0xae,
- 0x3, 0x2, 0x2, 0x2, 0x27e, 0x280, 0x5, 0x71, 0x39,
- 0x2, 0x27f, 0x27e, 0x3, 0x2, 0x2, 0x2, 0x27f, 0x280,
- 0x3, 0x2, 0x2, 0x2, 0x280, 0x281, 0x3, 0x2, 0x2,
- 0x2, 0x281, 0x282, 0x5, 0xab, 0x56, 0x2, 0x282, 0x286,
- 0x7, 0x30, 0x2, 0x2, 0x283, 0x285, 0x5, 0xa9, 0x55,
- 0x2, 0x284, 0x283, 0x3, 0x2, 0x2, 0x2, 0x285, 0x288,
- 0x3, 0x2, 0x2, 0x2, 0x286, 0x284, 0x3, 0x2, 0x2,
- 0x2, 0x286, 0x287, 0x3, 0x2, 0x2, 0x2, 0x287, 0x28a,
- 0x3, 0x2, 0x2, 0x2, 0x288, 0x286, 0x3, 0x2, 0x2,
- 0x2, 0x289, 0x28b, 0x5, 0xad, 0x57, 0x2, 0x28a, 0x289,
- 0x3, 0x2, 0x2, 0x2, 0x28a, 0x28b, 0x3, 0x2, 0x2,
- 0x2, 0x28b, 0x2a0, 0x3, 0x2, 0x2, 0x2, 0x28c, 0x28e,
- 0x5, 0x71, 0x39, 0x2, 0x28d, 0x28c, 0x3, 0x2, 0x2,
- 0x2, 0x28d, 0x28e, 0x3, 0x2, 0x2, 0x2, 0x28e, 0x28f,
- 0x3, 0x2, 0x2, 0x2, 0x28f, 0x291, 0x7, 0x30, 0x2,
- 0x2, 0x290, 0x292, 0x5, 0xa9, 0x55, 0x2, 0x291, 0x290,
- 0x3, 0x2, 0x2, 0x2, 0x292, 0x293, 0x3, 0x2, 0x2,
- 0x2, 0x293, 0x291, 0x3, 0x2, 0x2, 0x2, 0x293, 0x294,
- 0x3, 0x2, 0x2, 0x2, 0x294, 0x296, 0x3, 0x2, 0x2,
- 0x2, 0x295, 0x297, 0x5, 0xad, 0x57, 0x2, 0x296, 0x295,
- 0x3, 0x2, 0x2, 0x2, 0x296, 0x297, 0x3, 0x2, 0x2,
- 0x2, 0x297, 0x2a0, 0x3, 0x2, 0x2, 0x2, 0x298, 0x29a,
- 0x5, 0x71, 0x39, 0x2, 0x299, 0x298, 0x3, 0x2, 0x2,
- 0x2, 0x299, 0x29a, 0x3, 0x2, 0x2, 0x2, 0x29a, 0x29b,
- 0x3, 0x2, 0x2, 0x2, 0x29b, 0x29d, 0x5, 0xab, 0x56,
- 0x2, 0x29c, 0x29e, 0x5, 0xad, 0x57, 0x2, 0x29d, 0x29c,
- 0x3, 0x2, 0x2, 0x2, 0x29d, 0x29e, 0x3, 0x2, 0x2,
- 0x2, 0x29e, 0x2a0, 0x3, 0x2, 0x2, 0x2, 0x29f, 0x27f,
- 0x3, 0x2, 0x2, 0x2, 0x29f, 0x28d, 0x3, 0x2, 0x2,
- 0x2, 0x29f, 0x299, 0x3, 0x2, 0x2, 0x2, 0x2a0, 0xb0,
- 0x3, 0x2, 0x2, 0x2, 0x1c, 0x2, 0x1e5, 0x21f, 0x22c,
- 0x22e, 0x235, 0x237, 0x23b, 0x244, 0x24a, 0x254, 0x25a, 0x264,
- 0x270, 0x273, 0x277, 0x27c, 0x27f, 0x286, 0x28a, 0x28d, 0x293,
- 0x296, 0x299, 0x29d, 0x29f, 0x3, 0x2, 0x3, 0x2,
+ 0x3, 0x32, 0x3, 0x33, 0x3, 0x33, 0x3, 0x34, 0x3,
+ 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
+ 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
+ 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
+ 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
+ 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
+ 0x3, 0x34, 0x3, 0x34, 0x5, 0x34, 0x1d3, 0xa, 0x34,
+ 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x36, 0x3,
+ 0x36, 0x3, 0x37, 0x3, 0x37, 0x3, 0x38, 0x3, 0x38,
+ 0x3, 0x39, 0x3, 0x39, 0x3, 0x3a, 0x3, 0x3a, 0x3,
+ 0x3b, 0x3, 0x3b, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3d,
+ 0x3, 0x3d, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3,
+ 0x3e, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f,
+ 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x41, 0x3,
+ 0x41, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x43,
+ 0x3, 0x43, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3,
+ 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x46, 0x3, 0x46,
+ 0x3, 0x46, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3,
+ 0x47, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48,
+ 0x3, 0x49, 0x3, 0x49, 0x5, 0x49, 0x20d, 0xa, 0x49,
+ 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4b, 0x3,
+ 0x4b, 0x3, 0x4b, 0x3, 0x4c, 0x3, 0x4c, 0x3, 0x4d,
+ 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d, 0x21a, 0xa, 0x4d,
+ 0xc, 0x4d, 0xe, 0x4d, 0x21d, 0xb, 0x4d, 0x3, 0x4d,
+ 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d, 0x223,
+ 0xa, 0x4d, 0xc, 0x4d, 0xe, 0x4d, 0x226, 0xb, 0x4d,
+ 0x3, 0x4d, 0x5, 0x4d, 0x229, 0xa, 0x4d, 0x3, 0x4e,
+ 0x3, 0x4e, 0x3, 0x4e, 0x3, 0x4f, 0x3, 0x4f, 0x7,
+ 0x4f, 0x230, 0xa, 0x4f, 0xc, 0x4f, 0xe, 0x4f, 0x233,
+ 0xb, 0x4f, 0x3, 0x50, 0x6, 0x50, 0x236, 0xa, 0x50,
+ 0xd, 0x50, 0xe, 0x50, 0x237, 0x3, 0x50, 0x3, 0x50,
+ 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x7,
+ 0x51, 0x240, 0xa, 0x51, 0xc, 0x51, 0xe, 0x51, 0x243,
+ 0xb, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x5,
+ 0x51, 0x248, 0xa, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3,
+ 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x52, 0x7, 0x52,
+ 0x250, 0xa, 0x52, 0xc, 0x52, 0xe, 0x52, 0x253, 0xb,
+ 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x53, 0x3, 0x53,
+ 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x7, 0x54, 0x25c,
+ 0xa, 0x54, 0xc, 0x54, 0xe, 0x54, 0x25f, 0xb, 0x54,
+ 0x5, 0x54, 0x261, 0xa, 0x54, 0x3, 0x55, 0x3, 0x55,
+ 0x5, 0x55, 0x265, 0xa, 0x55, 0x3, 0x55, 0x6, 0x55,
+ 0x268, 0xa, 0x55, 0xd, 0x55, 0xe, 0x55, 0x269, 0x3,
+ 0x56, 0x5, 0x56, 0x26d, 0xa, 0x56, 0x3, 0x56, 0x3,
+ 0x56, 0x3, 0x56, 0x7, 0x56, 0x272, 0xa, 0x56, 0xc,
+ 0x56, 0xe, 0x56, 0x275, 0xb, 0x56, 0x3, 0x56, 0x5,
+ 0x56, 0x278, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56, 0x27b,
+ 0xa, 0x56, 0x3, 0x56, 0x3, 0x56, 0x6, 0x56, 0x27f,
+ 0xa, 0x56, 0xd, 0x56, 0xe, 0x56, 0x280, 0x3, 0x56,
+ 0x5, 0x56, 0x284, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56,
+ 0x287, 0xa, 0x56, 0x3, 0x56, 0x3, 0x56, 0x5, 0x56,
+ 0x28b, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56, 0x28e, 0xa,
+ 0x56, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56,
+ 0x6, 0x56, 0x294, 0xa, 0x56, 0xd, 0x56, 0xe, 0x56,
+ 0x295, 0x5, 0x56, 0x298, 0xa, 0x56, 0x3, 0x241, 0x2,
+ 0x57, 0x3, 0x3, 0x5, 0x4, 0x7, 0x5, 0x9, 0x6,
+ 0xb, 0x7, 0xd, 0x8, 0xf, 0x9, 0x11, 0xa, 0x13,
+ 0xb, 0x15, 0xc, 0x17, 0xd, 0x19, 0xe, 0x1b, 0xf,
+ 0x1d, 0x10, 0x1f, 0x11, 0x21, 0x12, 0x23, 0x13, 0x25,
+ 0x14, 0x27, 0x15, 0x29, 0x16, 0x2b, 0x17, 0x2d, 0x18,
+ 0x2f, 0x19, 0x31, 0x1a, 0x33, 0x1b, 0x35, 0x1c, 0x37,
+ 0x1d, 0x39, 0x1e, 0x3b, 0x1f, 0x3d, 0x20, 0x3f, 0x21,
+ 0x41, 0x22, 0x43, 0x23, 0x45, 0x24, 0x47, 0x25, 0x49,
+ 0x26, 0x4b, 0x27, 0x4d, 0x28, 0x4f, 0x29, 0x51, 0x2a,
+ 0x53, 0x2b, 0x55, 0x2c, 0x57, 0x2d, 0x59, 0x2e, 0x5b,
+ 0x2f, 0x5d, 0x30, 0x5f, 0x31, 0x61, 0x32, 0x63, 0x33,
+ 0x65, 0x34, 0x67, 0x35, 0x69, 0x36, 0x6b, 0x37, 0x6d,
+ 0x38, 0x6f, 0x39, 0x71, 0x3a, 0x73, 0x3b, 0x75, 0x3c,
+ 0x77, 0x3d, 0x79, 0x3e, 0x7b, 0x3f, 0x7d, 0x40, 0x7f,
+ 0x41, 0x81, 0x42, 0x83, 0x43, 0x85, 0x44, 0x87, 0x45,
+ 0x89, 0x46, 0x8b, 0x47, 0x8d, 0x48, 0x8f, 0x49, 0x91,
+ 0x4a, 0x93, 0x4b, 0x95, 0x4c, 0x97, 0x4d, 0x99, 0x4e,
+ 0x9b, 0x2, 0x9d, 0x4f, 0x9f, 0x50, 0xa1, 0x51, 0xa3,
+ 0x52, 0xa5, 0x2, 0xa7, 0x2, 0xa9, 0x2, 0xab, 0x53,
+ 0x3, 0x2, 0xe, 0x6, 0x2, 0xc, 0xc, 0xf, 0xf,
+ 0x24, 0x24, 0x5e, 0x5e, 0x6, 0x2, 0xc, 0xc, 0xf,
+ 0xf, 0x29, 0x29, 0x5e, 0x5e, 0x7, 0x2, 0x24, 0x24,
+ 0x29, 0x29, 0x5e, 0x5e, 0x70, 0x70, 0x74, 0x74, 0x4,
+ 0x2, 0x43, 0x5c, 0x63, 0x7c, 0x6, 0x2, 0x32, 0x3b,
+ 0x43, 0x5c, 0x61, 0x61, 0x63, 0x7c, 0x5, 0x2, 0xb,
+ 0xc, 0xe, 0xf, 0x22, 0x22, 0x4, 0x2, 0xc, 0xc,
+ 0xf, 0xf, 0x3, 0x2, 0x32, 0x3b, 0x3, 0x2, 0x33,
+ 0x3b, 0x4, 0x2, 0x47, 0x47, 0x67, 0x67, 0x4, 0x2,
+ 0x2d, 0x2d, 0x2f, 0x2f, 0x5, 0x2, 0x32, 0x3b, 0x43,
+ 0x48, 0x63, 0x68, 0x2, 0x2ba, 0x2, 0x3, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x5, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x7, 0x3, 0x2, 0x2, 0x2, 0x2, 0x9, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0xb, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0xd, 0x3, 0x2, 0x2, 0x2, 0x2, 0xf, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x11, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x13, 0x3, 0x2, 0x2, 0x2, 0x2, 0x15, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x17, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x19, 0x3, 0x2, 0x2, 0x2, 0x2, 0x1b, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x1f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x21, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x23, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x25, 0x3, 0x2, 0x2, 0x2, 0x2, 0x27, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x29, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x2b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2d, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x2f, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x31, 0x3, 0x2, 0x2, 0x2, 0x2, 0x33, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x35, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x37, 0x3, 0x2, 0x2, 0x2, 0x2, 0x39, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x3b, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x3d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x3f, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x41, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x43, 0x3, 0x2, 0x2, 0x2, 0x2, 0x45, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x47, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x49, 0x3, 0x2, 0x2, 0x2, 0x2, 0x4b, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x4d, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x4f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x51, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x53, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x55, 0x3, 0x2, 0x2, 0x2, 0x2, 0x57, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x59, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x5b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5d, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x5f, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x61, 0x3, 0x2, 0x2, 0x2, 0x2, 0x63, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x65, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x67, 0x3, 0x2, 0x2, 0x2, 0x2, 0x69, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x6b, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x6d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x6f, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x71, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x73, 0x3, 0x2, 0x2, 0x2, 0x2, 0x75, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x77, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x79, 0x3, 0x2, 0x2, 0x2, 0x2, 0x7b, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x7d, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x7f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x81, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x83, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x85, 0x3, 0x2, 0x2, 0x2, 0x2, 0x87, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x89, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x8b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x8d, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x8f, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x91, 0x3, 0x2, 0x2, 0x2, 0x2, 0x93, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x95, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x97, 0x3, 0x2, 0x2, 0x2, 0x2, 0x99, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0x9f, 0x3, 0x2, 0x2, 0x2, 0x2, 0xa1, 0x3, 0x2,
+ 0x2, 0x2, 0x2, 0xa3, 0x3, 0x2, 0x2, 0x2, 0x2,
+ 0xab, 0x3, 0x2, 0x2, 0x2, 0x3, 0xad, 0x3, 0x2,
+ 0x2, 0x2, 0x5, 0xaf, 0x3, 0x2, 0x2, 0x2, 0x7,
+ 0xb1, 0x3, 0x2, 0x2, 0x2, 0x9, 0xb4, 0x3, 0x2,
+ 0x2, 0x2, 0xb, 0xb6, 0x3, 0x2, 0x2, 0x2, 0xd,
+ 0xb8, 0x3, 0x2, 0x2, 0x2, 0xf, 0xbd, 0x3, 0x2,
+ 0x2, 0x2, 0x11, 0xbf, 0x3, 0x2, 0x2, 0x2, 0x13,
+ 0xc2, 0x3, 0x2, 0x2, 0x2, 0x15, 0xc5, 0x3, 0x2,
+ 0x2, 0x2, 0x17, 0xc7, 0x3, 0x2, 0x2, 0x2, 0x19,
+ 0xc9, 0x3, 0x2, 0x2, 0x2, 0x1b, 0xcb, 0x3, 0x2,
+ 0x2, 0x2, 0x1d, 0xcd, 0x3, 0x2, 0x2, 0x2, 0x1f,
+ 0xcf, 0x3, 0x2, 0x2, 0x2, 0x21, 0xd1, 0x3, 0x2,
+ 0x2, 0x2, 0x23, 0xd4, 0x3, 0x2, 0x2, 0x2, 0x25,
+ 0xd9, 0x3, 0x2, 0x2, 0x2, 0x27, 0xe1, 0x3, 0x2,
+ 0x2, 0x2, 0x29, 0xeb, 0x3, 0x2, 0x2, 0x2, 0x2b,
+ 0xf4, 0x3, 0x2, 0x2, 0x2, 0x2d, 0xfb, 0x3, 0x2,
+ 0x2, 0x2, 0x2f, 0x101, 0x3, 0x2, 0x2, 0x2, 0x31,
+ 0x109, 0x3, 0x2, 0x2, 0x2, 0x33, 0x111, 0x3, 0x2,
+ 0x2, 0x2, 0x35, 0x118, 0x3, 0x2, 0x2, 0x2, 0x37,
+ 0x123, 0x3, 0x2, 0x2, 0x2, 0x39, 0x12c, 0x3, 0x2,
+ 0x2, 0x2, 0x3b, 0x12f, 0x3, 0x2, 0x2, 0x2, 0x3d,
+ 0x133, 0x3, 0x2, 0x2, 0x2, 0x3f, 0x139, 0x3, 0x2,
+ 0x2, 0x2, 0x41, 0x140, 0x3, 0x2, 0x2, 0x2, 0x43,
+ 0x14a, 0x3, 0x2, 0x2, 0x2, 0x45, 0x153, 0x3, 0x2,
+ 0x2, 0x2, 0x47, 0x159, 0x3, 0x2, 0x2, 0x2, 0x49,
+ 0x15e, 0x3, 0x2, 0x2, 0x2, 0x4b, 0x168, 0x3, 0x2,
+ 0x2, 0x2, 0x4d, 0x16c, 0x3, 0x2, 0x2, 0x2, 0x4f,
+ 0x172, 0x3, 0x2, 0x2, 0x2, 0x51, 0x179, 0x3, 0x2,
+ 0x2, 0x2, 0x53, 0x17e, 0x3, 0x2, 0x2, 0x2, 0x55,
+ 0x183, 0x3, 0x2, 0x2, 0x2, 0x57, 0x186, 0x3, 0x2,
+ 0x2, 0x2, 0x59, 0x18a, 0x3, 0x2, 0x2, 0x2, 0x5b,
+ 0x190, 0x3, 0x2, 0x2, 0x2, 0x5d, 0x197, 0x3, 0x2,
+ 0x2, 0x2, 0x5f, 0x19e, 0x3, 0x2, 0x2, 0x2, 0x61,
+ 0x1a4, 0x3, 0x2, 0x2, 0x2, 0x63, 0x1b0, 0x3, 0x2,
+ 0x2, 0x2, 0x65, 0x1b6, 0x3, 0x2, 0x2, 0x2, 0x67,
+ 0x1d2, 0x3, 0x2, 0x2, 0x2, 0x69, 0x1d4, 0x3, 0x2,
+ 0x2, 0x2, 0x6b, 0x1d7, 0x3, 0x2, 0x2, 0x2, 0x6d,
+ 0x1d9, 0x3, 0x2, 0x2, 0x2, 0x6f, 0x1db, 0x3, 0x2,
+ 0x2, 0x2, 0x71, 0x1dd, 0x3, 0x2, 0x2, 0x2, 0x73,
+ 0x1df, 0x3, 0x2, 0x2, 0x2, 0x75, 0x1e1, 0x3, 0x2,
+ 0x2, 0x2, 0x77, 0x1e3, 0x3, 0x2, 0x2, 0x2, 0x79,
+ 0x1e5, 0x3, 0x2, 0x2, 0x2, 0x7b, 0x1e7, 0x3, 0x2,
+ 0x2, 0x2, 0x7d, 0x1eb, 0x3, 0x2, 0x2, 0x2, 0x7f,
+ 0x1ef, 0x3, 0x2, 0x2, 0x2, 0x81, 0x1f2, 0x3, 0x2,
+ 0x2, 0x2, 0x83, 0x1f4, 0x3, 0x2, 0x2, 0x2, 0x85,
+ 0x1f7, 0x3, 0x2, 0x2, 0x2, 0x87, 0x1f9, 0x3, 0x2,
+ 0x2, 0x2, 0x89, 0x1fc, 0x3, 0x2, 0x2, 0x2, 0x8b,
+ 0x1ff, 0x3, 0x2, 0x2, 0x2, 0x8d, 0x202, 0x3, 0x2,
+ 0x2, 0x2, 0x8f, 0x206, 0x3, 0x2, 0x2, 0x2, 0x91,
+ 0x20c, 0x3, 0x2, 0x2, 0x2, 0x93, 0x20e, 0x3, 0x2,
+ 0x2, 0x2, 0x95, 0x211, 0x3, 0x2, 0x2, 0x2, 0x97,
+ 0x214, 0x3, 0x2, 0x2, 0x2, 0x99, 0x228, 0x3, 0x2,
+ 0x2, 0x2, 0x9b, 0x22a, 0x3, 0x2, 0x2, 0x2, 0x9d,
+ 0x22d, 0x3, 0x2, 0x2, 0x2, 0x9f, 0x235, 0x3, 0x2,
+ 0x2, 0x2, 0xa1, 0x23b, 0x3, 0x2, 0x2, 0x2, 0xa3,
+ 0x24b, 0x3, 0x2, 0x2, 0x2, 0xa5, 0x256, 0x3, 0x2,
+ 0x2, 0x2, 0xa7, 0x260, 0x3, 0x2, 0x2, 0x2, 0xa9,
+ 0x262, 0x3, 0x2, 0x2, 0x2, 0xab, 0x297, 0x3, 0x2,
+ 0x2, 0x2, 0xad, 0xae, 0x7, 0x2a, 0x2, 0x2, 0xae,
+ 0x4, 0x3, 0x2, 0x2, 0x2, 0xaf, 0xb0, 0x7, 0x2b,
+ 0x2, 0x2, 0xb0, 0x6, 0x3, 0x2, 0x2, 0x2, 0xb1,
+ 0xb2, 0x7, 0x3f, 0x2, 0x2, 0xb2, 0xb3, 0x7, 0x40,
+ 0x2, 0x2, 0xb3, 0x8, 0x3, 0x2, 0x2, 0x2, 0xb4,
+ 0xb5, 0x7, 0x2e, 0x2, 0x2, 0xb5, 0xa, 0x3, 0x2,
+ 0x2, 0x2, 0xb6, 0xb7, 0x7, 0x3c, 0x2, 0x2, 0xb7,
+ 0xc, 0x3, 0x2, 0x2, 0x2, 0xb8, 0xb9, 0x7, 0x76,
+ 0x2, 0x2, 0xb9, 0xba, 0x7, 0x7b, 0x2, 0x2, 0xba,
+ 0xbb, 0x7, 0x72, 0x2, 0x2, 0xbb, 0xbc, 0x7, 0x67,
+ 0x2, 0x2, 0xbc, 0xe, 0x3, 0x2, 0x2, 0x2, 0xbd,
+ 0xbe, 0x7, 0x41, 0x2, 0x2, 0xbe, 0x10, 0x3, 0x2,
+ 0x2, 0x2, 0xbf, 0xc0, 0x7, 0x7e, 0x2, 0x2, 0xc0,
+ 0xc1, 0x7, 0x7e, 0x2, 0x2, 0xc1, 0x12, 0x3, 0x2,
+ 0x2, 0x2, 0xc2, 0xc3, 0x7, 0x28, 0x2, 0x2, 0xc3,
+ 0xc4, 0x7, 0x28, 0x2, 0x2, 0xc4, 0x14, 0x3, 0x2,
+ 0x2, 0x2, 0xc5, 0xc6, 0x7, 0x30, 0x2, 0x2, 0xc6,
+ 0x16, 0x3, 0x2, 0x2, 0x2, 0xc7, 0xc8, 0x7, 0x5d,
+ 0x2, 0x2, 0xc8, 0x18, 0x3, 0x2, 0x2, 0x2, 0xc9,
+ 0xca, 0x7, 0x5f, 0x2, 0x2, 0xca, 0x1a, 0x3, 0x2,
+ 0x2, 0x2, 0xcb, 0xcc, 0x7, 0x7d, 0x2, 0x2, 0xcc,
+ 0x1c, 0x3, 0x2, 0x2, 0x2, 0xcd, 0xce, 0x7, 0x7f,
+ 0x2, 0x2, 0xce, 0x1e, 0x3, 0x2, 0x2, 0x2, 0xcf,
+ 0xd0, 0x7, 0x3d, 0x2, 0x2, 0xd0, 0x20, 0x3, 0x2,
+ 0x2, 0x2, 0xd1, 0xd2, 0x7, 0x71, 0x2, 0x2, 0xd2,
+ 0xd3, 0x7, 0x68, 0x2, 0x2, 0xd3, 0x22, 0x3, 0x2,
+ 0x2, 0x2, 0xd4, 0xd5, 0x7, 0x67, 0x2, 0x2, 0xd5,
+ 0xd6, 0x7, 0x6e, 0x2, 0x2, 0xd6, 0xd7, 0x7, 0x75,
+ 0x2, 0x2, 0xd7, 0xd8, 0x7, 0x67, 0x2, 0x2, 0xd8,
+ 0x24, 0x3, 0x2, 0x2, 0x2, 0xd9, 0xda, 0x7, 0x67,
+ 0x2, 0x2, 0xda, 0xdb, 0x7, 0x7a, 0x2, 0x2, 0xdb,
+ 0xdc, 0x7, 0x76, 0x2, 0x2, 0xdc, 0xdd, 0x7, 0x67,
+ 0x2, 0x2, 0xdd, 0xde, 0x7, 0x70, 0x2, 0x2, 0xde,
+ 0xdf, 0x7, 0x66, 0x2, 0x2, 0xdf, 0xe0, 0x7, 0x75,
+ 0x2, 0x2, 0xe0, 0x26, 0x3, 0x2, 0x2, 0x2, 0xe1,
+ 0xe2, 0x7, 0x69, 0x2, 0x2, 0xe2, 0xe3, 0x7, 0x67,
+ 0x2, 0x2, 0xe3, 0xe4, 0x7, 0x70, 0x2, 0x2, 0xe4,
+ 0xe5, 0x7, 0x67, 0x2, 0x2, 0xe5, 0xe6, 0x7, 0x74,
+ 0x2, 0x2, 0xe6, 0xe7, 0x7, 0x63, 0x2, 0x2, 0xe7,
+ 0xe8, 0x7, 0x76, 0x2, 0x2, 0xe8, 0xe9, 0x7, 0x67,
+ 0x2, 0x2, 0xe9, 0xea, 0x7, 0x75, 0x2, 0x2, 0xea,
+ 0x28, 0x3, 0x2, 0x2, 0x2, 0xeb, 0xec, 0x7, 0x71,
+ 0x2, 0x2, 0xec, 0xed, 0x7, 0x72, 0x2, 0x2, 0xed,
+ 0xee, 0x7, 0x67, 0x2, 0x2, 0xee, 0xef, 0x7, 0x74,
+ 0x2, 0x2, 0xef, 0xf0, 0x7, 0x63, 0x2, 0x2, 0xf0,
+ 0xf1, 0x7, 0x76, 0x2, 0x2, 0xf1, 0xf2, 0x7, 0x71,
+ 0x2, 0x2, 0xf2, 0xf3, 0x7, 0x74, 0x2, 0x2, 0xf3,
+ 0x2a, 0x3, 0x2, 0x2, 0x2, 0xf4, 0xf5, 0x7, 0x75,
+ 0x2, 0x2, 0xf5, 0xf6, 0x7, 0x76, 0x2, 0x2, 0xf6,
+ 0xf7, 0x7, 0x74, 0x2, 0x2, 0xf7, 0xf8, 0x7, 0x77,
+ 0x2, 0x2, 0xf8, 0xf9, 0x7, 0x65, 0x2, 0x2, 0xf9,
+ 0xfa, 0x7, 0x76, 0x2, 0x2, 0xfa, 0x2c, 0x3, 0x2,
+ 0x2, 0x2, 0xfb, 0xfc, 0x7, 0x6f, 0x2, 0x2, 0xfc,
+ 0xfd, 0x7, 0x63, 0x2, 0x2, 0xfd, 0xfe, 0x7, 0x65,
+ 0x2, 0x2, 0xfe, 0xff, 0x7, 0x74, 0x2, 0x2, 0xff,
+ 0x100, 0x7, 0x71, 0x2, 0x2, 0x100, 0x2e, 0x3, 0x2,
+ 0x2, 0x2, 0x101, 0x102, 0x7, 0x64, 0x2, 0x2, 0x102,
+ 0x103, 0x7, 0x77, 0x2, 0x2, 0x103, 0x104, 0x7, 0x6b,
+ 0x2, 0x2, 0x104, 0x105, 0x7, 0x6e, 0x2, 0x2, 0x105,
+ 0x106, 0x7, 0x76, 0x2, 0x2, 0x106, 0x107, 0x7, 0x6b,
+ 0x2, 0x2, 0x107, 0x108, 0x7, 0x70, 0x2, 0x2, 0x108,
+ 0x30, 0x3, 0x2, 0x2, 0x2, 0x109, 0x10a, 0x7, 0x74,
+ 0x2, 0x2, 0x10a, 0x10b, 0x7, 0x77, 0x2, 0x2, 0x10b,
+ 0x10c, 0x7, 0x70, 0x2, 0x2, 0x10c, 0x10d, 0x7, 0x76,
+ 0x2, 0x2, 0x10d, 0x10e, 0x7, 0x6b, 0x2, 0x2, 0x10e,
+ 0x10f, 0x7, 0x6f, 0x2, 0x2, 0x10f, 0x110, 0x7, 0x67,
+ 0x2, 0x2, 0x110, 0x32, 0x3, 0x2, 0x2, 0x2, 0x111,
+ 0x112, 0x7, 0x6f, 0x2, 0x2, 0x112, 0x113, 0x7, 0x71,
+ 0x2, 0x2, 0x113, 0x114, 0x7, 0x66, 0x2, 0x2, 0x114,
+ 0x115, 0x7, 0x77, 0x2, 0x2, 0x115, 0x116, 0x7, 0x6e,
+ 0x2, 0x2, 0x116, 0x117, 0x7, 0x67, 0x2, 0x2, 0x117,
+ 0x34, 0x3, 0x2, 0x2, 0x2, 0x118, 0x119, 0x7, 0x6c,
+ 0x2, 0x2, 0x119, 0x11a, 0x7, 0x63, 0x2, 0x2, 0x11a,
+ 0x11b, 0x7, 0x78, 0x2, 0x2, 0x11b, 0x11c, 0x7, 0x63,
+ 0x2, 0x2, 0x11c, 0x11d, 0x7, 0x75, 0x2, 0x2, 0x11d,
+ 0x11e, 0x7, 0x65, 0x2, 0x2, 0x11e, 0x11f, 0x7, 0x74,
+ 0x2, 0x2, 0x11f, 0x120, 0x7, 0x6b, 0x2, 0x2, 0x120,
+ 0x121, 0x7, 0x72, 0x2, 0x2, 0x121, 0x122, 0x7, 0x76,
+ 0x2, 0x2, 0x122, 0x36, 0x3, 0x2, 0x2, 0x2, 0x123,
+ 0x124, 0x7, 0x66, 0x2, 0x2, 0x124, 0x125, 0x7, 0x67,
+ 0x2, 0x2, 0x125, 0x126, 0x7, 0x68, 0x2, 0x2, 0x126,
+ 0x127, 0x7, 0x67, 0x2, 0x2, 0x127, 0x128, 0x7, 0x74,
+ 0x2, 0x2, 0x128, 0x129, 0x7, 0x74, 0x2, 0x2, 0x129,
+ 0x12a, 0x7, 0x67, 0x2, 0x2, 0x12a, 0x12b, 0x7, 0x66,
+ 0x2, 0x2, 0x12b, 0x38, 0x3, 0x2, 0x2, 0x2, 0x12c,
+ 0x12d, 0x7, 0x6b, 0x2, 0x2, 0x12d, 0x12e, 0x7, 0x68,
+ 0x2, 0x2, 0x12e, 0x3a, 0x3, 0x2, 0x2, 0x2, 0x12f,
+ 0x130, 0x7, 0x68, 0x2, 0x2, 0x130, 0x131, 0x7, 0x71,
+ 0x2, 0x2, 0x131, 0x132, 0x7, 0x74, 0x2, 0x2, 0x132,
+ 0x3c, 0x3, 0x2, 0x2, 0x2, 0x133, 0x134, 0x7, 0x79,
+ 0x2, 0x2, 0x134, 0x135, 0x7, 0x6a, 0x2, 0x2, 0x135,
+ 0x136, 0x7, 0x6b, 0x2, 0x2, 0x136, 0x137, 0x7, 0x6e,
+ 0x2, 0x2, 0x137, 0x138, 0x7, 0x67, 0x2, 0x2, 0x138,
+ 0x3e, 0x3, 0x2, 0x2, 0x2, 0x139, 0x13a, 0x7, 0x74,
+ 0x2, 0x2, 0x13a, 0x13b, 0x7, 0x67, 0x2, 0x2, 0x13b,
+ 0x13c, 0x7, 0x76, 0x2, 0x2, 0x13c, 0x13d, 0x7, 0x77,
+ 0x2, 0x2, 0x13d, 0x13e, 0x7, 0x74, 0x2, 0x2, 0x13e,
+ 0x13f, 0x7, 0x70, 0x2, 0x2, 0x13f, 0x40, 0x3, 0x2,
+ 0x2, 0x2, 0x140, 0x141, 0x7, 0x65, 0x2, 0x2, 0x141,
+ 0x142, 0x7, 0x71, 0x2, 0x2, 0x142, 0x143, 0x7, 0x70,
+ 0x2, 0x2, 0x143, 0x144, 0x7, 0x75, 0x2, 0x2, 0x144,
+ 0x145, 0x7, 0x76, 0x2, 0x2, 0x145, 0x146, 0x7, 0x67,
+ 0x2, 0x2, 0x146, 0x147, 0x7, 0x7a, 0x2, 0x2, 0x147,
+ 0x148, 0x7, 0x72, 0x2, 0x2, 0x148, 0x149, 0x7, 0x74,
+ 0x2, 0x2, 0x149, 0x42, 0x3, 0x2, 0x2, 0x2, 0x14a,
+ 0x14b, 0x7, 0x65, 0x2, 0x2, 0x14b, 0x14c, 0x7, 0x71,
+ 0x2, 0x2, 0x14c, 0x14d, 0x7, 0x70, 0x2, 0x2, 0x14d,
+ 0x14e, 0x7, 0x76, 0x2, 0x2, 0x14e, 0x14f, 0x7, 0x6b,
+ 0x2, 0x2, 0x14f, 0x150, 0x7, 0x70, 0x2, 0x2, 0x150,
+ 0x151, 0x7, 0x77, 0x2, 0x2, 0x151, 0x152, 0x7, 0x67,
+ 0x2, 0x2, 0x152, 0x44, 0x3, 0x2, 0x2, 0x2, 0x153,
+ 0x154, 0x7, 0x64, 0x2, 0x2, 0x154, 0x155, 0x7, 0x74,
+ 0x2, 0x2, 0x155, 0x156, 0x7, 0x67, 0x2, 0x2, 0x156,
+ 0x157, 0x7, 0x63, 0x2, 0x2, 0x157, 0x158, 0x7, 0x6d,
+ 0x2, 0x2, 0x158, 0x46, 0x3, 0x2, 0x2, 0x2, 0x159,
+ 0x15a, 0x7, 0x69, 0x2, 0x2, 0x15a, 0x15b, 0x7, 0x71,
+ 0x2, 0x2, 0x15b, 0x15c, 0x7, 0x76, 0x2, 0x2, 0x15c,
+ 0x15d, 0x7, 0x71, 0x2, 0x2, 0x15d, 0x48, 0x3, 0x2,
+ 0x2, 0x2, 0x15e, 0x15f, 0x7, 0x71, 0x2, 0x2, 0x15f,
+ 0x160, 0x7, 0x76, 0x2, 0x2, 0x160, 0x161, 0x7, 0x6a,
+ 0x2, 0x2, 0x161, 0x162, 0x7, 0x67, 0x2, 0x2, 0x162,
+ 0x163, 0x7, 0x74, 0x2, 0x2, 0x163, 0x164, 0x7, 0x79,
+ 0x2, 0x2, 0x164, 0x165, 0x7, 0x6b, 0x2, 0x2, 0x165,
+ 0x166, 0x7, 0x75, 0x2, 0x2, 0x166, 0x167, 0x7, 0x67,
+ 0x2, 0x2, 0x167, 0x4a, 0x3, 0x2, 0x2, 0x2, 0x168,
+ 0x169, 0x7, 0x76, 0x2, 0x2, 0x169, 0x16a, 0x7, 0x74,
+ 0x2, 0x2, 0x16a, 0x16b, 0x7, 0x7b, 0x2, 0x2, 0x16b,
+ 0x4c, 0x3, 0x2, 0x2, 0x2, 0x16c, 0x16d, 0x7, 0x6e,
+ 0x2, 0x2, 0x16d, 0x16e, 0x7, 0x63, 0x2, 0x2, 0x16e,
+ 0x16f, 0x7, 0x64, 0x2, 0x2, 0x16f, 0x170, 0x7, 0x67,
+ 0x2, 0x2, 0x170, 0x171, 0x7, 0x6e, 0x2, 0x2, 0x171,
+ 0x4e, 0x3, 0x2, 0x2, 0x2, 0x172, 0x173, 0x7, 0x6e,
+ 0x2, 0x2, 0x173, 0x174, 0x7, 0x63, 0x2, 0x2, 0x174,
+ 0x175, 0x7, 0x64, 0x2, 0x2, 0x175, 0x176, 0x7, 0x67,
+ 0x2, 0x2, 0x176, 0x177, 0x7, 0x6e, 0x2, 0x2, 0x177,
+ 0x178, 0x7, 0x75, 0x2, 0x2, 0x178, 0x50, 0x3, 0x2,
+ 0x2, 0x2, 0x179, 0x17a, 0x7, 0x76, 0x2, 0x2, 0x17a,
+ 0x17b, 0x7, 0x63, 0x2, 0x2, 0x17b, 0x17c, 0x7, 0x6b,
+ 0x2, 0x2, 0x17c, 0x17d, 0x7, 0x6e, 0x2, 0x2, 0x17d,
+ 0x52, 0x3, 0x2, 0x2, 0x2, 0x17e, 0x17f, 0x7, 0x6b,
+ 0x2, 0x2, 0x17f, 0x180, 0x7, 0x75, 0x2, 0x2, 0x180,
+ 0x181, 0x7, 0x70, 0x2, 0x2, 0x181, 0x182, 0x7, 0x76,
+ 0x2, 0x2, 0x182, 0x54, 0x3, 0x2, 0x2, 0x2, 0x183,
+ 0x184, 0x7, 0x6b, 0x2, 0x2, 0x184, 0x185, 0x7, 0x75,
+ 0x2, 0x2, 0x185, 0x56, 0x3, 0x2, 0x2, 0x2, 0x186,
+ 0x187, 0x7, 0x6e, 0x2, 0x2, 0x187, 0x188, 0x7, 0x67,
+ 0x2, 0x2, 0x188, 0x189, 0x7, 0x76, 0x2, 0x2, 0x189,
+ 0x58, 0x3, 0x2, 0x2, 0x2, 0x18a, 0x18b, 0x7, 0x65,
+ 0x2, 0x2, 0x18b, 0x18c, 0x7, 0x71, 0x2, 0x2, 0x18c,
+ 0x18d, 0x7, 0x70, 0x2, 0x2, 0x18d, 0x18e, 0x7, 0x75,
+ 0x2, 0x2, 0x18e, 0x18f, 0x7, 0x76, 0x2, 0x2, 0x18f,
+ 0x5a, 0x3, 0x2, 0x2, 0x2, 0x190, 0x191, 0x7, 0x67,
+ 0x2, 0x2, 0x191, 0x192, 0x7, 0x7a, 0x2, 0x2, 0x192,
+ 0x193, 0x7, 0x76, 0x2, 0x2, 0x193, 0x194, 0x7, 0x67,
+ 0x2, 0x2, 0x194, 0x195, 0x7, 0x74, 0x2, 0x2, 0x195,
+ 0x196, 0x7, 0x70, 0x2, 0x2, 0x196, 0x5c, 0x3, 0x2,
+ 0x2, 0x2, 0x197, 0x198, 0x7, 0x63, 0x2, 0x2, 0x198,
+ 0x199, 0x7, 0x75, 0x2, 0x2, 0x199, 0x19a, 0x7, 0x75,
+ 0x2, 0x2, 0x19a, 0x19b, 0x7, 0x67, 0x2, 0x2, 0x19b,
+ 0x19c, 0x7, 0x74, 0x2, 0x2, 0x19c, 0x19d, 0x7, 0x76,
+ 0x2, 0x2, 0x19d, 0x5e, 0x3, 0x2, 0x2, 0x2, 0x19e,
+ 0x19f, 0x7, 0x65, 0x2, 0x2, 0x19f, 0x1a0, 0x7, 0x6a,
+ 0x2, 0x2, 0x1a0, 0x1a1, 0x7, 0x67, 0x2, 0x2, 0x1a1,
+ 0x1a2, 0x7, 0x65, 0x2, 0x2, 0x1a2, 0x1a3, 0x7, 0x6d,
+ 0x2, 0x2, 0x1a3, 0x60, 0x3, 0x2, 0x2, 0x2, 0x1a4,
+ 0x1a5, 0x7, 0x77, 0x2, 0x2, 0x1a5, 0x1a6, 0x7, 0x70,
+ 0x2, 0x2, 0x1a6, 0x1a7, 0x7, 0x74, 0x2, 0x2, 0x1a7,
+ 0x1a8, 0x7, 0x67, 0x2, 0x2, 0x1a8, 0x1a9, 0x7, 0x63,
+ 0x2, 0x2, 0x1a9, 0x1aa, 0x7, 0x65, 0x2, 0x2, 0x1aa,
+ 0x1ab, 0x7, 0x6a, 0x2, 0x2, 0x1ab, 0x1ac, 0x7, 0x63,
+ 0x2, 0x2, 0x1ac, 0x1ad, 0x7, 0x64, 0x2, 0x2, 0x1ad,
+ 0x1ae, 0x7, 0x6e, 0x2, 0x2, 0x1ae, 0x1af, 0x7, 0x67,
+ 0x2, 0x2, 0x1af, 0x62, 0x3, 0x2, 0x2, 0x2, 0x1b0,
+ 0x1b1, 0x7, 0x66, 0x2, 0x2, 0x1b1, 0x1b2, 0x7, 0x67,
+ 0x2, 0x2, 0x1b2, 0x1b3, 0x7, 0x64, 0x2, 0x2, 0x1b3,
+ 0x1b4, 0x7, 0x77, 0x2, 0x2, 0x1b4, 0x1b5, 0x7, 0x69,
+ 0x2, 0x2, 0x1b5, 0x64, 0x3, 0x2, 0x2, 0x2, 0x1b6,
+ 0x1b7, 0x7, 0x3f, 0x2, 0x2, 0x1b7, 0x66, 0x3, 0x2,
+ 0x2, 0x2, 0x1b8, 0x1b9, 0x7, 0x2c, 0x2, 0x2, 0x1b9,
+ 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1ba, 0x1bb, 0x7, 0x31,
+ 0x2, 0x2, 0x1bb, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1bc,
+ 0x1bd, 0x7, 0x27, 0x2, 0x2, 0x1bd, 0x1d3, 0x7, 0x3f,
+ 0x2, 0x2, 0x1be, 0x1bf, 0x7, 0x2d, 0x2, 0x2, 0x1bf,
+ 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c0, 0x1c1, 0x7, 0x2f,
+ 0x2, 0x2, 0x1c1, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c2,
+ 0x1c3, 0x7, 0x3e, 0x2, 0x2, 0x1c3, 0x1c4, 0x7, 0x3e,
+ 0x2, 0x2, 0x1c4, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c5,
+ 0x1c6, 0x7, 0x40, 0x2, 0x2, 0x1c6, 0x1c7, 0x7, 0x40,
+ 0x2, 0x2, 0x1c7, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c8,
+ 0x1c9, 0x7, 0x40, 0x2, 0x2, 0x1c9, 0x1ca, 0x7, 0x40,
+ 0x2, 0x2, 0x1ca, 0x1cb, 0x7, 0x40, 0x2, 0x2, 0x1cb,
+ 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1cc, 0x1cd, 0x7, 0x28,
+ 0x2, 0x2, 0x1cd, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1ce,
+ 0x1cf, 0x7, 0x60, 0x2, 0x2, 0x1cf, 0x1d3, 0x7, 0x3f,
+ 0x2, 0x2, 0x1d0, 0x1d1, 0x7, 0x7e, 0x2, 0x2, 0x1d1,
+ 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1d2, 0x1b8, 0x3, 0x2,
+ 0x2, 0x2, 0x1d2, 0x1ba, 0x3, 0x2, 0x2, 0x2, 0x1d2,
+ 0x1bc, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1be, 0x3, 0x2,
+ 0x2, 0x2, 0x1d2, 0x1c0, 0x3, 0x2, 0x2, 0x2, 0x1d2,
+ 0x1c2, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1c5, 0x3, 0x2,
+ 0x2, 0x2, 0x1d2, 0x1c8, 0x3, 0x2, 0x2, 0x2, 0x1d2,
+ 0x1cc, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1ce, 0x3, 0x2,
+ 0x2, 0x2, 0x1d2, 0x1d0, 0x3, 0x2, 0x2, 0x2, 0x1d3,
+ 0x68, 0x3, 0x2, 0x2, 0x2, 0x1d4, 0x1d5, 0x7, 0x3f,
+ 0x2, 0x2, 0x1d5, 0x1d6, 0x7, 0x3f, 0x2, 0x2, 0x1d6,
+ 0x6a, 0x3, 0x2, 0x2, 0x2, 0x1d7, 0x1d8, 0x7, 0x2d,
+ 0x2, 0x2, 0x1d8, 0x6c, 0x3, 0x2, 0x2, 0x2, 0x1d9,
+ 0x1da, 0x7, 0x2f, 0x2, 0x2, 0x1da, 0x6e, 0x3, 0x2,
+ 0x2, 0x2, 0x1db, 0x1dc, 0x7, 0x2c, 0x2, 0x2, 0x1dc,
+ 0x70, 0x3, 0x2, 0x2, 0x2, 0x1dd, 0x1de, 0x7, 0x31,
+ 0x2, 0x2, 0x1de, 0x72, 0x3, 0x2, 0x2, 0x2, 0x1df,
+ 0x1e0, 0x7, 0x27, 0x2, 0x2, 0x1e0, 0x74, 0x3, 0x2,
+ 0x2, 0x2, 0x1e1, 0x1e2, 0x7, 0x7e, 0x2, 0x2, 0x1e2,
+ 0x76, 0x3, 0x2, 0x2, 0x2, 0x1e3, 0x1e4, 0x7, 0x28,
+ 0x2, 0x2, 0x1e4, 0x78, 0x3, 0x2, 0x2, 0x2, 0x1e5,
+ 0x1e6, 0x7, 0x80, 0x2, 0x2, 0x1e6, 0x7a, 0x3, 0x2,
+ 0x2, 0x2, 0x1e7, 0x1e8, 0x7, 0x6f, 0x2, 0x2, 0x1e8,
+ 0x1e9, 0x7, 0x63, 0x2, 0x2, 0x1e9, 0x1ea, 0x7, 0x7a,
+ 0x2, 0x2, 0x1ea, 0x7c, 0x3, 0x2, 0x2, 0x2, 0x1eb,
+ 0x1ec, 0x7, 0x6f, 0x2, 0x2, 0x1ec, 0x1ed, 0x7, 0x6b,
+ 0x2, 0x2, 0x1ed, 0x1ee, 0x7, 0x70, 0x2, 0x2, 0x1ee,
+ 0x7e, 0x3, 0x2, 0x2, 0x2, 0x1ef, 0x1f0, 0x7, 0x23,
+ 0x2, 0x2, 0x1f0, 0x1f1, 0x7, 0x3f, 0x2, 0x2, 0x1f1,
+ 0x80, 0x3, 0x2, 0x2, 0x2, 0x1f2, 0x1f3, 0x7, 0x3e,
+ 0x2, 0x2, 0x1f3, 0x82, 0x3, 0x2, 0x2, 0x2, 0x1f4,
+ 0x1f5, 0x7, 0x3e, 0x2, 0x2, 0x1f5, 0x1f6, 0x7, 0x3f,
+ 0x2, 0x2, 0x1f6, 0x84, 0x3, 0x2, 0x2, 0x2, 0x1f7,
+ 0x1f8, 0x7, 0x40, 0x2, 0x2, 0x1f8, 0x86, 0x3, 0x2,
+ 0x2, 0x2, 0x1f9, 0x1fa, 0x7, 0x40, 0x2, 0x2, 0x1fa,
+ 0x1fb, 0x7, 0x3f, 0x2, 0x2, 0x1fb, 0x88, 0x3, 0x2,
+ 0x2, 0x2, 0x1fc, 0x1fd, 0x7, 0x3e, 0x2, 0x2, 0x1fd,
+ 0x1fe, 0x7, 0x3e, 0x2, 0x2, 0x1fe, 0x8a, 0x3, 0x2,
+ 0x2, 0x2, 0x1ff, 0x200, 0x7, 0x40, 0x2, 0x2, 0x200,
+ 0x201, 0x7, 0x40, 0x2, 0x2, 0x201, 0x8c, 0x3, 0x2,
+ 0x2, 0x2, 0x202, 0x203, 0x7, 0x40, 0x2, 0x2, 0x203,
+ 0x204, 0x7, 0x40, 0x2, 0x2, 0x204, 0x205, 0x7, 0x40,
+ 0x2, 0x2, 0x205, 0x8e, 0x3, 0x2, 0x2, 0x2, 0x206,
+ 0x207, 0x7, 0x30, 0x2, 0x2, 0x207, 0x208, 0x7, 0x30,
+ 0x2, 0x2, 0x208, 0x209, 0x7, 0x30, 0x2, 0x2, 0x209,
+ 0x90, 0x3, 0x2, 0x2, 0x2, 0x20a, 0x20d, 0x5, 0x69,
+ 0x35, 0x2, 0x20b, 0x20d, 0x5, 0x7f, 0x40, 0x2, 0x20c,
+ 0x20a, 0x3, 0x2, 0x2, 0x2, 0x20c, 0x20b, 0x3, 0x2,
+ 0x2, 0x2, 0x20d, 0x92, 0x3, 0x2, 0x2, 0x2, 0x20e,
+ 0x20f, 0x7, 0x2d, 0x2, 0x2, 0x20f, 0x210, 0x7, 0x2d,
+ 0x2, 0x2, 0x210, 0x94, 0x3, 0x2, 0x2, 0x2, 0x211,
+ 0x212, 0x7, 0x2f, 0x2, 0x2, 0x212, 0x213, 0x7, 0x2f,
+ 0x2, 0x2, 0x213, 0x96, 0x3, 0x2, 0x2, 0x2, 0x214,
+ 0x215, 0x7, 0x23, 0x2, 0x2, 0x215, 0x98, 0x3, 0x2,
+ 0x2, 0x2, 0x216, 0x21b, 0x7, 0x24, 0x2, 0x2, 0x217,
+ 0x21a, 0x5, 0x9b, 0x4e, 0x2, 0x218, 0x21a, 0xa, 0x2,
+ 0x2, 0x2, 0x219, 0x217, 0x3, 0x2, 0x2, 0x2, 0x219,
+ 0x218, 0x3, 0x2, 0x2, 0x2, 0x21a, 0x21d, 0x3, 0x2,
+ 0x2, 0x2, 0x21b, 0x219, 0x3, 0x2, 0x2, 0x2, 0x21b,
+ 0x21c, 0x3, 0x2, 0x2, 0x2, 0x21c, 0x21e, 0x3, 0x2,
+ 0x2, 0x2, 0x21d, 0x21b, 0x3, 0x2, 0x2, 0x2, 0x21e,
+ 0x229, 0x7, 0x24, 0x2, 0x2, 0x21f, 0x224, 0x7, 0x29,
+ 0x2, 0x2, 0x220, 0x223, 0x5, 0x9b, 0x4e, 0x2, 0x221,
+ 0x223, 0xa, 0x3, 0x2, 0x2, 0x222, 0x220, 0x3, 0x2,
+ 0x2, 0x2, 0x222, 0x221, 0x3, 0x2, 0x2, 0x2, 0x223,
+ 0x226, 0x3, 0x2, 0x2, 0x2, 0x224, 0x222, 0x3, 0x2,
+ 0x2, 0x2, 0x224, 0x225, 0x3, 0x2, 0x2, 0x2, 0x225,
+ 0x227, 0x3, 0x2, 0x2, 0x2, 0x226, 0x224, 0x3, 0x2,
+ 0x2, 0x2, 0x227, 0x229, 0x7, 0x29, 0x2, 0x2, 0x228,
+ 0x216, 0x3, 0x2, 0x2, 0x2, 0x228, 0x21f, 0x3, 0x2,
+ 0x2, 0x2, 0x229, 0x9a, 0x3, 0x2, 0x2, 0x2, 0x22a,
+ 0x22b, 0x7, 0x5e, 0x2, 0x2, 0x22b, 0x22c, 0x9, 0x4,
+ 0x2, 0x2, 0x22c, 0x9c, 0x3, 0x2, 0x2, 0x2, 0x22d,
+ 0x231, 0x9, 0x5, 0x2, 0x2, 0x22e, 0x230, 0x9, 0x6,
+ 0x2, 0x2, 0x22f, 0x22e, 0x3, 0x2, 0x2, 0x2, 0x230,
+ 0x233, 0x3, 0x2, 0x2, 0x2, 0x231, 0x22f, 0x3, 0x2,
+ 0x2, 0x2, 0x231, 0x232, 0x3, 0x2, 0x2, 0x2, 0x232,
+ 0x9e, 0x3, 0x2, 0x2, 0x2, 0x233, 0x231, 0x3, 0x2,
+ 0x2, 0x2, 0x234, 0x236, 0x9, 0x7, 0x2, 0x2, 0x235,
+ 0x234, 0x3, 0x2, 0x2, 0x2, 0x236, 0x237, 0x3, 0x2,
+ 0x2, 0x2, 0x237, 0x235, 0x3, 0x2, 0x2, 0x2, 0x237,
+ 0x238, 0x3, 0x2, 0x2, 0x2, 0x238, 0x239, 0x3, 0x2,
+ 0x2, 0x2, 0x239, 0x23a, 0x8, 0x50, 0x2, 0x2, 0x23a,
+ 0xa0, 0x3, 0x2, 0x2, 0x2, 0x23b, 0x23c, 0x7, 0x31,
+ 0x2, 0x2, 0x23c, 0x23d, 0x7, 0x2c, 0x2, 0x2, 0x23d,
+ 0x241, 0x3, 0x2, 0x2, 0x2, 0x23e, 0x240, 0xb, 0x2,
+ 0x2, 0x2, 0x23f, 0x23e, 0x3, 0x2, 0x2, 0x2, 0x240,
+ 0x243, 0x3, 0x2, 0x2, 0x2, 0x241, 0x242, 0x3, 0x2,
+ 0x2, 0x2, 0x241, 0x23f, 0x3, 0x2, 0x2, 0x2, 0x242,
+ 0x247, 0x3, 0x2, 0x2, 0x2, 0x243, 0x241, 0x3, 0x2,
+ 0x2, 0x2, 0x244, 0x245, 0x7, 0x2c, 0x2, 0x2, 0x245,
+ 0x248, 0x7, 0x31, 0x2, 0x2, 0x246, 0x248, 0x7, 0x2,
+ 0x2, 0x3, 0x247, 0x244, 0x3, 0x2, 0x2, 0x2, 0x247,
+ 0x246, 0x3, 0x2, 0x2, 0x2, 0x248, 0x249, 0x3, 0x2,
+ 0x2, 0x2, 0x249, 0x24a, 0x8, 0x51, 0x2, 0x2, 0x24a,
+ 0xa2, 0x3, 0x2, 0x2, 0x2, 0x24b, 0x24c, 0x7, 0x31,
+ 0x2, 0x2, 0x24c, 0x24d, 0x7, 0x31, 0x2, 0x2, 0x24d,
+ 0x251, 0x3, 0x2, 0x2, 0x2, 0x24e, 0x250, 0xa, 0x8,
+ 0x2, 0x2, 0x24f, 0x24e, 0x3, 0x2, 0x2, 0x2, 0x250,
+ 0x253, 0x3, 0x2, 0x2, 0x2, 0x251, 0x24f, 0x3, 0x2,
+ 0x2, 0x2, 0x251, 0x252, 0x3, 0x2, 0x2, 0x2, 0x252,
+ 0x254, 0x3, 0x2, 0x2, 0x2, 0x253, 0x251, 0x3, 0x2,
+ 0x2, 0x2, 0x254, 0x255, 0x8, 0x52, 0x2, 0x2, 0x255,
+ 0xa4, 0x3, 0x2, 0x2, 0x2, 0x256, 0x257, 0x9, 0x9,
+ 0x2, 0x2, 0x257, 0xa6, 0x3, 0x2, 0x2, 0x2, 0x258,
+ 0x261, 0x7, 0x32, 0x2, 0x2, 0x259, 0x25d, 0x9, 0xa,
+ 0x2, 0x2, 0x25a, 0x25c, 0x5, 0xa5, 0x53, 0x2, 0x25b,
+ 0x25a, 0x3, 0x2, 0x2, 0x2, 0x25c, 0x25f, 0x3, 0x2,
+ 0x2, 0x2, 0x25d, 0x25b, 0x3, 0x2, 0x2, 0x2, 0x25d,
+ 0x25e, 0x3, 0x2, 0x2, 0x2, 0x25e, 0x261, 0x3, 0x2,
+ 0x2, 0x2, 0x25f, 0x25d, 0x3, 0x2, 0x2, 0x2, 0x260,
+ 0x258, 0x3, 0x2, 0x2, 0x2, 0x260, 0x259, 0x3, 0x2,
+ 0x2, 0x2, 0x261, 0xa8, 0x3, 0x2, 0x2, 0x2, 0x262,
+ 0x264, 0x9, 0xb, 0x2, 0x2, 0x263, 0x265, 0x9, 0xc,
+ 0x2, 0x2, 0x264, 0x263, 0x3, 0x2, 0x2, 0x2, 0x264,
+ 0x265, 0x3, 0x2, 0x2, 0x2, 0x265, 0x267, 0x3, 0x2,
+ 0x2, 0x2, 0x266, 0x268, 0x5, 0xa5, 0x53, 0x2, 0x267,
+ 0x266, 0x3, 0x2, 0x2, 0x2, 0x268, 0x269, 0x3, 0x2,
+ 0x2, 0x2, 0x269, 0x267, 0x3, 0x2, 0x2, 0x2, 0x269,
+ 0x26a, 0x3, 0x2, 0x2, 0x2, 0x26a, 0xaa, 0x3, 0x2,
+ 0x2, 0x2, 0x26b, 0x26d, 0x5, 0x6d, 0x37, 0x2, 0x26c,
+ 0x26b, 0x3, 0x2, 0x2, 0x2, 0x26c, 0x26d, 0x3, 0x2,
+ 0x2, 0x2, 0x26d, 0x26e, 0x3, 0x2, 0x2, 0x2, 0x26e,
+ 0x26f, 0x5, 0xa7, 0x54, 0x2, 0x26f, 0x273, 0x7, 0x30,
+ 0x2, 0x2, 0x270, 0x272, 0x5, 0xa5, 0x53, 0x2, 0x271,
+ 0x270, 0x3, 0x2, 0x2, 0x2, 0x272, 0x275, 0x3, 0x2,
+ 0x2, 0x2, 0x273, 0x271, 0x3, 0x2, 0x2, 0x2, 0x273,
+ 0x274, 0x3, 0x2, 0x2, 0x2, 0x274, 0x277, 0x3, 0x2,
+ 0x2, 0x2, 0x275, 0x273, 0x3, 0x2, 0x2, 0x2, 0x276,
+ 0x278, 0x5, 0xa9, 0x55, 0x2, 0x277, 0x276, 0x3, 0x2,
+ 0x2, 0x2, 0x277, 0x278, 0x3, 0x2, 0x2, 0x2, 0x278,
+ 0x298, 0x3, 0x2, 0x2, 0x2, 0x279, 0x27b, 0x5, 0x6d,
+ 0x37, 0x2, 0x27a, 0x279, 0x3, 0x2, 0x2, 0x2, 0x27a,
+ 0x27b, 0x3, 0x2, 0x2, 0x2, 0x27b, 0x27c, 0x3, 0x2,
+ 0x2, 0x2, 0x27c, 0x27e, 0x7, 0x30, 0x2, 0x2, 0x27d,
+ 0x27f, 0x5, 0xa5, 0x53, 0x2, 0x27e, 0x27d, 0x3, 0x2,
+ 0x2, 0x2, 0x27f, 0x280, 0x3, 0x2, 0x2, 0x2, 0x280,
+ 0x27e, 0x3, 0x2, 0x2, 0x2, 0x280, 0x281, 0x3, 0x2,
+ 0x2, 0x2, 0x281, 0x283, 0x3, 0x2, 0x2, 0x2, 0x282,
+ 0x284, 0x5, 0xa9, 0x55, 0x2, 0x283, 0x282, 0x3, 0x2,
+ 0x2, 0x2, 0x283, 0x284, 0x3, 0x2, 0x2, 0x2, 0x284,
+ 0x298, 0x3, 0x2, 0x2, 0x2, 0x285, 0x287, 0x5, 0x6d,
+ 0x37, 0x2, 0x286, 0x285, 0x3, 0x2, 0x2, 0x2, 0x286,
+ 0x287, 0x3, 0x2, 0x2, 0x2, 0x287, 0x288, 0x3, 0x2,
+ 0x2, 0x2, 0x288, 0x28a, 0x5, 0xa7, 0x54, 0x2, 0x289,
+ 0x28b, 0x5, 0xa9, 0x55, 0x2, 0x28a, 0x289, 0x3, 0x2,
+ 0x2, 0x2, 0x28a, 0x28b, 0x3, 0x2, 0x2, 0x2, 0x28b,
+ 0x298, 0x3, 0x2, 0x2, 0x2, 0x28c, 0x28e, 0x5, 0x6d,
+ 0x37, 0x2, 0x28d, 0x28c, 0x3, 0x2, 0x2, 0x2, 0x28d,
+ 0x28e, 0x3, 0x2, 0x2, 0x2, 0x28e, 0x28f, 0x3, 0x2,
+ 0x2, 0x2, 0x28f, 0x290, 0x7, 0x32, 0x2, 0x2, 0x290,
+ 0x291, 0x7, 0x7a, 0x2, 0x2, 0x291, 0x293, 0x3, 0x2,
+ 0x2, 0x2, 0x292, 0x294, 0x9, 0xd, 0x2, 0x2, 0x293,
+ 0x292, 0x3, 0x2, 0x2, 0x2, 0x294, 0x295, 0x3, 0x2,
+ 0x2, 0x2, 0x295, 0x293, 0x3, 0x2, 0x2, 0x2, 0x295,
+ 0x296, 0x3, 0x2, 0x2, 0x2, 0x296, 0x298, 0x3, 0x2,
+ 0x2, 0x2, 0x297, 0x26c, 0x3, 0x2, 0x2, 0x2, 0x297,
+ 0x27a, 0x3, 0x2, 0x2, 0x2, 0x297, 0x286, 0x3, 0x2,
+ 0x2, 0x2, 0x297, 0x28d, 0x3, 0x2, 0x2, 0x2, 0x298,
+ 0xac, 0x3, 0x2, 0x2, 0x2, 0x1e, 0x2, 0x1d2, 0x20c,
+ 0x219, 0x21b, 0x222, 0x224, 0x228, 0x231, 0x237, 0x241, 0x247,
+ 0x251, 0x25d, 0x260, 0x264, 0x269, 0x26c, 0x273, 0x277, 0x27a,
+ 0x280, 0x283, 0x286, 0x28a, 0x28d, 0x295, 0x297, 0x3, 0x2,
+ 0x3, 0x2,
};
atn::ATNDeserializer deserializer;
diff --git a/deps/v8/src/torque/TorqueLexer.h b/deps/v8/src/torque/TorqueLexer.h
index 4d7873f44c..c95a80debd 100644
--- a/deps/v8/src/torque/TorqueLexer.h
+++ b/deps/v8/src/torque/TorqueLexer.h
@@ -39,63 +39,61 @@ class TorqueLexer : public antlr4::Lexer {
RUNTIME = 24,
MODULE = 25,
JAVASCRIPT = 26,
- IMPLICIT = 27,
- DEFERRED = 28,
- IF = 29,
- CAST_KEYWORD = 30,
- CONVERT_KEYWORD = 31,
- FOR = 32,
- WHILE = 33,
- RETURN = 34,
- CONSTEXPR = 35,
- CONTINUE = 36,
- BREAK = 37,
- GOTO = 38,
- OTHERWISE = 39,
- TRY = 40,
- CATCH = 41,
- LABEL = 42,
- LABELS = 43,
- TAIL = 44,
- ISNT = 45,
- IS = 46,
- LET = 47,
- EXTERN = 48,
- ASSERT = 49,
- UNREACHABLE_TOKEN = 50,
- DEBUG_TOKEN = 51,
- ASSIGNMENT = 52,
- ASSIGNMENT_OPERATOR = 53,
- EQUAL = 54,
- PLUS = 55,
- MINUS = 56,
- MULTIPLY = 57,
- DIVIDE = 58,
- MODULO = 59,
- BIT_OR = 60,
- BIT_AND = 61,
- BIT_NOT = 62,
- MAX = 63,
- MIN = 64,
- NOT_EQUAL = 65,
- LESS_THAN = 66,
- LESS_THAN_EQUAL = 67,
- GREATER_THAN = 68,
- GREATER_THAN_EQUAL = 69,
- SHIFT_LEFT = 70,
- SHIFT_RIGHT = 71,
- SHIFT_RIGHT_ARITHMETIC = 72,
- VARARGS = 73,
- EQUALITY_OPERATOR = 74,
- INCREMENT = 75,
- DECREMENT = 76,
- NOT = 77,
- STRING_LITERAL = 78,
- IDENTIFIER = 79,
- WS = 80,
- BLOCK_COMMENT = 81,
- LINE_COMMENT = 82,
- DECIMAL_LITERAL = 83
+ DEFERRED = 27,
+ IF = 28,
+ FOR = 29,
+ WHILE = 30,
+ RETURN = 31,
+ CONSTEXPR = 32,
+ CONTINUE = 33,
+ BREAK = 34,
+ GOTO = 35,
+ OTHERWISE = 36,
+ TRY = 37,
+ LABEL = 38,
+ LABELS = 39,
+ TAIL = 40,
+ ISNT = 41,
+ IS = 42,
+ LET = 43,
+ CONST = 44,
+ EXTERN = 45,
+ ASSERT_TOKEN = 46,
+ CHECK_TOKEN = 47,
+ UNREACHABLE_TOKEN = 48,
+ DEBUG_TOKEN = 49,
+ ASSIGNMENT = 50,
+ ASSIGNMENT_OPERATOR = 51,
+ EQUAL = 52,
+ PLUS = 53,
+ MINUS = 54,
+ MULTIPLY = 55,
+ DIVIDE = 56,
+ MODULO = 57,
+ BIT_OR = 58,
+ BIT_AND = 59,
+ BIT_NOT = 60,
+ MAX = 61,
+ MIN = 62,
+ NOT_EQUAL = 63,
+ LESS_THAN = 64,
+ LESS_THAN_EQUAL = 65,
+ GREATER_THAN = 66,
+ GREATER_THAN_EQUAL = 67,
+ SHIFT_LEFT = 68,
+ SHIFT_RIGHT = 69,
+ SHIFT_RIGHT_ARITHMETIC = 70,
+ VARARGS = 71,
+ EQUALITY_OPERATOR = 72,
+ INCREMENT = 73,
+ DECREMENT = 74,
+ NOT = 75,
+ STRING_LITERAL = 76,
+ IDENTIFIER = 77,
+ WS = 78,
+ BLOCK_COMMENT = 79,
+ LINE_COMMENT = 80,
+ DECIMAL_LITERAL = 81
};
explicit TorqueLexer(antlr4::CharStream* input);
diff --git a/deps/v8/src/torque/TorqueLexer.interp b/deps/v8/src/torque/TorqueLexer.interp
new file mode 100644
index 0000000000..bbe1cb77ee
--- /dev/null
+++ b/deps/v8/src/torque/TorqueLexer.interp
@@ -0,0 +1,264 @@
+token literal names:
+null
+'('
+')'
+'=>'
+','
+':'
+'type'
+'?'
+'||'
+'&&'
+'.'
+'['
+']'
+'{'
+'}'
+';'
+'of'
+'else'
+'extends'
+'generates'
+'operator'
+'struct'
+'macro'
+'builtin'
+'runtime'
+'module'
+'javascript'
+'deferred'
+'if'
+'for'
+'while'
+'return'
+'constexpr'
+'continue'
+'break'
+'goto'
+'otherwise'
+'try'
+'label'
+'labels'
+'tail'
+'isnt'
+'is'
+'let'
+'const'
+'extern'
+'assert'
+'check'
+'unreachable'
+'debug'
+'='
+null
+'=='
+'+'
+'-'
+'*'
+'/'
+'%'
+'|'
+'&'
+'~'
+'max'
+'min'
+'!='
+'<'
+'<='
+'>'
+'>='
+'<<'
+'>>'
+'>>>'
+'...'
+null
+'++'
+'--'
+'!'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+null
+MACRO
+BUILTIN
+RUNTIME
+MODULE
+JAVASCRIPT
+DEFERRED
+IF
+FOR
+WHILE
+RETURN
+CONSTEXPR
+CONTINUE
+BREAK
+GOTO
+OTHERWISE
+TRY
+LABEL
+LABELS
+TAIL
+ISNT
+IS
+LET
+CONST
+EXTERN
+ASSERT_TOKEN
+CHECK_TOKEN
+UNREACHABLE_TOKEN
+DEBUG_TOKEN
+ASSIGNMENT
+ASSIGNMENT_OPERATOR
+EQUAL
+PLUS
+MINUS
+MULTIPLY
+DIVIDE
+MODULO
+BIT_OR
+BIT_AND
+BIT_NOT
+MAX
+MIN
+NOT_EQUAL
+LESS_THAN
+LESS_THAN_EQUAL
+GREATER_THAN
+GREATER_THAN_EQUAL
+SHIFT_LEFT
+SHIFT_RIGHT
+SHIFT_RIGHT_ARITHMETIC
+VARARGS
+EQUALITY_OPERATOR
+INCREMENT
+DECREMENT
+NOT
+STRING_LITERAL
+IDENTIFIER
+WS
+BLOCK_COMMENT
+LINE_COMMENT
+DECIMAL_LITERAL
+
+rule names:
+T__0
+T__1
+T__2
+T__3
+T__4
+T__5
+T__6
+T__7
+T__8
+T__9
+T__10
+T__11
+T__12
+T__13
+T__14
+T__15
+T__16
+T__17
+T__18
+T__19
+T__20
+MACRO
+BUILTIN
+RUNTIME
+MODULE
+JAVASCRIPT
+DEFERRED
+IF
+FOR
+WHILE
+RETURN
+CONSTEXPR
+CONTINUE
+BREAK
+GOTO
+OTHERWISE
+TRY
+LABEL
+LABELS
+TAIL
+ISNT
+IS
+LET
+CONST
+EXTERN
+ASSERT_TOKEN
+CHECK_TOKEN
+UNREACHABLE_TOKEN
+DEBUG_TOKEN
+ASSIGNMENT
+ASSIGNMENT_OPERATOR
+EQUAL
+PLUS
+MINUS
+MULTIPLY
+DIVIDE
+MODULO
+BIT_OR
+BIT_AND
+BIT_NOT
+MAX
+MIN
+NOT_EQUAL
+LESS_THAN
+LESS_THAN_EQUAL
+GREATER_THAN
+GREATER_THAN_EQUAL
+SHIFT_LEFT
+SHIFT_RIGHT
+SHIFT_RIGHT_ARITHMETIC
+VARARGS
+EQUALITY_OPERATOR
+INCREMENT
+DECREMENT
+NOT
+STRING_LITERAL
+ESCAPE
+IDENTIFIER
+WS
+BLOCK_COMMENT
+LINE_COMMENT
+DECIMAL_DIGIT
+DECIMAL_INTEGER_LITERAL
+EXPONENT_PART
+DECIMAL_LITERAL
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 83, 665, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, 4, 76, 9, 76, 4, 77, 9, 77, 4, 78, 9, 78, 4, 79, 9, 79, 4, 80, 9, 80, 4, 81, 9, 81, 4, 82, 9, 82, 4, 83, 9, 83, 4, 84, 9, 84, 4, 85, 9, 85, 4, 86, 9, 86, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 
27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 38, 3, 38, 3, 38, 3, 38, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 43, 3, 43, 3, 43, 3, 44, 3, 44, 3, 44, 3, 44, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 48, 3, 48, 3, 48, 3, 48, 3, 48, 3, 48, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 51, 3, 51, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 5, 52, 467, 10, 52, 3, 53, 3, 53, 3, 53, 3, 54, 3, 54, 3, 55, 3, 55, 3, 56, 3, 56, 3, 57, 3, 57, 3, 58, 3, 58, 3, 59, 3, 59, 3, 60, 3, 60, 3, 61, 3, 61, 3, 62, 3, 62, 3, 62, 3, 62, 3, 63, 3, 63, 3, 63, 3, 63, 3, 64, 3, 64, 3, 64, 3, 65, 3, 65, 3, 66, 3, 66, 3, 66, 3, 67, 3, 67, 3, 68, 3, 68, 3, 68, 3, 69, 3, 69, 3, 69, 3, 70, 3, 70, 3, 70, 3, 71, 3, 71, 3, 71, 3, 71, 3, 72, 3, 72, 3, 72, 3, 72, 3, 73, 3, 73, 5, 73, 525, 10, 73, 3, 74, 3, 74, 3, 74, 3, 75, 3, 75, 3, 75, 3, 76, 3, 76, 3, 77, 3, 77, 3, 77, 7, 77, 538, 10, 77, 12, 77, 14, 77, 541, 11, 77, 3, 77, 3, 77, 3, 77, 3, 77, 7, 77, 547, 10, 77, 12, 77, 14, 77, 550, 11, 77, 3, 77, 5, 77, 553, 10, 77, 3, 78, 3, 78, 3, 78, 3, 79, 3, 79, 7, 79, 560, 10, 
79, 12, 79, 14, 79, 563, 11, 79, 3, 80, 6, 80, 566, 10, 80, 13, 80, 14, 80, 567, 3, 80, 3, 80, 3, 81, 3, 81, 3, 81, 3, 81, 7, 81, 576, 10, 81, 12, 81, 14, 81, 579, 11, 81, 3, 81, 3, 81, 3, 81, 5, 81, 584, 10, 81, 3, 81, 3, 81, 3, 82, 3, 82, 3, 82, 3, 82, 7, 82, 592, 10, 82, 12, 82, 14, 82, 595, 11, 82, 3, 82, 3, 82, 3, 83, 3, 83, 3, 84, 3, 84, 3, 84, 7, 84, 604, 10, 84, 12, 84, 14, 84, 607, 11, 84, 5, 84, 609, 10, 84, 3, 85, 3, 85, 5, 85, 613, 10, 85, 3, 85, 6, 85, 616, 10, 85, 13, 85, 14, 85, 617, 3, 86, 5, 86, 621, 10, 86, 3, 86, 3, 86, 3, 86, 7, 86, 626, 10, 86, 12, 86, 14, 86, 629, 11, 86, 3, 86, 5, 86, 632, 10, 86, 3, 86, 5, 86, 635, 10, 86, 3, 86, 3, 86, 6, 86, 639, 10, 86, 13, 86, 14, 86, 640, 3, 86, 5, 86, 644, 10, 86, 3, 86, 5, 86, 647, 10, 86, 3, 86, 3, 86, 5, 86, 651, 10, 86, 3, 86, 5, 86, 654, 10, 86, 3, 86, 3, 86, 3, 86, 3, 86, 6, 86, 660, 10, 86, 13, 86, 14, 86, 661, 5, 86, 664, 10, 86, 3, 577, 2, 87, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 69, 36, 71, 37, 73, 38, 75, 39, 77, 40, 79, 41, 81, 42, 83, 43, 85, 44, 87, 45, 89, 46, 91, 47, 93, 48, 95, 49, 97, 50, 99, 51, 101, 52, 103, 53, 105, 54, 107, 55, 109, 56, 111, 57, 113, 58, 115, 59, 117, 60, 119, 61, 121, 62, 123, 63, 125, 64, 127, 65, 129, 66, 131, 67, 133, 68, 135, 69, 137, 70, 139, 71, 141, 72, 143, 73, 145, 74, 147, 75, 149, 76, 151, 77, 153, 78, 155, 2, 157, 79, 159, 80, 161, 81, 163, 82, 165, 2, 167, 2, 169, 2, 171, 83, 3, 2, 14, 6, 2, 12, 12, 15, 15, 36, 36, 94, 94, 6, 2, 12, 12, 15, 15, 41, 41, 94, 94, 7, 2, 36, 36, 41, 41, 94, 94, 112, 112, 116, 116, 4, 2, 67, 92, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 11, 12, 14, 15, 34, 34, 4, 2, 12, 12, 15, 15, 3, 2, 50, 59, 3, 2, 51, 59, 4, 2, 71, 71, 103, 103, 4, 2, 45, 45, 47, 47, 5, 2, 50, 59, 67, 72, 99, 104, 2, 698, 2, 3, 
3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, 95, 3, 2, 2, 2, 2, 97, 3, 2, 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 103, 3, 2, 2, 2, 2, 105, 3, 2, 2, 2, 2, 107, 3, 2, 2, 2, 2, 109, 3, 2, 2, 2, 2, 111, 3, 2, 2, 2, 2, 113, 3, 2, 2, 2, 2, 115, 3, 2, 2, 2, 2, 117, 3, 2, 2, 2, 2, 119, 3, 2, 2, 2, 2, 121, 3, 2, 2, 2, 2, 123, 3, 2, 2, 2, 2, 125, 3, 2, 2, 2, 2, 127, 3, 2, 2, 2, 2, 129, 3, 2, 2, 2, 2, 131, 3, 2, 2, 2, 2, 133, 3, 2, 2, 2, 2, 135, 3, 2, 2, 2, 2, 137, 3, 2, 2, 2, 2, 139, 3, 2, 2, 2, 2, 141, 3, 2, 2, 2, 2, 143, 3, 2, 2, 2, 2, 145, 3, 2, 2, 2, 2, 147, 3, 2, 2, 2, 2, 149, 3, 2, 2, 2, 2, 151, 3, 2, 2, 2, 2, 153, 3, 2, 2, 2, 2, 157, 3, 2, 2, 2, 2, 159, 3, 2, 2, 2, 2, 161, 3, 2, 2, 2, 2, 163, 3, 2, 2, 2, 2, 171, 3, 2, 2, 2, 3, 173, 3, 2, 2, 2, 5, 175, 3, 2, 2, 2, 7, 177, 3, 2, 2, 2, 9, 180, 3, 2, 2, 2, 11, 182, 3, 2, 2, 2, 13, 184, 3, 2, 2, 2, 15, 189, 3, 2, 2, 2, 17, 191, 3, 2, 2, 2, 19, 194, 3, 2, 2, 2, 21, 197, 3, 2, 2, 2, 23, 199, 3, 2, 2, 2, 25, 201, 3, 2, 2, 2, 27, 203, 3, 2, 2, 2, 29, 205, 3, 2, 2, 2, 31, 207, 3, 2, 2, 2, 33, 209, 3, 2, 2, 2, 35, 212, 3, 2, 2, 2, 37, 217, 3, 2, 2, 2, 39, 225, 3, 2, 2, 2, 41, 235, 3, 2, 2, 2, 43, 244, 3, 2, 2, 2, 
45, 251, 3, 2, 2, 2, 47, 257, 3, 2, 2, 2, 49, 265, 3, 2, 2, 2, 51, 273, 3, 2, 2, 2, 53, 280, 3, 2, 2, 2, 55, 291, 3, 2, 2, 2, 57, 300, 3, 2, 2, 2, 59, 303, 3, 2, 2, 2, 61, 307, 3, 2, 2, 2, 63, 313, 3, 2, 2, 2, 65, 320, 3, 2, 2, 2, 67, 330, 3, 2, 2, 2, 69, 339, 3, 2, 2, 2, 71, 345, 3, 2, 2, 2, 73, 350, 3, 2, 2, 2, 75, 360, 3, 2, 2, 2, 77, 364, 3, 2, 2, 2, 79, 370, 3, 2, 2, 2, 81, 377, 3, 2, 2, 2, 83, 382, 3, 2, 2, 2, 85, 387, 3, 2, 2, 2, 87, 390, 3, 2, 2, 2, 89, 394, 3, 2, 2, 2, 91, 400, 3, 2, 2, 2, 93, 407, 3, 2, 2, 2, 95, 414, 3, 2, 2, 2, 97, 420, 3, 2, 2, 2, 99, 432, 3, 2, 2, 2, 101, 438, 3, 2, 2, 2, 103, 466, 3, 2, 2, 2, 105, 468, 3, 2, 2, 2, 107, 471, 3, 2, 2, 2, 109, 473, 3, 2, 2, 2, 111, 475, 3, 2, 2, 2, 113, 477, 3, 2, 2, 2, 115, 479, 3, 2, 2, 2, 117, 481, 3, 2, 2, 2, 119, 483, 3, 2, 2, 2, 121, 485, 3, 2, 2, 2, 123, 487, 3, 2, 2, 2, 125, 491, 3, 2, 2, 2, 127, 495, 3, 2, 2, 2, 129, 498, 3, 2, 2, 2, 131, 500, 3, 2, 2, 2, 133, 503, 3, 2, 2, 2, 135, 505, 3, 2, 2, 2, 137, 508, 3, 2, 2, 2, 139, 511, 3, 2, 2, 2, 141, 514, 3, 2, 2, 2, 143, 518, 3, 2, 2, 2, 145, 524, 3, 2, 2, 2, 147, 526, 3, 2, 2, 2, 149, 529, 3, 2, 2, 2, 151, 532, 3, 2, 2, 2, 153, 552, 3, 2, 2, 2, 155, 554, 3, 2, 2, 2, 157, 557, 3, 2, 2, 2, 159, 565, 3, 2, 2, 2, 161, 571, 3, 2, 2, 2, 163, 587, 3, 2, 2, 2, 165, 598, 3, 2, 2, 2, 167, 608, 3, 2, 2, 2, 169, 610, 3, 2, 2, 2, 171, 663, 3, 2, 2, 2, 173, 174, 7, 42, 2, 2, 174, 4, 3, 2, 2, 2, 175, 176, 7, 43, 2, 2, 176, 6, 3, 2, 2, 2, 177, 178, 7, 63, 2, 2, 178, 179, 7, 64, 2, 2, 179, 8, 3, 2, 2, 2, 180, 181, 7, 46, 2, 2, 181, 10, 3, 2, 2, 2, 182, 183, 7, 60, 2, 2, 183, 12, 3, 2, 2, 2, 184, 185, 7, 118, 2, 2, 185, 186, 7, 123, 2, 2, 186, 187, 7, 114, 2, 2, 187, 188, 7, 103, 2, 2, 188, 14, 3, 2, 2, 2, 189, 190, 7, 65, 2, 2, 190, 16, 3, 2, 2, 2, 191, 192, 7, 126, 2, 2, 192, 193, 7, 126, 2, 2, 193, 18, 3, 2, 2, 2, 194, 195, 7, 40, 2, 2, 195, 196, 7, 40, 2, 2, 196, 20, 3, 2, 2, 2, 197, 198, 7, 48, 2, 2, 198, 22, 3, 2, 2, 2, 199, 200, 7, 93, 2, 2, 200, 24, 3, 2, 
2, 2, 201, 202, 7, 95, 2, 2, 202, 26, 3, 2, 2, 2, 203, 204, 7, 125, 2, 2, 204, 28, 3, 2, 2, 2, 205, 206, 7, 127, 2, 2, 206, 30, 3, 2, 2, 2, 207, 208, 7, 61, 2, 2, 208, 32, 3, 2, 2, 2, 209, 210, 7, 113, 2, 2, 210, 211, 7, 104, 2, 2, 211, 34, 3, 2, 2, 2, 212, 213, 7, 103, 2, 2, 213, 214, 7, 110, 2, 2, 214, 215, 7, 117, 2, 2, 215, 216, 7, 103, 2, 2, 216, 36, 3, 2, 2, 2, 217, 218, 7, 103, 2, 2, 218, 219, 7, 122, 2, 2, 219, 220, 7, 118, 2, 2, 220, 221, 7, 103, 2, 2, 221, 222, 7, 112, 2, 2, 222, 223, 7, 102, 2, 2, 223, 224, 7, 117, 2, 2, 224, 38, 3, 2, 2, 2, 225, 226, 7, 105, 2, 2, 226, 227, 7, 103, 2, 2, 227, 228, 7, 112, 2, 2, 228, 229, 7, 103, 2, 2, 229, 230, 7, 116, 2, 2, 230, 231, 7, 99, 2, 2, 231, 232, 7, 118, 2, 2, 232, 233, 7, 103, 2, 2, 233, 234, 7, 117, 2, 2, 234, 40, 3, 2, 2, 2, 235, 236, 7, 113, 2, 2, 236, 237, 7, 114, 2, 2, 237, 238, 7, 103, 2, 2, 238, 239, 7, 116, 2, 2, 239, 240, 7, 99, 2, 2, 240, 241, 7, 118, 2, 2, 241, 242, 7, 113, 2, 2, 242, 243, 7, 116, 2, 2, 243, 42, 3, 2, 2, 2, 244, 245, 7, 117, 2, 2, 245, 246, 7, 118, 2, 2, 246, 247, 7, 116, 2, 2, 247, 248, 7, 119, 2, 2, 248, 249, 7, 101, 2, 2, 249, 250, 7, 118, 2, 2, 250, 44, 3, 2, 2, 2, 251, 252, 7, 111, 2, 2, 252, 253, 7, 99, 2, 2, 253, 254, 7, 101, 2, 2, 254, 255, 7, 116, 2, 2, 255, 256, 7, 113, 2, 2, 256, 46, 3, 2, 2, 2, 257, 258, 7, 100, 2, 2, 258, 259, 7, 119, 2, 2, 259, 260, 7, 107, 2, 2, 260, 261, 7, 110, 2, 2, 261, 262, 7, 118, 2, 2, 262, 263, 7, 107, 2, 2, 263, 264, 7, 112, 2, 2, 264, 48, 3, 2, 2, 2, 265, 266, 7, 116, 2, 2, 266, 267, 7, 119, 2, 2, 267, 268, 7, 112, 2, 2, 268, 269, 7, 118, 2, 2, 269, 270, 7, 107, 2, 2, 270, 271, 7, 111, 2, 2, 271, 272, 7, 103, 2, 2, 272, 50, 3, 2, 2, 2, 273, 274, 7, 111, 2, 2, 274, 275, 7, 113, 2, 2, 275, 276, 7, 102, 2, 2, 276, 277, 7, 119, 2, 2, 277, 278, 7, 110, 2, 2, 278, 279, 7, 103, 2, 2, 279, 52, 3, 2, 2, 2, 280, 281, 7, 108, 2, 2, 281, 282, 7, 99, 2, 2, 282, 283, 7, 120, 2, 2, 283, 284, 7, 99, 2, 2, 284, 285, 7, 117, 2, 2, 285, 286, 7, 101, 2, 2, 
286, 287, 7, 116, 2, 2, 287, 288, 7, 107, 2, 2, 288, 289, 7, 114, 2, 2, 289, 290, 7, 118, 2, 2, 290, 54, 3, 2, 2, 2, 291, 292, 7, 102, 2, 2, 292, 293, 7, 103, 2, 2, 293, 294, 7, 104, 2, 2, 294, 295, 7, 103, 2, 2, 295, 296, 7, 116, 2, 2, 296, 297, 7, 116, 2, 2, 297, 298, 7, 103, 2, 2, 298, 299, 7, 102, 2, 2, 299, 56, 3, 2, 2, 2, 300, 301, 7, 107, 2, 2, 301, 302, 7, 104, 2, 2, 302, 58, 3, 2, 2, 2, 303, 304, 7, 104, 2, 2, 304, 305, 7, 113, 2, 2, 305, 306, 7, 116, 2, 2, 306, 60, 3, 2, 2, 2, 307, 308, 7, 121, 2, 2, 308, 309, 7, 106, 2, 2, 309, 310, 7, 107, 2, 2, 310, 311, 7, 110, 2, 2, 311, 312, 7, 103, 2, 2, 312, 62, 3, 2, 2, 2, 313, 314, 7, 116, 2, 2, 314, 315, 7, 103, 2, 2, 315, 316, 7, 118, 2, 2, 316, 317, 7, 119, 2, 2, 317, 318, 7, 116, 2, 2, 318, 319, 7, 112, 2, 2, 319, 64, 3, 2, 2, 2, 320, 321, 7, 101, 2, 2, 321, 322, 7, 113, 2, 2, 322, 323, 7, 112, 2, 2, 323, 324, 7, 117, 2, 2, 324, 325, 7, 118, 2, 2, 325, 326, 7, 103, 2, 2, 326, 327, 7, 122, 2, 2, 327, 328, 7, 114, 2, 2, 328, 329, 7, 116, 2, 2, 329, 66, 3, 2, 2, 2, 330, 331, 7, 101, 2, 2, 331, 332, 7, 113, 2, 2, 332, 333, 7, 112, 2, 2, 333, 334, 7, 118, 2, 2, 334, 335, 7, 107, 2, 2, 335, 336, 7, 112, 2, 2, 336, 337, 7, 119, 2, 2, 337, 338, 7, 103, 2, 2, 338, 68, 3, 2, 2, 2, 339, 340, 7, 100, 2, 2, 340, 341, 7, 116, 2, 2, 341, 342, 7, 103, 2, 2, 342, 343, 7, 99, 2, 2, 343, 344, 7, 109, 2, 2, 344, 70, 3, 2, 2, 2, 345, 346, 7, 105, 2, 2, 346, 347, 7, 113, 2, 2, 347, 348, 7, 118, 2, 2, 348, 349, 7, 113, 2, 2, 349, 72, 3, 2, 2, 2, 350, 351, 7, 113, 2, 2, 351, 352, 7, 118, 2, 2, 352, 353, 7, 106, 2, 2, 353, 354, 7, 103, 2, 2, 354, 355, 7, 116, 2, 2, 355, 356, 7, 121, 2, 2, 356, 357, 7, 107, 2, 2, 357, 358, 7, 117, 2, 2, 358, 359, 7, 103, 2, 2, 359, 74, 3, 2, 2, 2, 360, 361, 7, 118, 2, 2, 361, 362, 7, 116, 2, 2, 362, 363, 7, 123, 2, 2, 363, 76, 3, 2, 2, 2, 364, 365, 7, 110, 2, 2, 365, 366, 7, 99, 2, 2, 366, 367, 7, 100, 2, 2, 367, 368, 7, 103, 2, 2, 368, 369, 7, 110, 2, 2, 369, 78, 3, 2, 2, 2, 370, 371, 7, 110, 2, 2, 
371, 372, 7, 99, 2, 2, 372, 373, 7, 100, 2, 2, 373, 374, 7, 103, 2, 2, 374, 375, 7, 110, 2, 2, 375, 376, 7, 117, 2, 2, 376, 80, 3, 2, 2, 2, 377, 378, 7, 118, 2, 2, 378, 379, 7, 99, 2, 2, 379, 380, 7, 107, 2, 2, 380, 381, 7, 110, 2, 2, 381, 82, 3, 2, 2, 2, 382, 383, 7, 107, 2, 2, 383, 384, 7, 117, 2, 2, 384, 385, 7, 112, 2, 2, 385, 386, 7, 118, 2, 2, 386, 84, 3, 2, 2, 2, 387, 388, 7, 107, 2, 2, 388, 389, 7, 117, 2, 2, 389, 86, 3, 2, 2, 2, 390, 391, 7, 110, 2, 2, 391, 392, 7, 103, 2, 2, 392, 393, 7, 118, 2, 2, 393, 88, 3, 2, 2, 2, 394, 395, 7, 101, 2, 2, 395, 396, 7, 113, 2, 2, 396, 397, 7, 112, 2, 2, 397, 398, 7, 117, 2, 2, 398, 399, 7, 118, 2, 2, 399, 90, 3, 2, 2, 2, 400, 401, 7, 103, 2, 2, 401, 402, 7, 122, 2, 2, 402, 403, 7, 118, 2, 2, 403, 404, 7, 103, 2, 2, 404, 405, 7, 116, 2, 2, 405, 406, 7, 112, 2, 2, 406, 92, 3, 2, 2, 2, 407, 408, 7, 99, 2, 2, 408, 409, 7, 117, 2, 2, 409, 410, 7, 117, 2, 2, 410, 411, 7, 103, 2, 2, 411, 412, 7, 116, 2, 2, 412, 413, 7, 118, 2, 2, 413, 94, 3, 2, 2, 2, 414, 415, 7, 101, 2, 2, 415, 416, 7, 106, 2, 2, 416, 417, 7, 103, 2, 2, 417, 418, 7, 101, 2, 2, 418, 419, 7, 109, 2, 2, 419, 96, 3, 2, 2, 2, 420, 421, 7, 119, 2, 2, 421, 422, 7, 112, 2, 2, 422, 423, 7, 116, 2, 2, 423, 424, 7, 103, 2, 2, 424, 425, 7, 99, 2, 2, 425, 426, 7, 101, 2, 2, 426, 427, 7, 106, 2, 2, 427, 428, 7, 99, 2, 2, 428, 429, 7, 100, 2, 2, 429, 430, 7, 110, 2, 2, 430, 431, 7, 103, 2, 2, 431, 98, 3, 2, 2, 2, 432, 433, 7, 102, 2, 2, 433, 434, 7, 103, 2, 2, 434, 435, 7, 100, 2, 2, 435, 436, 7, 119, 2, 2, 436, 437, 7, 105, 2, 2, 437, 100, 3, 2, 2, 2, 438, 439, 7, 63, 2, 2, 439, 102, 3, 2, 2, 2, 440, 441, 7, 44, 2, 2, 441, 467, 7, 63, 2, 2, 442, 443, 7, 49, 2, 2, 443, 467, 7, 63, 2, 2, 444, 445, 7, 39, 2, 2, 445, 467, 7, 63, 2, 2, 446, 447, 7, 45, 2, 2, 447, 467, 7, 63, 2, 2, 448, 449, 7, 47, 2, 2, 449, 467, 7, 63, 2, 2, 450, 451, 7, 62, 2, 2, 451, 452, 7, 62, 2, 2, 452, 467, 7, 63, 2, 2, 453, 454, 7, 64, 2, 2, 454, 455, 7, 64, 2, 2, 455, 467, 7, 63, 2, 2, 456, 457, 7, 
64, 2, 2, 457, 458, 7, 64, 2, 2, 458, 459, 7, 64, 2, 2, 459, 467, 7, 63, 2, 2, 460, 461, 7, 40, 2, 2, 461, 467, 7, 63, 2, 2, 462, 463, 7, 96, 2, 2, 463, 467, 7, 63, 2, 2, 464, 465, 7, 126, 2, 2, 465, 467, 7, 63, 2, 2, 466, 440, 3, 2, 2, 2, 466, 442, 3, 2, 2, 2, 466, 444, 3, 2, 2, 2, 466, 446, 3, 2, 2, 2, 466, 448, 3, 2, 2, 2, 466, 450, 3, 2, 2, 2, 466, 453, 3, 2, 2, 2, 466, 456, 3, 2, 2, 2, 466, 460, 3, 2, 2, 2, 466, 462, 3, 2, 2, 2, 466, 464, 3, 2, 2, 2, 467, 104, 3, 2, 2, 2, 468, 469, 7, 63, 2, 2, 469, 470, 7, 63, 2, 2, 470, 106, 3, 2, 2, 2, 471, 472, 7, 45, 2, 2, 472, 108, 3, 2, 2, 2, 473, 474, 7, 47, 2, 2, 474, 110, 3, 2, 2, 2, 475, 476, 7, 44, 2, 2, 476, 112, 3, 2, 2, 2, 477, 478, 7, 49, 2, 2, 478, 114, 3, 2, 2, 2, 479, 480, 7, 39, 2, 2, 480, 116, 3, 2, 2, 2, 481, 482, 7, 126, 2, 2, 482, 118, 3, 2, 2, 2, 483, 484, 7, 40, 2, 2, 484, 120, 3, 2, 2, 2, 485, 486, 7, 128, 2, 2, 486, 122, 3, 2, 2, 2, 487, 488, 7, 111, 2, 2, 488, 489, 7, 99, 2, 2, 489, 490, 7, 122, 2, 2, 490, 124, 3, 2, 2, 2, 491, 492, 7, 111, 2, 2, 492, 493, 7, 107, 2, 2, 493, 494, 7, 112, 2, 2, 494, 126, 3, 2, 2, 2, 495, 496, 7, 35, 2, 2, 496, 497, 7, 63, 2, 2, 497, 128, 3, 2, 2, 2, 498, 499, 7, 62, 2, 2, 499, 130, 3, 2, 2, 2, 500, 501, 7, 62, 2, 2, 501, 502, 7, 63, 2, 2, 502, 132, 3, 2, 2, 2, 503, 504, 7, 64, 2, 2, 504, 134, 3, 2, 2, 2, 505, 506, 7, 64, 2, 2, 506, 507, 7, 63, 2, 2, 507, 136, 3, 2, 2, 2, 508, 509, 7, 62, 2, 2, 509, 510, 7, 62, 2, 2, 510, 138, 3, 2, 2, 2, 511, 512, 7, 64, 2, 2, 512, 513, 7, 64, 2, 2, 513, 140, 3, 2, 2, 2, 514, 515, 7, 64, 2, 2, 515, 516, 7, 64, 2, 2, 516, 517, 7, 64, 2, 2, 517, 142, 3, 2, 2, 2, 518, 519, 7, 48, 2, 2, 519, 520, 7, 48, 2, 2, 520, 521, 7, 48, 2, 2, 521, 144, 3, 2, 2, 2, 522, 525, 5, 105, 53, 2, 523, 525, 5, 127, 64, 2, 524, 522, 3, 2, 2, 2, 524, 523, 3, 2, 2, 2, 525, 146, 3, 2, 2, 2, 526, 527, 7, 45, 2, 2, 527, 528, 7, 45, 2, 2, 528, 148, 3, 2, 2, 2, 529, 530, 7, 47, 2, 2, 530, 531, 7, 47, 2, 2, 531, 150, 3, 2, 2, 2, 532, 533, 7, 35, 2, 2, 533, 152, 3, 
2, 2, 2, 534, 539, 7, 36, 2, 2, 535, 538, 5, 155, 78, 2, 536, 538, 10, 2, 2, 2, 537, 535, 3, 2, 2, 2, 537, 536, 3, 2, 2, 2, 538, 541, 3, 2, 2, 2, 539, 537, 3, 2, 2, 2, 539, 540, 3, 2, 2, 2, 540, 542, 3, 2, 2, 2, 541, 539, 3, 2, 2, 2, 542, 553, 7, 36, 2, 2, 543, 548, 7, 41, 2, 2, 544, 547, 5, 155, 78, 2, 545, 547, 10, 3, 2, 2, 546, 544, 3, 2, 2, 2, 546, 545, 3, 2, 2, 2, 547, 550, 3, 2, 2, 2, 548, 546, 3, 2, 2, 2, 548, 549, 3, 2, 2, 2, 549, 551, 3, 2, 2, 2, 550, 548, 3, 2, 2, 2, 551, 553, 7, 41, 2, 2, 552, 534, 3, 2, 2, 2, 552, 543, 3, 2, 2, 2, 553, 154, 3, 2, 2, 2, 554, 555, 7, 94, 2, 2, 555, 556, 9, 4, 2, 2, 556, 156, 3, 2, 2, 2, 557, 561, 9, 5, 2, 2, 558, 560, 9, 6, 2, 2, 559, 558, 3, 2, 2, 2, 560, 563, 3, 2, 2, 2, 561, 559, 3, 2, 2, 2, 561, 562, 3, 2, 2, 2, 562, 158, 3, 2, 2, 2, 563, 561, 3, 2, 2, 2, 564, 566, 9, 7, 2, 2, 565, 564, 3, 2, 2, 2, 566, 567, 3, 2, 2, 2, 567, 565, 3, 2, 2, 2, 567, 568, 3, 2, 2, 2, 568, 569, 3, 2, 2, 2, 569, 570, 8, 80, 2, 2, 570, 160, 3, 2, 2, 2, 571, 572, 7, 49, 2, 2, 572, 573, 7, 44, 2, 2, 573, 577, 3, 2, 2, 2, 574, 576, 11, 2, 2, 2, 575, 574, 3, 2, 2, 2, 576, 579, 3, 2, 2, 2, 577, 578, 3, 2, 2, 2, 577, 575, 3, 2, 2, 2, 578, 583, 3, 2, 2, 2, 579, 577, 3, 2, 2, 2, 580, 581, 7, 44, 2, 2, 581, 584, 7, 49, 2, 2, 582, 584, 7, 2, 2, 3, 583, 580, 3, 2, 2, 2, 583, 582, 3, 2, 2, 2, 584, 585, 3, 2, 2, 2, 585, 586, 8, 81, 2, 2, 586, 162, 3, 2, 2, 2, 587, 588, 7, 49, 2, 2, 588, 589, 7, 49, 2, 2, 589, 593, 3, 2, 2, 2, 590, 592, 10, 8, 2, 2, 591, 590, 3, 2, 2, 2, 592, 595, 3, 2, 2, 2, 593, 591, 3, 2, 2, 2, 593, 594, 3, 2, 2, 2, 594, 596, 3, 2, 2, 2, 595, 593, 3, 2, 2, 2, 596, 597, 8, 82, 2, 2, 597, 164, 3, 2, 2, 2, 598, 599, 9, 9, 2, 2, 599, 166, 3, 2, 2, 2, 600, 609, 7, 50, 2, 2, 601, 605, 9, 10, 2, 2, 602, 604, 5, 165, 83, 2, 603, 602, 3, 2, 2, 2, 604, 607, 3, 2, 2, 2, 605, 603, 3, 2, 2, 2, 605, 606, 3, 2, 2, 2, 606, 609, 3, 2, 2, 2, 607, 605, 3, 2, 2, 2, 608, 600, 3, 2, 2, 2, 608, 601, 3, 2, 2, 2, 609, 168, 3, 2, 2, 2, 610, 612, 9, 11, 2, 2, 
611, 613, 9, 12, 2, 2, 612, 611, 3, 2, 2, 2, 612, 613, 3, 2, 2, 2, 613, 615, 3, 2, 2, 2, 614, 616, 5, 165, 83, 2, 615, 614, 3, 2, 2, 2, 616, 617, 3, 2, 2, 2, 617, 615, 3, 2, 2, 2, 617, 618, 3, 2, 2, 2, 618, 170, 3, 2, 2, 2, 619, 621, 5, 109, 55, 2, 620, 619, 3, 2, 2, 2, 620, 621, 3, 2, 2, 2, 621, 622, 3, 2, 2, 2, 622, 623, 5, 167, 84, 2, 623, 627, 7, 48, 2, 2, 624, 626, 5, 165, 83, 2, 625, 624, 3, 2, 2, 2, 626, 629, 3, 2, 2, 2, 627, 625, 3, 2, 2, 2, 627, 628, 3, 2, 2, 2, 628, 631, 3, 2, 2, 2, 629, 627, 3, 2, 2, 2, 630, 632, 5, 169, 85, 2, 631, 630, 3, 2, 2, 2, 631, 632, 3, 2, 2, 2, 632, 664, 3, 2, 2, 2, 633, 635, 5, 109, 55, 2, 634, 633, 3, 2, 2, 2, 634, 635, 3, 2, 2, 2, 635, 636, 3, 2, 2, 2, 636, 638, 7, 48, 2, 2, 637, 639, 5, 165, 83, 2, 638, 637, 3, 2, 2, 2, 639, 640, 3, 2, 2, 2, 640, 638, 3, 2, 2, 2, 640, 641, 3, 2, 2, 2, 641, 643, 3, 2, 2, 2, 642, 644, 5, 169, 85, 2, 643, 642, 3, 2, 2, 2, 643, 644, 3, 2, 2, 2, 644, 664, 3, 2, 2, 2, 645, 647, 5, 109, 55, 2, 646, 645, 3, 2, 2, 2, 646, 647, 3, 2, 2, 2, 647, 648, 3, 2, 2, 2, 648, 650, 5, 167, 84, 2, 649, 651, 5, 169, 85, 2, 650, 649, 3, 2, 2, 2, 650, 651, 3, 2, 2, 2, 651, 664, 3, 2, 2, 2, 652, 654, 5, 109, 55, 2, 653, 652, 3, 2, 2, 2, 653, 654, 3, 2, 2, 2, 654, 655, 3, 2, 2, 2, 655, 656, 7, 50, 2, 2, 656, 657, 7, 122, 2, 2, 657, 659, 3, 2, 2, 2, 658, 660, 9, 13, 2, 2, 659, 658, 3, 2, 2, 2, 660, 661, 3, 2, 2, 2, 661, 659, 3, 2, 2, 2, 661, 662, 3, 2, 2, 2, 662, 664, 3, 2, 2, 2, 663, 620, 3, 2, 2, 2, 663, 634, 3, 2, 2, 2, 663, 646, 3, 2, 2, 2, 663, 653, 3, 2, 2, 2, 664, 172, 3, 2, 2, 2, 30, 2, 466, 524, 537, 539, 546, 548, 552, 561, 567, 577, 583, 593, 605, 608, 612, 617, 620, 627, 631, 634, 640, 643, 646, 650, 653, 661, 663, 3, 2, 3, 2] \ No newline at end of file
diff --git a/deps/v8/src/torque/TorqueLexer.tokens b/deps/v8/src/torque/TorqueLexer.tokens
new file mode 100644
index 0000000000..63589b27b7
--- /dev/null
+++ b/deps/v8/src/torque/TorqueLexer.tokens
@@ -0,0 +1,154 @@
+T__0=1
+T__1=2
+T__2=3
+T__3=4
+T__4=5
+T__5=6
+T__6=7
+T__7=8
+T__8=9
+T__9=10
+T__10=11
+T__11=12
+T__12=13
+T__13=14
+T__14=15
+T__15=16
+T__16=17
+T__17=18
+T__18=19
+T__19=20
+T__20=21
+MACRO=22
+BUILTIN=23
+RUNTIME=24
+MODULE=25
+JAVASCRIPT=26
+DEFERRED=27
+IF=28
+FOR=29
+WHILE=30
+RETURN=31
+CONSTEXPR=32
+CONTINUE=33
+BREAK=34
+GOTO=35
+OTHERWISE=36
+TRY=37
+LABEL=38
+LABELS=39
+TAIL=40
+ISNT=41
+IS=42
+LET=43
+CONST=44
+EXTERN=45
+ASSERT_TOKEN=46
+CHECK_TOKEN=47
+UNREACHABLE_TOKEN=48
+DEBUG_TOKEN=49
+ASSIGNMENT=50
+ASSIGNMENT_OPERATOR=51
+EQUAL=52
+PLUS=53
+MINUS=54
+MULTIPLY=55
+DIVIDE=56
+MODULO=57
+BIT_OR=58
+BIT_AND=59
+BIT_NOT=60
+MAX=61
+MIN=62
+NOT_EQUAL=63
+LESS_THAN=64
+LESS_THAN_EQUAL=65
+GREATER_THAN=66
+GREATER_THAN_EQUAL=67
+SHIFT_LEFT=68
+SHIFT_RIGHT=69
+SHIFT_RIGHT_ARITHMETIC=70
+VARARGS=71
+EQUALITY_OPERATOR=72
+INCREMENT=73
+DECREMENT=74
+NOT=75
+STRING_LITERAL=76
+IDENTIFIER=77
+WS=78
+BLOCK_COMMENT=79
+LINE_COMMENT=80
+DECIMAL_LITERAL=81
+'('=1
+')'=2
+'=>'=3
+','=4
+':'=5
+'type'=6
+'?'=7
+'||'=8
+'&&'=9
+'.'=10
+'['=11
+']'=12
+'{'=13
+'}'=14
+';'=15
+'of'=16
+'else'=17
+'extends'=18
+'generates'=19
+'operator'=20
+'struct'=21
+'macro'=22
+'builtin'=23
+'runtime'=24
+'module'=25
+'javascript'=26
+'deferred'=27
+'if'=28
+'for'=29
+'while'=30
+'return'=31
+'constexpr'=32
+'continue'=33
+'break'=34
+'goto'=35
+'otherwise'=36
+'try'=37
+'label'=38
+'labels'=39
+'tail'=40
+'isnt'=41
+'is'=42
+'let'=43
+'const'=44
+'extern'=45
+'assert'=46
+'check'=47
+'unreachable'=48
+'debug'=49
+'='=50
+'=='=52
+'+'=53
+'-'=54
+'*'=55
+'/'=56
+'%'=57
+'|'=58
+'&'=59
+'~'=60
+'max'=61
+'min'=62
+'!='=63
+'<'=64
+'<='=65
+'>'=66
+'>='=67
+'<<'=68
+'>>'=69
+'>>>'=70
+'...'=71
+'++'=73
+'--'=74
+'!'=75
diff --git a/deps/v8/src/torque/TorqueListener.h b/deps/v8/src/torque/TorqueListener.h
index 3119c4435d..937ed606ba 100644
--- a/deps/v8/src/torque/TorqueListener.h
+++ b/deps/v8/src/torque/TorqueListener.h
@@ -137,6 +137,16 @@ class TorqueListener : public antlr4::tree::ParseTreeListener {
virtual void exitAssignmentExpression(
TorqueParser::AssignmentExpressionContext* ctx) = 0;
+ virtual void enterStructExpression(
+ TorqueParser::StructExpressionContext* ctx) = 0;
+ virtual void exitStructExpression(
+ TorqueParser::StructExpressionContext* ctx) = 0;
+
+ virtual void enterFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* ctx) = 0;
+ virtual void exitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* ctx) = 0;
+
virtual void enterPrimaryExpression(
TorqueParser::PrimaryExpressionContext* ctx) = 0;
virtual void exitPrimaryExpression(
@@ -221,8 +231,10 @@ class TorqueListener : public antlr4::tree::ParseTreeListener {
virtual void exitHandlerWithStatement(
TorqueParser::HandlerWithStatementContext* ctx) = 0;
- virtual void enterTryCatch(TorqueParser::TryCatchContext* ctx) = 0;
- virtual void exitTryCatch(TorqueParser::TryCatchContext* ctx) = 0;
+ virtual void enterTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* ctx) = 0;
+ virtual void exitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* ctx) = 0;
virtual void enterDiagnosticStatement(
TorqueParser::DiagnosticStatementContext* ctx) = 0;
@@ -246,6 +258,16 @@ class TorqueListener : public antlr4::tree::ParseTreeListener {
virtual void enterHelperBody(TorqueParser::HelperBodyContext* ctx) = 0;
virtual void exitHelperBody(TorqueParser::HelperBodyContext* ctx) = 0;
+ virtual void enterFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* ctx) = 0;
+ virtual void exitFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* ctx) = 0;
+
+ virtual void enterFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* ctx) = 0;
+ virtual void exitFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* ctx) = 0;
+
virtual void enterExtendsDeclaration(
TorqueParser::ExtendsDeclarationContext* ctx) = 0;
virtual void exitExtendsDeclaration(
@@ -266,6 +288,11 @@ class TorqueListener : public antlr4::tree::ParseTreeListener {
virtual void exitTypeDeclaration(
TorqueParser::TypeDeclarationContext* ctx) = 0;
+ virtual void enterTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* ctx) = 0;
+ virtual void exitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* ctx) = 0;
+
virtual void enterExternalBuiltin(
TorqueParser::ExternalBuiltinContext* ctx) = 0;
virtual void exitExternalBuiltin(
@@ -294,11 +321,21 @@ class TorqueListener : public antlr4::tree::ParseTreeListener {
virtual void exitMacroDeclaration(
TorqueParser::MacroDeclarationContext* ctx) = 0;
+ virtual void enterExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* ctx) = 0;
+ virtual void exitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* ctx) = 0;
+
virtual void enterConstDeclaration(
TorqueParser::ConstDeclarationContext* ctx) = 0;
virtual void exitConstDeclaration(
TorqueParser::ConstDeclarationContext* ctx) = 0;
+ virtual void enterStructDeclaration(
+ TorqueParser::StructDeclarationContext* ctx) = 0;
+ virtual void exitStructDeclaration(
+ TorqueParser::StructDeclarationContext* ctx) = 0;
+
virtual void enterDeclaration(TorqueParser::DeclarationContext* ctx) = 0;
virtual void exitDeclaration(TorqueParser::DeclarationContext* ctx) = 0;
diff --git a/deps/v8/src/torque/TorqueParser.cpp b/deps/v8/src/torque/TorqueParser.cpp
index b5cbcf3ca5..24548073a1 100644
--- a/deps/v8/src/torque/TorqueParser.cpp
+++ b/deps/v8/src/torque/TorqueParser.cpp
@@ -50,8 +50,16 @@ TorqueParser::TypeListContext* TorqueParser::TypeContext::typeList() {
return getRuleContext<TorqueParser::TypeListContext>(0);
}
-TorqueParser::TypeContext* TorqueParser::TypeContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
+std::vector<TorqueParser::TypeContext*> TorqueParser::TypeContext::type() {
+ return getRuleContexts<TorqueParser::TypeContext>();
+}
+
+TorqueParser::TypeContext* TorqueParser::TypeContext::type(size_t i) {
+ return getRuleContext<TorqueParser::TypeContext>(i);
+}
+
+tree::TerminalNode* TorqueParser::TypeContext::BIT_OR() {
+ return getToken(TorqueParser::BIT_OR, 0);
}
size_t TorqueParser::TypeContext::getRuleIndex() const {
@@ -76,60 +84,101 @@ antlrcpp::Any TorqueParser::TypeContext::accept(
return visitor->visitChildren(this);
}
-TorqueParser::TypeContext* TorqueParser::type() {
- TypeContext* _localctx =
- _tracker.createInstance<TypeContext>(_ctx, getState());
- enterRule(_localctx, 0, TorqueParser::RuleType);
+TorqueParser::TypeContext* TorqueParser::type() { return type(0); }
+
+TorqueParser::TypeContext* TorqueParser::type(int precedence) {
+ ParserRuleContext* parentContext = _ctx;
+ size_t parentState = getState();
+ TorqueParser::TypeContext* _localctx =
+ _tracker.createInstance<TypeContext>(_ctx, parentState);
+ TorqueParser::TypeContext* previousContext = _localctx;
+ size_t startState = 0;
+ enterRecursionRule(_localctx, 0, TorqueParser::RuleType, precedence);
+
size_t _la = 0;
- auto onExit = finally([=] { exitRule(); });
+ auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
try {
- setState(149);
+ size_t alt;
+ enterOuterAlt(_localctx, 1);
+ setState(168);
_errHandler->sync(this);
switch (_input->LA(1)) {
case TorqueParser::CONSTEXPR:
case TorqueParser::IDENTIFIER: {
- enterOuterAlt(_localctx, 1);
- setState(139);
+ setState(154);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::CONSTEXPR) {
- setState(138);
+ setState(153);
match(TorqueParser::CONSTEXPR);
}
- setState(141);
+ setState(156);
match(TorqueParser::IDENTIFIER);
break;
}
case TorqueParser::BUILTIN: {
- enterOuterAlt(_localctx, 2);
- setState(142);
+ setState(157);
match(TorqueParser::BUILTIN);
- setState(143);
+ setState(158);
match(TorqueParser::T__0);
- setState(144);
+ setState(159);
typeList();
- setState(145);
+ setState(160);
match(TorqueParser::T__1);
- setState(146);
+ setState(161);
match(TorqueParser::T__2);
- setState(147);
- type();
+ setState(162);
+ type(3);
+ break;
+ }
+
+ case TorqueParser::T__0: {
+ setState(164);
+ match(TorqueParser::T__0);
+ setState(165);
+ type(0);
+ setState(166);
+ match(TorqueParser::T__1);
break;
}
default:
throw NoViableAltException(this);
}
+ _ctx->stop = _input->LT(-1);
+ setState(175);
+ _errHandler->sync(this);
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 2,
+ _ctx);
+ while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
+ if (alt == 1) {
+ if (!_parseListeners.empty()) triggerExitRuleEvent();
+ previousContext = _localctx;
+ _localctx =
+ _tracker.createInstance<TypeContext>(parentContext, parentState);
+ pushNewRecursionContext(_localctx, startState, RuleType);
+ setState(170);
+ if (!(precpred(_ctx, 2)))
+ throw FailedPredicateException(this, "precpred(_ctx, 2)");
+ setState(171);
+ match(TorqueParser::BIT_OR);
+ setState(172);
+ type(3);
+ }
+ setState(177);
+ _errHandler->sync(this);
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input,
+ 2, _ctx);
+ }
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
_localctx->exception = std::current_exception();
_errHandler->recover(this, _localctx->exception);
}
-
return _localctx;
}
@@ -181,26 +230,26 @@ TorqueParser::TypeListContext* TorqueParser::typeList() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(159);
+ setState(186);
_errHandler->sync(this);
_la = _input->LA(1);
- if (((((_la - 23) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 23)) & ((1ULL << (TorqueParser::BUILTIN - 23)) |
- (1ULL << (TorqueParser::CONSTEXPR - 23)) |
- (1ULL << (TorqueParser::IDENTIFIER - 23)))) !=
- 0)) {
- setState(151);
- type();
- setState(156);
+ if ((((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
+ (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
+ _la == TorqueParser::IDENTIFIER) {
+ setState(178);
+ type(0);
+ setState(183);
_errHandler->sync(this);
_la = _input->LA(1);
while (_la == TorqueParser::T__3) {
- setState(152);
+ setState(179);
match(TorqueParser::T__3);
- setState(153);
- type();
- setState(158);
+ setState(180);
+ type(0);
+ setState(185);
_errHandler->sync(this);
_la = _input->LA(1);
}
@@ -265,11 +314,11 @@ TorqueParser::genericSpecializationTypeList() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(161);
+ setState(188);
match(TorqueParser::LESS_THAN);
- setState(162);
+ setState(189);
typeList();
- setState(163);
+ setState(190);
match(TorqueParser::GREATER_THAN);
} catch (RecognitionException& e) {
@@ -334,36 +383,36 @@ TorqueParser::optionalGenericTypeList() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(179);
+ setState(206);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::LESS_THAN) {
- setState(165);
+ setState(192);
match(TorqueParser::LESS_THAN);
- setState(166);
+ setState(193);
match(TorqueParser::IDENTIFIER);
- setState(167);
+ setState(194);
match(TorqueParser::T__4);
- setState(168);
+ setState(195);
match(TorqueParser::T__5);
- setState(175);
+ setState(202);
_errHandler->sync(this);
_la = _input->LA(1);
while (_la == TorqueParser::T__3) {
- setState(169);
+ setState(196);
match(TorqueParser::T__3);
- setState(170);
+ setState(197);
match(TorqueParser::IDENTIFIER);
- setState(171);
+ setState(198);
match(TorqueParser::T__4);
- setState(172);
+ setState(199);
match(TorqueParser::T__5);
- setState(177);
+ setState(204);
_errHandler->sync(this);
_la = _input->LA(1);
}
- setState(178);
+ setState(205);
match(TorqueParser::GREATER_THAN);
}
@@ -432,64 +481,64 @@ TorqueParser::typeListMaybeVarArgs() {
auto onExit = finally([=] { exitRule(); });
try {
size_t alt;
- setState(200);
+ setState(227);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 9, _ctx)) {
+ _input, 10, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(181);
+ setState(208);
match(TorqueParser::T__0);
- setState(183);
+ setState(210);
_errHandler->sync(this);
_la = _input->LA(1);
- if (((((_la - 23) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 23)) &
- ((1ULL << (TorqueParser::BUILTIN - 23)) |
- (1ULL << (TorqueParser::CONSTEXPR - 23)) |
- (1ULL << (TorqueParser::IDENTIFIER - 23)))) != 0)) {
- setState(182);
- type();
+ if ((((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
+ (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
+ _la == TorqueParser::IDENTIFIER) {
+ setState(209);
+ type(0);
}
- setState(189);
+ setState(216);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 7, _ctx);
+ _input, 8, _ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
- setState(185);
+ setState(212);
match(TorqueParser::T__3);
- setState(186);
- type();
+ setState(213);
+ type(0);
}
- setState(191);
+ setState(218);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 7, _ctx);
+ _input, 8, _ctx);
}
- setState(194);
+ setState(221);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__3) {
- setState(192);
+ setState(219);
match(TorqueParser::T__3);
- setState(193);
+ setState(220);
match(TorqueParser::VARARGS);
}
- setState(196);
+ setState(223);
match(TorqueParser::T__1);
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(197);
+ setState(224);
match(TorqueParser::T__0);
- setState(198);
+ setState(225);
match(TorqueParser::VARARGS);
- setState(199);
+ setState(226);
match(TorqueParser::T__1);
break;
}
@@ -552,18 +601,18 @@ TorqueParser::LabelParameterContext* TorqueParser::labelParameter() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(202);
+ setState(229);
match(TorqueParser::IDENTIFIER);
- setState(207);
+ setState(234);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__0) {
- setState(203);
+ setState(230);
match(TorqueParser::T__0);
- setState(204);
+ setState(231);
typeList();
- setState(205);
+ setState(232);
match(TorqueParser::T__1);
}
@@ -620,15 +669,15 @@ TorqueParser::OptionalTypeContext* TorqueParser::optionalType() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(211);
+ setState(238);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__4) {
- setState(209);
+ setState(236);
match(TorqueParser::T__4);
- setState(210);
- type();
+ setState(237);
+ type(0);
}
} catch (RecognitionException& e) {
@@ -694,24 +743,24 @@ TorqueParser::OptionalLabelListContext* TorqueParser::optionalLabelList() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(222);
+ setState(249);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::LABELS) {
- setState(213);
+ setState(240);
match(TorqueParser::LABELS);
- setState(214);
+ setState(241);
labelParameter();
- setState(219);
+ setState(246);
_errHandler->sync(this);
_la = _input->LA(1);
while (_la == TorqueParser::T__3) {
- setState(215);
+ setState(242);
match(TorqueParser::T__3);
- setState(216);
+ setState(243);
labelParameter();
- setState(221);
+ setState(248);
_errHandler->sync(this);
_la = _input->LA(1);
}
@@ -780,31 +829,31 @@ TorqueParser::OptionalOtherwiseContext* TorqueParser::optionalOtherwise() {
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(233);
+ setState(260);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 15, _ctx)) {
+ _input, 16, _ctx)) {
case 1: {
- setState(224);
+ setState(251);
match(TorqueParser::OTHERWISE);
- setState(225);
+ setState(252);
match(TorqueParser::IDENTIFIER);
- setState(230);
+ setState(257);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 14, _ctx);
+ _input, 15, _ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
- setState(226);
+ setState(253);
match(TorqueParser::T__3);
- setState(227);
+ setState(254);
match(TorqueParser::IDENTIFIER);
}
- setState(232);
+ setState(259);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 14, _ctx);
+ _input, 15, _ctx);
}
break;
}
@@ -867,21 +916,21 @@ TorqueParser::ParameterContext* TorqueParser::parameter() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(235);
+ setState(262);
match(TorqueParser::IDENTIFIER);
- setState(236);
+ setState(263);
match(TorqueParser::T__4);
- setState(238);
+ setState(265);
_errHandler->sync(this);
_la = _input->LA(1);
- if (((((_la - 23) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 23)) & ((1ULL << (TorqueParser::BUILTIN - 23)) |
- (1ULL << (TorqueParser::CONSTEXPR - 23)) |
- (1ULL << (TorqueParser::IDENTIFIER - 23)))) !=
- 0)) {
- setState(237);
- type();
+ if ((((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
+ (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
+ _la == TorqueParser::IDENTIFIER) {
+ setState(264);
+ type(0);
}
} catch (RecognitionException& e) {
@@ -950,56 +999,56 @@ TorqueParser::ParameterListContext* TorqueParser::parameterList() {
auto onExit = finally([=] { exitRule(); });
try {
- setState(261);
+ setState(288);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 19, _ctx)) {
+ _input, 20, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(240);
+ setState(267);
match(TorqueParser::T__0);
- setState(242);
+ setState(269);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::IDENTIFIER) {
- setState(241);
+ setState(268);
parameter();
}
- setState(248);
+ setState(275);
_errHandler->sync(this);
_la = _input->LA(1);
while (_la == TorqueParser::T__3) {
- setState(244);
+ setState(271);
match(TorqueParser::T__3);
- setState(245);
+ setState(272);
parameter();
- setState(250);
+ setState(277);
_errHandler->sync(this);
_la = _input->LA(1);
}
- setState(251);
+ setState(278);
match(TorqueParser::T__1);
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(252);
+ setState(279);
match(TorqueParser::T__0);
- setState(253);
+ setState(280);
parameter();
- setState(254);
+ setState(281);
match(TorqueParser::T__3);
- setState(255);
+ setState(282);
parameter();
- setState(256);
+ setState(283);
match(TorqueParser::T__3);
- setState(257);
+ setState(284);
match(TorqueParser::VARARGS);
- setState(258);
+ setState(285);
match(TorqueParser::IDENTIFIER);
- setState(259);
+ setState(286);
match(TorqueParser::T__1);
break;
}
@@ -1058,20 +1107,22 @@ TorqueParser::LabelDeclarationContext* TorqueParser::labelDeclaration() {
LabelDeclarationContext* _localctx =
_tracker.createInstance<LabelDeclarationContext>(_ctx, getState());
enterRule(_localctx, 22, TorqueParser::RuleLabelDeclaration);
- size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(263);
+ setState(290);
match(TorqueParser::IDENTIFIER);
- setState(265);
+ setState(292);
_errHandler->sync(this);
- _la = _input->LA(1);
- if (_la == TorqueParser::T__0) {
- setState(264);
- parameterList();
+ switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
+ _input, 21, _ctx)) {
+ case 1: {
+ setState(291);
+ parameterList();
+ break;
+ }
}
} catch (RecognitionException& e) {
@@ -1127,7 +1178,7 @@ TorqueParser::ExpressionContext* TorqueParser::expression() {
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(267);
+ setState(294);
conditionalExpression(0);
} catch (RecognitionException& e) {
@@ -1207,12 +1258,12 @@ TorqueParser::ConditionalExpressionContext* TorqueParser::conditionalExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(270);
+ setState(297);
logicalORExpression(0);
_ctx->stop = _input->LT(-1);
- setState(280);
+ setState(307);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 21,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 22,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1222,23 +1273,23 @@ TorqueParser::ConditionalExpressionContext* TorqueParser::conditionalExpression(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleConditionalExpression);
- setState(272);
+ setState(299);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(273);
+ setState(300);
match(TorqueParser::T__6);
- setState(274);
+ setState(301);
logicalORExpression(0);
- setState(275);
+ setState(302);
match(TorqueParser::T__4);
- setState(276);
+ setState(303);
logicalORExpression(0);
}
- setState(282);
+ setState(309);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 21, _ctx);
+ _input, 22, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1308,12 +1359,12 @@ TorqueParser::LogicalORExpressionContext* TorqueParser::logicalORExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(284);
+ setState(311);
logicalANDExpression(0);
_ctx->stop = _input->LT(-1);
- setState(291);
+ setState(318);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 22,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 23,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1322,19 +1373,19 @@ TorqueParser::LogicalORExpressionContext* TorqueParser::logicalORExpression(
_localctx = _tracker.createInstance<LogicalORExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState, RuleLogicalORExpression);
- setState(286);
+ setState(313);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(287);
+ setState(314);
match(TorqueParser::T__7);
- setState(288);
+ setState(315);
logicalANDExpression(0);
}
- setState(293);
+ setState(320);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 22, _ctx);
+ _input, 23, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1406,12 +1457,12 @@ TorqueParser::LogicalANDExpressionContext* TorqueParser::logicalANDExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(295);
+ setState(322);
bitwiseExpression(0);
_ctx->stop = _input->LT(-1);
- setState(302);
+ setState(329);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 23,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 24,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1421,19 +1472,19 @@ TorqueParser::LogicalANDExpressionContext* TorqueParser::logicalANDExpression(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleLogicalANDExpression);
- setState(297);
+ setState(324);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(298);
+ setState(325);
match(TorqueParser::T__8);
- setState(299);
+ setState(326);
bitwiseExpression(0);
}
- setState(304);
+ setState(331);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 23, _ctx);
+ _input, 24, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1513,12 +1564,12 @@ TorqueParser::BitwiseExpressionContext* TorqueParser::bitwiseExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(306);
+ setState(333);
equalityExpression(0);
_ctx->stop = _input->LT(-1);
- setState(313);
+ setState(340);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 24,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 25,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1527,11 +1578,11 @@ TorqueParser::BitwiseExpressionContext* TorqueParser::bitwiseExpression(
_localctx = _tracker.createInstance<BitwiseExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState, RuleBitwiseExpression);
- setState(308);
+ setState(335);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(309);
+ setState(336);
dynamic_cast<BitwiseExpressionContext*>(_localctx)->op = _input->LT(1);
_la = _input->LA(1);
if (!(_la == TorqueParser::BIT_OR
@@ -1543,13 +1594,13 @@ TorqueParser::BitwiseExpressionContext* TorqueParser::bitwiseExpression(
_errHandler->reportMatch(this);
consume();
}
- setState(310);
+ setState(337);
equalityExpression(0);
}
- setState(315);
+ setState(342);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 24, _ctx);
+ _input, 25, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1629,12 +1680,12 @@ TorqueParser::EqualityExpressionContext* TorqueParser::equalityExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(317);
+ setState(344);
relationalExpression(0);
_ctx->stop = _input->LT(-1);
- setState(324);
+ setState(351);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 25,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 26,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1643,11 +1694,11 @@ TorqueParser::EqualityExpressionContext* TorqueParser::equalityExpression(
_localctx = _tracker.createInstance<EqualityExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState, RuleEqualityExpression);
- setState(319);
+ setState(346);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(320);
+ setState(347);
dynamic_cast<EqualityExpressionContext*>(_localctx)->op = _input->LT(1);
_la = _input->LA(1);
if (!(_la == TorqueParser::EQUAL
@@ -1659,13 +1710,13 @@ TorqueParser::EqualityExpressionContext* TorqueParser::equalityExpression(
_errHandler->reportMatch(this);
consume();
}
- setState(321);
+ setState(348);
relationalExpression(0);
}
- setState(326);
+ setState(353);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 25, _ctx);
+ _input, 26, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1757,12 +1808,12 @@ TorqueParser::RelationalExpressionContext* TorqueParser::relationalExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(328);
+ setState(355);
shiftExpression(0);
_ctx->stop = _input->LT(-1);
- setState(335);
+ setState(362);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 26,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 27,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1772,33 +1823,33 @@ TorqueParser::RelationalExpressionContext* TorqueParser::relationalExpression(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleRelationalExpression);
- setState(330);
+ setState(357);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(331);
+ setState(358);
dynamic_cast<RelationalExpressionContext*>(_localctx)->op =
_input->LT(1);
_la = _input->LA(1);
- if (!(((((_la - 66) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 66)) &
- ((1ULL << (TorqueParser::LESS_THAN - 66)) |
- (1ULL << (TorqueParser::LESS_THAN_EQUAL - 66)) |
- (1ULL << (TorqueParser::GREATER_THAN - 66)) |
- (1ULL << (TorqueParser::GREATER_THAN_EQUAL - 66)))) != 0))) {
+ if (!(((((_la - 64) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 64)) &
+ ((1ULL << (TorqueParser::LESS_THAN - 64)) |
+ (1ULL << (TorqueParser::LESS_THAN_EQUAL - 64)) |
+ (1ULL << (TorqueParser::GREATER_THAN - 64)) |
+ (1ULL << (TorqueParser::GREATER_THAN_EQUAL - 64)))) != 0))) {
dynamic_cast<RelationalExpressionContext*>(_localctx)->op =
_errHandler->recoverInline(this);
} else {
_errHandler->reportMatch(this);
consume();
}
- setState(332);
+ setState(359);
shiftExpression(0);
}
- setState(337);
+ setState(364);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 26, _ctx);
+ _input, 27, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -1883,12 +1934,12 @@ TorqueParser::ShiftExpressionContext* TorqueParser::shiftExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(339);
+ setState(366);
additiveExpression(0);
_ctx->stop = _input->LT(-1);
- setState(346);
+ setState(373);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 27,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 28,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -1897,18 +1948,18 @@ TorqueParser::ShiftExpressionContext* TorqueParser::shiftExpression(
_localctx = _tracker.createInstance<ShiftExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState, RuleShiftExpression);
- setState(341);
+ setState(368);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(342);
+ setState(369);
dynamic_cast<ShiftExpressionContext*>(_localctx)->op = _input->LT(1);
_la = _input->LA(1);
- if (!(((((_la - 70) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 70)) &
- ((1ULL << (TorqueParser::SHIFT_LEFT - 70)) |
- (1ULL << (TorqueParser::SHIFT_RIGHT - 70)) |
- (1ULL << (TorqueParser::SHIFT_RIGHT_ARITHMETIC - 70)))) !=
+ if (!(((((_la - 68) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 68)) &
+ ((1ULL << (TorqueParser::SHIFT_LEFT - 68)) |
+ (1ULL << (TorqueParser::SHIFT_RIGHT - 68)) |
+ (1ULL << (TorqueParser::SHIFT_RIGHT_ARITHMETIC - 68)))) !=
0))) {
dynamic_cast<ShiftExpressionContext*>(_localctx)->op =
_errHandler->recoverInline(this);
@@ -1916,13 +1967,13 @@ TorqueParser::ShiftExpressionContext* TorqueParser::shiftExpression(
_errHandler->reportMatch(this);
consume();
}
- setState(343);
+ setState(370);
additiveExpression(0);
}
- setState(348);
+ setState(375);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 27, _ctx);
+ _input, 28, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -2002,12 +2053,12 @@ TorqueParser::AdditiveExpressionContext* TorqueParser::additiveExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(350);
+ setState(377);
multiplicativeExpression(0);
_ctx->stop = _input->LT(-1);
- setState(357);
+ setState(384);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 28,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 29,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -2016,11 +2067,11 @@ TorqueParser::AdditiveExpressionContext* TorqueParser::additiveExpression(
_localctx = _tracker.createInstance<AdditiveExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState, RuleAdditiveExpression);
- setState(352);
+ setState(379);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(353);
+ setState(380);
dynamic_cast<AdditiveExpressionContext*>(_localctx)->op = _input->LT(1);
_la = _input->LA(1);
if (!(_la == TorqueParser::PLUS
@@ -2032,13 +2083,13 @@ TorqueParser::AdditiveExpressionContext* TorqueParser::additiveExpression(
_errHandler->reportMatch(this);
consume();
}
- setState(354);
+ setState(381);
multiplicativeExpression(0);
}
- setState(359);
+ setState(386);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 28, _ctx);
+ _input, 29, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -2126,12 +2177,12 @@ TorqueParser::multiplicativeExpression(int precedence) {
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(361);
+ setState(388);
unaryExpression();
_ctx->stop = _input->LT(-1);
- setState(368);
+ setState(395);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 29,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 30,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
@@ -2141,11 +2192,11 @@ TorqueParser::multiplicativeExpression(int precedence) {
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleMultiplicativeExpression);
- setState(363);
+ setState(390);
if (!(precpred(_ctx, 1)))
throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(364);
+ setState(391);
dynamic_cast<MultiplicativeExpressionContext*>(_localctx)->op =
_input->LT(1);
_la = _input->LA(1);
@@ -2159,13 +2210,13 @@ TorqueParser::multiplicativeExpression(int precedence) {
_errHandler->reportMatch(this);
consume();
}
- setState(365);
+ setState(392);
unaryExpression();
}
- setState(370);
+ setState(397);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 29, _ctx);
+ _input, 30, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -2240,12 +2291,10 @@ TorqueParser::UnaryExpressionContext* TorqueParser::unaryExpression() {
auto onExit = finally([=] { exitRule(); });
try {
- setState(374);
+ setState(401);
_errHandler->sync(this);
switch (_input->LA(1)) {
case TorqueParser::T__0:
- case TorqueParser::CAST_KEYWORD:
- case TorqueParser::CONVERT_KEYWORD:
case TorqueParser::MAX:
case TorqueParser::MIN:
case TorqueParser::INCREMENT:
@@ -2254,7 +2303,7 @@ TorqueParser::UnaryExpressionContext* TorqueParser::unaryExpression() {
case TorqueParser::IDENTIFIER:
case TorqueParser::DECIMAL_LITERAL: {
enterOuterAlt(_localctx, 1);
- setState(371);
+ setState(398);
assignmentExpression();
break;
}
@@ -2264,14 +2313,14 @@ TorqueParser::UnaryExpressionContext* TorqueParser::unaryExpression() {
case TorqueParser::BIT_NOT:
case TorqueParser::NOT: {
enterOuterAlt(_localctx, 2);
- setState(372);
+ setState(399);
dynamic_cast<UnaryExpressionContext*>(_localctx)->op = _input->LT(1);
_la = _input->LA(1);
- if (!(((((_la - 55) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 55)) & ((1ULL << (TorqueParser::PLUS - 55)) |
- (1ULL << (TorqueParser::MINUS - 55)) |
- (1ULL << (TorqueParser::BIT_NOT - 55)) |
- (1ULL << (TorqueParser::NOT - 55)))) !=
+ if (!(((((_la - 53) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 53)) & ((1ULL << (TorqueParser::PLUS - 53)) |
+ (1ULL << (TorqueParser::MINUS - 53)) |
+ (1ULL << (TorqueParser::BIT_NOT - 53)) |
+ (1ULL << (TorqueParser::NOT - 53)))) !=
0))) {
dynamic_cast<UnaryExpressionContext*>(_localctx)->op =
_errHandler->recoverInline(this);
@@ -2279,7 +2328,7 @@ TorqueParser::UnaryExpressionContext* TorqueParser::unaryExpression() {
_errHandler->reportMatch(this);
consume();
}
- setState(373);
+ setState(400);
unaryExpression();
break;
}
@@ -2308,14 +2357,9 @@ tree::TerminalNode* TorqueParser::LocationExpressionContext::IDENTIFIER() {
return getToken(TorqueParser::IDENTIFIER, 0);
}
-TorqueParser::GenericSpecializationTypeListContext*
-TorqueParser::LocationExpressionContext::genericSpecializationTypeList() {
- return getRuleContext<TorqueParser::GenericSpecializationTypeListContext>(0);
-}
-
-TorqueParser::LocationExpressionContext*
-TorqueParser::LocationExpressionContext::locationExpression() {
- return getRuleContext<TorqueParser::LocationExpressionContext>(0);
+TorqueParser::PrimaryExpressionContext*
+TorqueParser::LocationExpressionContext::primaryExpression() {
+ return getRuleContext<TorqueParser::PrimaryExpressionContext>(0);
}
TorqueParser::ExpressionContext*
@@ -2323,6 +2367,11 @@ TorqueParser::LocationExpressionContext::expression() {
return getRuleContext<TorqueParser::ExpressionContext>(0);
}
+TorqueParser::LocationExpressionContext*
+TorqueParser::LocationExpressionContext::locationExpression() {
+ return getRuleContext<TorqueParser::LocationExpressionContext>(0);
+}
+
size_t TorqueParser::LocationExpressionContext::getRuleIndex() const {
return TorqueParser::RuleLocationExpression;
}
@@ -2366,44 +2415,63 @@ TorqueParser::LocationExpressionContext* TorqueParser::locationExpression(
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(377);
- match(TorqueParser::IDENTIFIER);
- setState(379);
+ setState(414);
_errHandler->sync(this);
-
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 31, _ctx)) {
+ _input, 32, _ctx)) {
case 1: {
- setState(378);
- genericSpecializationTypeList();
+ setState(404);
+ match(TorqueParser::IDENTIFIER);
+ break;
+ }
+
+ case 2: {
+ setState(405);
+ primaryExpression();
+ setState(406);
+ match(TorqueParser::T__9);
+ setState(407);
+ match(TorqueParser::IDENTIFIER);
+ break;
+ }
+
+ case 3: {
+ setState(409);
+ primaryExpression();
+ setState(410);
+ match(TorqueParser::T__10);
+ setState(411);
+ expression();
+ setState(412);
+ match(TorqueParser::T__11);
break;
}
}
_ctx->stop = _input->LT(-1);
- setState(391);
+ setState(426);
_errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 33,
+ alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 34,
_ctx);
while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
if (alt == 1) {
if (!_parseListeners.empty()) triggerExitRuleEvent();
previousContext = _localctx;
- setState(389);
+ setState(424);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 32, _ctx)) {
+ _input, 33, _ctx)) {
case 1: {
_localctx = _tracker.createInstance<LocationExpressionContext>(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleLocationExpression);
- setState(381);
+ setState(416);
- if (!(precpred(_ctx, 2)))
- throw FailedPredicateException(this, "precpred(_ctx, 2)");
- setState(382);
+ if (!(precpred(_ctx, 4)))
+ throw FailedPredicateException(this, "precpred(_ctx, 4)");
+ setState(417);
match(TorqueParser::T__9);
- setState(383);
+ setState(418);
match(TorqueParser::IDENTIFIER);
break;
}
@@ -2413,24 +2481,24 @@ TorqueParser::LocationExpressionContext* TorqueParser::locationExpression(
parentContext, parentState);
pushNewRecursionContext(_localctx, startState,
RuleLocationExpression);
- setState(384);
+ setState(419);
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(385);
+ if (!(precpred(_ctx, 2)))
+ throw FailedPredicateException(this, "precpred(_ctx, 2)");
+ setState(420);
match(TorqueParser::T__10);
- setState(386);
+ setState(421);
expression();
- setState(387);
+ setState(422);
match(TorqueParser::T__11);
break;
}
}
}
- setState(393);
+ setState(428);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 33, _ctx);
+ _input, 34, _ctx);
}
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -2491,33 +2559,33 @@ TorqueParser::IncrementDecrementContext* TorqueParser::incrementDecrement() {
auto onExit = finally([=] { exitRule(); });
try {
- setState(404);
+ setState(439);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 34, _ctx)) {
+ _input, 35, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(394);
+ setState(429);
match(TorqueParser::INCREMENT);
- setState(395);
+ setState(430);
locationExpression(0);
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(396);
+ setState(431);
match(TorqueParser::DECREMENT);
- setState(397);
+ setState(432);
locationExpression(0);
break;
}
case 3: {
enterOuterAlt(_localctx, 3);
- setState(398);
+ setState(433);
locationExpression(0);
- setState(399);
+ setState(434);
dynamic_cast<IncrementDecrementContext*>(_localctx)->op =
match(TorqueParser::INCREMENT);
break;
@@ -2525,9 +2593,9 @@ TorqueParser::IncrementDecrementContext* TorqueParser::incrementDecrement() {
case 4: {
enterOuterAlt(_localctx, 4);
- setState(401);
+ setState(436);
locationExpression(0);
- setState(402);
+ setState(437);
dynamic_cast<IncrementDecrementContext*>(_localctx)->op =
match(TorqueParser::DECREMENT);
break;
@@ -2604,28 +2672,28 @@ TorqueParser::AssignmentContext* TorqueParser::assignment() {
auto onExit = finally([=] { exitRule(); });
try {
- setState(412);
+ setState(447);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 36, _ctx)) {
+ _input, 37, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(406);
+ setState(441);
incrementDecrement();
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(407);
+ setState(442);
locationExpression(0);
- setState(410);
+ setState(445);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 35, _ctx)) {
+ _input, 36, _ctx)) {
case 1: {
- setState(408);
+ setState(443);
_la = _input->LA(1);
if (!(_la == TorqueParser::ASSIGNMENT
@@ -2635,7 +2703,7 @@ TorqueParser::AssignmentContext* TorqueParser::assignment() {
_errHandler->reportMatch(this);
consume();
}
- setState(409);
+ setState(444);
expression();
break;
}
@@ -2660,9 +2728,9 @@ TorqueParser::AssignmentExpressionContext::AssignmentExpressionContext(
ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
-TorqueParser::PrimaryExpressionContext*
-TorqueParser::AssignmentExpressionContext::primaryExpression() {
- return getRuleContext<TorqueParser::PrimaryExpressionContext>(0);
+TorqueParser::FunctionPointerExpressionContext*
+TorqueParser::AssignmentExpressionContext::functionPointerExpression() {
+ return getRuleContext<TorqueParser::FunctionPointerExpressionContext>(0);
}
TorqueParser::AssignmentContext*
@@ -2703,20 +2771,20 @@ TorqueParser::assignmentExpression() {
auto onExit = finally([=] { exitRule(); });
try {
- setState(416);
+ setState(451);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 37, _ctx)) {
+ _input, 38, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(414);
- primaryExpression();
+ setState(449);
+ functionPointerExpression();
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(415);
+ setState(450);
assignment();
break;
}
@@ -2731,6 +2799,205 @@ TorqueParser::assignmentExpression() {
return _localctx;
}
+//----------------- StructExpressionContext
+//------------------------------------------------------------------
+
+TorqueParser::StructExpressionContext::StructExpressionContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode* TorqueParser::StructExpressionContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+std::vector<TorqueParser::ExpressionContext*>
+TorqueParser::StructExpressionContext::expression() {
+ return getRuleContexts<TorqueParser::ExpressionContext>();
+}
+
+TorqueParser::ExpressionContext*
+TorqueParser::StructExpressionContext::expression(size_t i) {
+ return getRuleContext<TorqueParser::ExpressionContext>(i);
+}
+
+size_t TorqueParser::StructExpressionContext::getRuleIndex() const {
+ return TorqueParser::RuleStructExpression;
+}
+
+void TorqueParser::StructExpressionContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->enterStructExpression(this);
+}
+
+void TorqueParser::StructExpressionContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->exitStructExpression(this);
+}
+
+antlrcpp::Any TorqueParser::StructExpressionContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitStructExpression(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::StructExpressionContext* TorqueParser::structExpression() {
+ StructExpressionContext* _localctx =
+ _tracker.createInstance<StructExpressionContext>(_ctx, getState());
+ enterRule(_localctx, 54, TorqueParser::RuleStructExpression);
+ size_t _la = 0;
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(453);
+ match(TorqueParser::IDENTIFIER);
+ setState(454);
+ match(TorqueParser::T__12);
+ setState(463);
+ _errHandler->sync(this);
+
+ _la = _input->LA(1);
+ if ((((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
+ (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
+ (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::NOT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(455);
+ expression();
+ setState(460);
+ _errHandler->sync(this);
+ _la = _input->LA(1);
+ while (_la == TorqueParser::T__3) {
+ setState(456);
+ match(TorqueParser::T__3);
+ setState(457);
+ expression();
+ setState(462);
+ _errHandler->sync(this);
+ _la = _input->LA(1);
+ }
+ }
+ setState(465);
+ match(TorqueParser::T__13);
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
+//----------------- FunctionPointerExpressionContext
+//------------------------------------------------------------------
+
+TorqueParser::FunctionPointerExpressionContext::
+ FunctionPointerExpressionContext(ParserRuleContext* parent,
+ size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+TorqueParser::PrimaryExpressionContext*
+TorqueParser::FunctionPointerExpressionContext::primaryExpression() {
+ return getRuleContext<TorqueParser::PrimaryExpressionContext>(0);
+}
+
+tree::TerminalNode*
+TorqueParser::FunctionPointerExpressionContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+TorqueParser::GenericSpecializationTypeListContext* TorqueParser::
+ FunctionPointerExpressionContext::genericSpecializationTypeList() {
+ return getRuleContext<TorqueParser::GenericSpecializationTypeListContext>(0);
+}
+
+size_t TorqueParser::FunctionPointerExpressionContext::getRuleIndex() const {
+ return TorqueParser::RuleFunctionPointerExpression;
+}
+
+void TorqueParser::FunctionPointerExpressionContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->enterFunctionPointerExpression(this);
+}
+
+void TorqueParser::FunctionPointerExpressionContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->exitFunctionPointerExpression(this);
+}
+
+antlrcpp::Any TorqueParser::FunctionPointerExpressionContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitFunctionPointerExpression(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::FunctionPointerExpressionContext*
+TorqueParser::functionPointerExpression() {
+ FunctionPointerExpressionContext* _localctx =
+ _tracker.createInstance<FunctionPointerExpressionContext>(_ctx,
+ getState());
+ enterRule(_localctx, 56, TorqueParser::RuleFunctionPointerExpression);
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ setState(472);
+ _errHandler->sync(this);
+ switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
+ _input, 42, _ctx)) {
+ case 1: {
+ enterOuterAlt(_localctx, 1);
+ setState(467);
+ primaryExpression();
+ break;
+ }
+
+ case 2: {
+ enterOuterAlt(_localctx, 2);
+ setState(468);
+ match(TorqueParser::IDENTIFIER);
+ setState(470);
+ _errHandler->sync(this);
+
+ switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
+ _input, 41, _ctx)) {
+ case 1: {
+ setState(469);
+ genericSpecializationTypeList();
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
//----------------- PrimaryExpressionContext
//------------------------------------------------------------------
@@ -2743,6 +3010,11 @@ TorqueParser::PrimaryExpressionContext::helperCall() {
return getRuleContext<TorqueParser::HelperCallContext>(0);
}
+TorqueParser::StructExpressionContext*
+TorqueParser::PrimaryExpressionContext::structExpression() {
+ return getRuleContext<TorqueParser::StructExpressionContext>(0);
+}
+
tree::TerminalNode* TorqueParser::PrimaryExpressionContext::DECIMAL_LITERAL() {
return getToken(TorqueParser::DECIMAL_LITERAL, 0);
}
@@ -2751,31 +3023,11 @@ tree::TerminalNode* TorqueParser::PrimaryExpressionContext::STRING_LITERAL() {
return getToken(TorqueParser::STRING_LITERAL, 0);
}
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::CAST_KEYWORD() {
- return getToken(TorqueParser::CAST_KEYWORD, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::PrimaryExpressionContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
TorqueParser::ExpressionContext*
TorqueParser::PrimaryExpressionContext::expression() {
return getRuleContext<TorqueParser::ExpressionContext>(0);
}
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::OTHERWISE() {
- return getToken(TorqueParser::OTHERWISE, 0);
-}
-
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::CONVERT_KEYWORD() {
- return getToken(TorqueParser::CONVERT_KEYWORD, 0);
-}
-
size_t TorqueParser::PrimaryExpressionContext::getRuleIndex() const {
return TorqueParser::RulePrimaryExpression;
}
@@ -2803,91 +3055,52 @@ antlrcpp::Any TorqueParser::PrimaryExpressionContext::accept(
TorqueParser::PrimaryExpressionContext* TorqueParser::primaryExpression() {
PrimaryExpressionContext* _localctx =
_tracker.createInstance<PrimaryExpressionContext>(_ctx, getState());
- enterRule(_localctx, 54, TorqueParser::RulePrimaryExpression);
+ enterRule(_localctx, 58, TorqueParser::RulePrimaryExpression);
auto onExit = finally([=] { exitRule(); });
try {
- setState(443);
+ setState(482);
_errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::MAX:
- case TorqueParser::MIN:
- case TorqueParser::IDENTIFIER: {
+ switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
+ _input, 43, _ctx)) {
+ case 1: {
enterOuterAlt(_localctx, 1);
- setState(418);
+ setState(474);
helperCall();
break;
}
- case TorqueParser::DECIMAL_LITERAL: {
+ case 2: {
enterOuterAlt(_localctx, 2);
- setState(419);
- match(TorqueParser::DECIMAL_LITERAL);
+ setState(475);
+ structExpression();
break;
}
- case TorqueParser::STRING_LITERAL: {
+ case 3: {
enterOuterAlt(_localctx, 3);
- setState(420);
- match(TorqueParser::STRING_LITERAL);
+ setState(476);
+ match(TorqueParser::DECIMAL_LITERAL);
break;
}
- case TorqueParser::CAST_KEYWORD: {
+ case 4: {
enterOuterAlt(_localctx, 4);
- setState(421);
- match(TorqueParser::CAST_KEYWORD);
- setState(422);
- match(TorqueParser::LESS_THAN);
- setState(423);
- type();
- setState(424);
- match(TorqueParser::GREATER_THAN);
- setState(425);
- match(TorqueParser::T__0);
- setState(426);
- expression();
- setState(427);
- match(TorqueParser::T__1);
- setState(428);
- match(TorqueParser::OTHERWISE);
- setState(429);
- match(TorqueParser::IDENTIFIER);
+ setState(477);
+ match(TorqueParser::STRING_LITERAL);
break;
}
- case TorqueParser::CONVERT_KEYWORD: {
+ case 5: {
enterOuterAlt(_localctx, 5);
- setState(431);
- match(TorqueParser::CONVERT_KEYWORD);
- setState(432);
- match(TorqueParser::LESS_THAN);
- setState(433);
- type();
- setState(434);
- match(TorqueParser::GREATER_THAN);
- setState(435);
- match(TorqueParser::T__0);
- setState(436);
- expression();
- setState(437);
- match(TorqueParser::T__1);
- break;
- }
-
- case TorqueParser::T__0: {
- enterOuterAlt(_localctx, 6);
- setState(439);
+ setState(478);
match(TorqueParser::T__0);
- setState(440);
+ setState(479);
expression();
- setState(441);
+ setState(480);
match(TorqueParser::T__1);
break;
}
-
- default:
- throw NoViableAltException(this);
}
} catch (RecognitionException& e) {
@@ -2939,18 +3152,20 @@ antlrcpp::Any TorqueParser::ForInitializationContext::accept(
TorqueParser::ForInitializationContext* TorqueParser::forInitialization() {
ForInitializationContext* _localctx =
_tracker.createInstance<ForInitializationContext>(_ctx, getState());
- enterRule(_localctx, 56, TorqueParser::RuleForInitialization);
+ enterRule(_localctx, 60, TorqueParser::RuleForInitialization);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(446);
+ setState(485);
_errHandler->sync(this);
_la = _input->LA(1);
- if (_la == TorqueParser::LET) {
- setState(445);
+ if (_la == TorqueParser::LET
+
+ || _la == TorqueParser::CONST) {
+ setState(484);
variableDeclarationWithInitialization();
}
@@ -3018,28 +3233,28 @@ antlrcpp::Any TorqueParser::ForLoopContext::accept(
TorqueParser::ForLoopContext* TorqueParser::forLoop() {
ForLoopContext* _localctx =
_tracker.createInstance<ForLoopContext>(_ctx, getState());
- enterRule(_localctx, 58, TorqueParser::RuleForLoop);
+ enterRule(_localctx, 62, TorqueParser::RuleForLoop);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(448);
+ setState(487);
match(TorqueParser::FOR);
- setState(449);
+ setState(488);
match(TorqueParser::T__0);
- setState(450);
+ setState(489);
forInitialization();
- setState(451);
- match(TorqueParser::T__12);
- setState(452);
+ setState(490);
+ match(TorqueParser::T__14);
+ setState(491);
expression();
- setState(453);
- match(TorqueParser::T__12);
- setState(454);
+ setState(492);
+ match(TorqueParser::T__14);
+ setState(493);
assignment();
- setState(455);
+ setState(494);
match(TorqueParser::T__1);
- setState(456);
+ setState(495);
statementBlock();
} catch (RecognitionException& e) {
@@ -3095,63 +3310,57 @@ antlrcpp::Any TorqueParser::RangeSpecifierContext::accept(
TorqueParser::RangeSpecifierContext* TorqueParser::rangeSpecifier() {
RangeSpecifierContext* _localctx =
_tracker.createInstance<RangeSpecifierContext>(_ctx, getState());
- enterRule(_localctx, 60, TorqueParser::RuleRangeSpecifier);
+ enterRule(_localctx, 64, TorqueParser::RuleRangeSpecifier);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(458);
+ setState(497);
match(TorqueParser::T__10);
- setState(460);
+ setState(499);
_errHandler->sync(this);
_la = _input->LA(1);
if ((((_la & ~0x3fULL) == 0) &&
((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::CAST_KEYWORD) |
- (1ULL << TorqueParser::CONVERT_KEYWORD) |
- (1ULL << TorqueParser::PLUS) | (1ULL << TorqueParser::MINUS) |
- (1ULL << TorqueParser::BIT_NOT) | (1ULL << TorqueParser::MAX))) !=
- 0) ||
- ((((_la - 64) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 64)) &
- ((1ULL << (TorqueParser::MIN - 64)) |
- (1ULL << (TorqueParser::INCREMENT - 64)) |
- (1ULL << (TorqueParser::DECREMENT - 64)) |
- (1ULL << (TorqueParser::NOT - 64)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 64)) |
- (1ULL << (TorqueParser::IDENTIFIER - 64)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 64)))) != 0)) {
- setState(459);
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
+ (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
+ (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::NOT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(498);
dynamic_cast<RangeSpecifierContext*>(_localctx)->begin = expression();
}
- setState(462);
+ setState(501);
match(TorqueParser::T__4);
- setState(464);
+ setState(503);
_errHandler->sync(this);
_la = _input->LA(1);
if ((((_la & ~0x3fULL) == 0) &&
((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::CAST_KEYWORD) |
- (1ULL << TorqueParser::CONVERT_KEYWORD) |
- (1ULL << TorqueParser::PLUS) | (1ULL << TorqueParser::MINUS) |
- (1ULL << TorqueParser::BIT_NOT) | (1ULL << TorqueParser::MAX))) !=
- 0) ||
- ((((_la - 64) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 64)) &
- ((1ULL << (TorqueParser::MIN - 64)) |
- (1ULL << (TorqueParser::INCREMENT - 64)) |
- (1ULL << (TorqueParser::DECREMENT - 64)) |
- (1ULL << (TorqueParser::NOT - 64)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 64)) |
- (1ULL << (TorqueParser::IDENTIFIER - 64)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 64)))) != 0)) {
- setState(463);
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
+ (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
+ (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::NOT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(502);
dynamic_cast<RangeSpecifierContext*>(_localctx)->end = expression();
}
- setState(466);
+ setState(505);
match(TorqueParser::T__11);
} catch (RecognitionException& e) {
@@ -3202,18 +3411,18 @@ antlrcpp::Any TorqueParser::ForOfRangeContext::accept(
TorqueParser::ForOfRangeContext* TorqueParser::forOfRange() {
ForOfRangeContext* _localctx =
_tracker.createInstance<ForOfRangeContext>(_ctx, getState());
- enterRule(_localctx, 62, TorqueParser::RuleForOfRange);
+ enterRule(_localctx, 66, TorqueParser::RuleForOfRange);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(469);
+ setState(508);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__10) {
- setState(468);
+ setState(507);
rangeSpecifier();
}
@@ -3282,26 +3491,26 @@ antlrcpp::Any TorqueParser::ForOfLoopContext::accept(
TorqueParser::ForOfLoopContext* TorqueParser::forOfLoop() {
ForOfLoopContext* _localctx =
_tracker.createInstance<ForOfLoopContext>(_ctx, getState());
- enterRule(_localctx, 64, TorqueParser::RuleForOfLoop);
+ enterRule(_localctx, 68, TorqueParser::RuleForOfLoop);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(471);
+ setState(510);
match(TorqueParser::FOR);
- setState(472);
+ setState(511);
match(TorqueParser::T__0);
- setState(473);
+ setState(512);
variableDeclaration();
- setState(474);
- match(TorqueParser::T__13);
- setState(475);
+ setState(513);
+ match(TorqueParser::T__15);
+ setState(514);
expression();
- setState(476);
+ setState(515);
forOfRange();
- setState(477);
+ setState(516);
match(TorqueParser::T__1);
- setState(478);
+ setState(517);
statementBlock();
} catch (RecognitionException& e) {
@@ -3351,12 +3560,12 @@ antlrcpp::Any TorqueParser::ArgumentContext::accept(
TorqueParser::ArgumentContext* TorqueParser::argument() {
ArgumentContext* _localctx =
_tracker.createInstance<ArgumentContext>(_ctx, getState());
- enterRule(_localctx, 66, TorqueParser::RuleArgument);
+ enterRule(_localctx, 70, TorqueParser::RuleArgument);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(480);
+ setState(519);
expression();
} catch (RecognitionException& e) {
@@ -3412,50 +3621,47 @@ antlrcpp::Any TorqueParser::ArgumentListContext::accept(
TorqueParser::ArgumentListContext* TorqueParser::argumentList() {
ArgumentListContext* _localctx =
_tracker.createInstance<ArgumentListContext>(_ctx, getState());
- enterRule(_localctx, 68, TorqueParser::RuleArgumentList);
+ enterRule(_localctx, 72, TorqueParser::RuleArgumentList);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(482);
+ setState(521);
match(TorqueParser::T__0);
- setState(484);
+ setState(523);
_errHandler->sync(this);
_la = _input->LA(1);
if ((((_la & ~0x3fULL) == 0) &&
((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::CAST_KEYWORD) |
- (1ULL << TorqueParser::CONVERT_KEYWORD) |
- (1ULL << TorqueParser::PLUS) | (1ULL << TorqueParser::MINUS) |
- (1ULL << TorqueParser::BIT_NOT) | (1ULL << TorqueParser::MAX))) !=
- 0) ||
- ((((_la - 64) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 64)) &
- ((1ULL << (TorqueParser::MIN - 64)) |
- (1ULL << (TorqueParser::INCREMENT - 64)) |
- (1ULL << (TorqueParser::DECREMENT - 64)) |
- (1ULL << (TorqueParser::NOT - 64)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 64)) |
- (1ULL << (TorqueParser::IDENTIFIER - 64)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 64)))) != 0)) {
- setState(483);
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
+ (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
+ (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::NOT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(522);
argument();
}
- setState(490);
+ setState(529);
_errHandler->sync(this);
_la = _input->LA(1);
while (_la == TorqueParser::T__3) {
- setState(486);
+ setState(525);
match(TorqueParser::T__3);
- setState(487);
+ setState(526);
argument();
- setState(492);
+ setState(531);
_errHandler->sync(this);
_la = _input->LA(1);
}
- setState(493);
+ setState(532);
match(TorqueParser::T__1);
} catch (RecognitionException& e) {
@@ -3528,35 +3734,35 @@ antlrcpp::Any TorqueParser::HelperCallContext::accept(
TorqueParser::HelperCallContext* TorqueParser::helperCall() {
HelperCallContext* _localctx =
_tracker.createInstance<HelperCallContext>(_ctx, getState());
- enterRule(_localctx, 70, TorqueParser::RuleHelperCall);
+ enterRule(_localctx, 74, TorqueParser::RuleHelperCall);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(495);
+ setState(534);
_la = _input->LA(1);
- if (!(((((_la - 63) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 63)) &
- ((1ULL << (TorqueParser::MAX - 63)) |
- (1ULL << (TorqueParser::MIN - 63)) |
- (1ULL << (TorqueParser::IDENTIFIER - 63)))) != 0))) {
+ if (!(((((_la - 61) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 61)) &
+ ((1ULL << (TorqueParser::MAX - 61)) |
+ (1ULL << (TorqueParser::MIN - 61)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 61)))) != 0))) {
_errHandler->recoverInline(this);
} else {
_errHandler->reportMatch(this);
consume();
}
- setState(497);
+ setState(536);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::LESS_THAN) {
- setState(496);
+ setState(535);
genericSpecializationTypeList();
}
- setState(499);
+ setState(538);
argumentList();
- setState(500);
+ setState(539);
optionalOtherwise();
} catch (RecognitionException& e) {
@@ -3606,12 +3812,12 @@ antlrcpp::Any TorqueParser::LabelReferenceContext::accept(
TorqueParser::LabelReferenceContext* TorqueParser::labelReference() {
LabelReferenceContext* _localctx =
_tracker.createInstance<LabelReferenceContext>(_ctx, getState());
- enterRule(_localctx, 72, TorqueParser::RuleLabelReference);
+ enterRule(_localctx, 76, TorqueParser::RuleLabelReference);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(502);
+ setState(541);
match(TorqueParser::IDENTIFIER);
} catch (RecognitionException& e) {
@@ -3630,10 +3836,6 @@ TorqueParser::VariableDeclarationContext::VariableDeclarationContext(
ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
-tree::TerminalNode* TorqueParser::VariableDeclarationContext::LET() {
- return getToken(TorqueParser::LET, 0);
-}
-
tree::TerminalNode* TorqueParser::VariableDeclarationContext::IDENTIFIER() {
return getToken(TorqueParser::IDENTIFIER, 0);
}
@@ -3642,6 +3844,14 @@ TorqueParser::TypeContext* TorqueParser::VariableDeclarationContext::type() {
return getRuleContext<TorqueParser::TypeContext>(0);
}
+tree::TerminalNode* TorqueParser::VariableDeclarationContext::LET() {
+ return getToken(TorqueParser::LET, 0);
+}
+
+tree::TerminalNode* TorqueParser::VariableDeclarationContext::CONST() {
+ return getToken(TorqueParser::CONST, 0);
+}
+
size_t TorqueParser::VariableDeclarationContext::getRuleIndex() const {
return TorqueParser::RuleVariableDeclaration;
}
@@ -3669,19 +3879,28 @@ antlrcpp::Any TorqueParser::VariableDeclarationContext::accept(
TorqueParser::VariableDeclarationContext* TorqueParser::variableDeclaration() {
VariableDeclarationContext* _localctx =
_tracker.createInstance<VariableDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 74, TorqueParser::RuleVariableDeclaration);
+ enterRule(_localctx, 78, TorqueParser::RuleVariableDeclaration);
+ size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(504);
- match(TorqueParser::LET);
- setState(505);
+ setState(543);
+ _la = _input->LA(1);
+ if (!(_la == TorqueParser::LET
+
+ || _la == TorqueParser::CONST)) {
+ _errHandler->recoverInline(this);
+ } else {
+ _errHandler->reportMatch(this);
+ consume();
+ }
+ setState(544);
match(TorqueParser::IDENTIFIER);
- setState(506);
+ setState(545);
match(TorqueParser::T__4);
- setState(507);
- type();
+ setState(546);
+ type(0);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -3749,23 +3968,23 @@ TorqueParser::variableDeclarationWithInitialization() {
VariableDeclarationWithInitializationContext* _localctx =
_tracker.createInstance<VariableDeclarationWithInitializationContext>(
_ctx, getState());
- enterRule(_localctx, 76,
+ enterRule(_localctx, 80,
TorqueParser::RuleVariableDeclarationWithInitialization);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(509);
+ setState(548);
variableDeclaration();
- setState(512);
+ setState(551);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::ASSIGNMENT) {
- setState(510);
+ setState(549);
match(TorqueParser::ASSIGNMENT);
- setState(511);
+ setState(550);
expression();
}
@@ -3821,21 +4040,21 @@ antlrcpp::Any TorqueParser::HelperCallStatementContext::accept(
TorqueParser::HelperCallStatementContext* TorqueParser::helperCallStatement() {
HelperCallStatementContext* _localctx =
_tracker.createInstance<HelperCallStatementContext>(_ctx, getState());
- enterRule(_localctx, 78, TorqueParser::RuleHelperCallStatement);
+ enterRule(_localctx, 82, TorqueParser::RuleHelperCallStatement);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(515);
+ setState(554);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::TAIL) {
- setState(514);
+ setState(553);
match(TorqueParser::TAIL);
}
- setState(517);
+ setState(556);
helperCall();
} catch (RecognitionException& e) {
@@ -3886,12 +4105,12 @@ antlrcpp::Any TorqueParser::ExpressionStatementContext::accept(
TorqueParser::ExpressionStatementContext* TorqueParser::expressionStatement() {
ExpressionStatementContext* _localctx =
_tracker.createInstance<ExpressionStatementContext>(_ctx, getState());
- enterRule(_localctx, 80, TorqueParser::RuleExpressionStatement);
+ enterRule(_localctx, 84, TorqueParser::RuleExpressionStatement);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(519);
+ setState(558);
assignment();
} catch (RecognitionException& e) {
@@ -3960,39 +4179,39 @@ antlrcpp::Any TorqueParser::IfStatementContext::accept(
TorqueParser::IfStatementContext* TorqueParser::ifStatement() {
IfStatementContext* _localctx =
_tracker.createInstance<IfStatementContext>(_ctx, getState());
- enterRule(_localctx, 82, TorqueParser::RuleIfStatement);
+ enterRule(_localctx, 86, TorqueParser::RuleIfStatement);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(521);
+ setState(560);
match(TorqueParser::IF);
- setState(523);
+ setState(562);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::CONSTEXPR) {
- setState(522);
+ setState(561);
match(TorqueParser::CONSTEXPR);
}
- setState(525);
+ setState(564);
match(TorqueParser::T__0);
- setState(526);
+ setState(565);
expression();
- setState(527);
+ setState(566);
match(TorqueParser::T__1);
- setState(528);
+ setState(567);
statementBlock();
- setState(531);
+ setState(570);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 49, _ctx)) {
+ _input, 54, _ctx)) {
case 1: {
- setState(529);
- match(TorqueParser::T__14);
- setState(530);
+ setState(568);
+ match(TorqueParser::T__16);
+ setState(569);
statementBlock();
break;
}
@@ -4054,20 +4273,20 @@ antlrcpp::Any TorqueParser::WhileLoopContext::accept(
TorqueParser::WhileLoopContext* TorqueParser::whileLoop() {
WhileLoopContext* _localctx =
_tracker.createInstance<WhileLoopContext>(_ctx, getState());
- enterRule(_localctx, 84, TorqueParser::RuleWhileLoop);
+ enterRule(_localctx, 88, TorqueParser::RuleWhileLoop);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(533);
+ setState(572);
match(TorqueParser::WHILE);
- setState(534);
+ setState(573);
match(TorqueParser::T__0);
- setState(535);
+ setState(574);
expression();
- setState(536);
+ setState(575);
match(TorqueParser::T__1);
- setState(537);
+ setState(576);
statementBlock();
} catch (RecognitionException& e) {
@@ -4122,35 +4341,32 @@ antlrcpp::Any TorqueParser::ReturnStatementContext::accept(
TorqueParser::ReturnStatementContext* TorqueParser::returnStatement() {
ReturnStatementContext* _localctx =
_tracker.createInstance<ReturnStatementContext>(_ctx, getState());
- enterRule(_localctx, 86, TorqueParser::RuleReturnStatement);
+ enterRule(_localctx, 90, TorqueParser::RuleReturnStatement);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(539);
+ setState(578);
match(TorqueParser::RETURN);
- setState(541);
+ setState(580);
_errHandler->sync(this);
_la = _input->LA(1);
if ((((_la & ~0x3fULL) == 0) &&
((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::CAST_KEYWORD) |
- (1ULL << TorqueParser::CONVERT_KEYWORD) |
- (1ULL << TorqueParser::PLUS) | (1ULL << TorqueParser::MINUS) |
- (1ULL << TorqueParser::BIT_NOT) | (1ULL << TorqueParser::MAX))) !=
- 0) ||
- ((((_la - 64) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 64)) &
- ((1ULL << (TorqueParser::MIN - 64)) |
- (1ULL << (TorqueParser::INCREMENT - 64)) |
- (1ULL << (TorqueParser::DECREMENT - 64)) |
- (1ULL << (TorqueParser::NOT - 64)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 64)) |
- (1ULL << (TorqueParser::IDENTIFIER - 64)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 64)))) != 0)) {
- setState(540);
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
+ (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
+ (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::NOT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(579);
expression();
}
@@ -4201,12 +4417,12 @@ antlrcpp::Any TorqueParser::BreakStatementContext::accept(
TorqueParser::BreakStatementContext* TorqueParser::breakStatement() {
BreakStatementContext* _localctx =
_tracker.createInstance<BreakStatementContext>(_ctx, getState());
- enterRule(_localctx, 88, TorqueParser::RuleBreakStatement);
+ enterRule(_localctx, 92, TorqueParser::RuleBreakStatement);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(543);
+ setState(582);
match(TorqueParser::BREAK);
} catch (RecognitionException& e) {
@@ -4256,12 +4472,12 @@ antlrcpp::Any TorqueParser::ContinueStatementContext::accept(
TorqueParser::ContinueStatementContext* TorqueParser::continueStatement() {
ContinueStatementContext* _localctx =
_tracker.createInstance<ContinueStatementContext>(_ctx, getState());
- enterRule(_localctx, 90, TorqueParser::RuleContinueStatement);
+ enterRule(_localctx, 94, TorqueParser::RuleContinueStatement);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(545);
+ setState(584);
match(TorqueParser::CONTINUE);
} catch (RecognitionException& e) {
@@ -4321,22 +4537,22 @@ antlrcpp::Any TorqueParser::GotoStatementContext::accept(
TorqueParser::GotoStatementContext* TorqueParser::gotoStatement() {
GotoStatementContext* _localctx =
_tracker.createInstance<GotoStatementContext>(_ctx, getState());
- enterRule(_localctx, 92, TorqueParser::RuleGotoStatement);
+ enterRule(_localctx, 96, TorqueParser::RuleGotoStatement);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(547);
+ setState(586);
match(TorqueParser::GOTO);
- setState(548);
+ setState(587);
labelReference();
- setState(550);
+ setState(589);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__0) {
- setState(549);
+ setState(588);
argumentList();
}
@@ -4356,19 +4572,6 @@ TorqueParser::HandlerWithStatementContext::HandlerWithStatementContext(
ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
-TorqueParser::StatementBlockContext*
-TorqueParser::HandlerWithStatementContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::HandlerWithStatementContext::CATCH() {
- return getToken(TorqueParser::CATCH, 0);
-}
-
-tree::TerminalNode* TorqueParser::HandlerWithStatementContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
tree::TerminalNode* TorqueParser::HandlerWithStatementContext::LABEL() {
return getToken(TorqueParser::LABEL, 0);
}
@@ -4378,6 +4581,11 @@ TorqueParser::HandlerWithStatementContext::labelDeclaration() {
return getRuleContext<TorqueParser::LabelDeclarationContext>(0);
}
+TorqueParser::StatementBlockContext*
+TorqueParser::HandlerWithStatementContext::statementBlock() {
+ return getRuleContext<TorqueParser::StatementBlockContext>(0);
+}
+
size_t TorqueParser::HandlerWithStatementContext::getRuleIndex() const {
return TorqueParser::RuleHandlerWithStatement;
}
@@ -4407,34 +4615,16 @@ TorqueParser::HandlerWithStatementContext*
TorqueParser::handlerWithStatement() {
HandlerWithStatementContext* _localctx =
_tracker.createInstance<HandlerWithStatementContext>(_ctx, getState());
- enterRule(_localctx, 94, TorqueParser::RuleHandlerWithStatement);
+ enterRule(_localctx, 98, TorqueParser::RuleHandlerWithStatement);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(556);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::CATCH: {
- setState(552);
- match(TorqueParser::CATCH);
- setState(553);
- match(TorqueParser::IDENTIFIER);
- break;
- }
-
- case TorqueParser::LABEL: {
- setState(554);
- match(TorqueParser::LABEL);
- setState(555);
- labelDeclaration();
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
- setState(558);
+ setState(591);
+ match(TorqueParser::LABEL);
+ setState(592);
+ labelDeclaration();
+ setState(593);
statementBlock();
} catch (RecognitionException& e) {
@@ -4446,76 +4636,76 @@ TorqueParser::handlerWithStatement() {
return _localctx;
}
-//----------------- TryCatchContext
+//----------------- TryLabelStatementContext
//------------------------------------------------------------------
-TorqueParser::TryCatchContext::TryCatchContext(ParserRuleContext* parent,
- size_t invokingState)
+TorqueParser::TryLabelStatementContext::TryLabelStatementContext(
+ ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
-tree::TerminalNode* TorqueParser::TryCatchContext::TRY() {
+tree::TerminalNode* TorqueParser::TryLabelStatementContext::TRY() {
return getToken(TorqueParser::TRY, 0);
}
TorqueParser::StatementBlockContext*
-TorqueParser::TryCatchContext::statementBlock() {
+TorqueParser::TryLabelStatementContext::statementBlock() {
return getRuleContext<TorqueParser::StatementBlockContext>(0);
}
std::vector<TorqueParser::HandlerWithStatementContext*>
-TorqueParser::TryCatchContext::handlerWithStatement() {
+TorqueParser::TryLabelStatementContext::handlerWithStatement() {
return getRuleContexts<TorqueParser::HandlerWithStatementContext>();
}
TorqueParser::HandlerWithStatementContext*
-TorqueParser::TryCatchContext::handlerWithStatement(size_t i) {
+TorqueParser::TryLabelStatementContext::handlerWithStatement(size_t i) {
return getRuleContext<TorqueParser::HandlerWithStatementContext>(i);
}
-size_t TorqueParser::TryCatchContext::getRuleIndex() const {
- return TorqueParser::RuleTryCatch;
+size_t TorqueParser::TryLabelStatementContext::getRuleIndex() const {
+ return TorqueParser::RuleTryLabelStatement;
}
-void TorqueParser::TryCatchContext::enterRule(
+void TorqueParser::TryLabelStatementContext::enterRule(
tree::ParseTreeListener* listener) {
auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterTryCatch(this);
+ if (parserListener != nullptr) parserListener->enterTryLabelStatement(this);
}
-void TorqueParser::TryCatchContext::exitRule(
+void TorqueParser::TryLabelStatementContext::exitRule(
tree::ParseTreeListener* listener) {
auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTryCatch(this);
+ if (parserListener != nullptr) parserListener->exitTryLabelStatement(this);
}
-antlrcpp::Any TorqueParser::TryCatchContext::accept(
+antlrcpp::Any TorqueParser::TryLabelStatementContext::accept(
tree::ParseTreeVisitor* visitor) {
if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTryCatch(this);
+ return parserVisitor->visitTryLabelStatement(this);
else
return visitor->visitChildren(this);
}
-TorqueParser::TryCatchContext* TorqueParser::tryCatch() {
- TryCatchContext* _localctx =
- _tracker.createInstance<TryCatchContext>(_ctx, getState());
- enterRule(_localctx, 96, TorqueParser::RuleTryCatch);
+TorqueParser::TryLabelStatementContext* TorqueParser::tryLabelStatement() {
+ TryLabelStatementContext* _localctx =
+ _tracker.createInstance<TryLabelStatementContext>(_ctx, getState());
+ enterRule(_localctx, 100, TorqueParser::RuleTryLabelStatement);
auto onExit = finally([=] { exitRule(); });
try {
size_t alt;
enterOuterAlt(_localctx, 1);
- setState(560);
+ setState(595);
match(TorqueParser::TRY);
- setState(561);
+ setState(596);
statementBlock();
- setState(563);
+ setState(598);
_errHandler->sync(this);
alt = 1;
do {
switch (alt) {
case 1: {
- setState(562);
+ setState(597);
handlerWithStatement();
break;
}
@@ -4523,10 +4713,10 @@ TorqueParser::TryCatchContext* TorqueParser::tryCatch() {
default:
throw NoViableAltException(this);
}
- setState(565);
+ setState(600);
_errHandler->sync(this);
alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 53, _ctx);
+ _input, 57, _ctx);
} while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER);
} catch (RecognitionException& e) {
@@ -4545,15 +4735,19 @@ TorqueParser::DiagnosticStatementContext::DiagnosticStatementContext(
ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
-tree::TerminalNode* TorqueParser::DiagnosticStatementContext::ASSERT() {
- return getToken(TorqueParser::ASSERT, 0);
-}
-
TorqueParser::ExpressionContext*
TorqueParser::DiagnosticStatementContext::expression() {
return getRuleContext<TorqueParser::ExpressionContext>(0);
}
+tree::TerminalNode* TorqueParser::DiagnosticStatementContext::ASSERT_TOKEN() {
+ return getToken(TorqueParser::ASSERT_TOKEN, 0);
+}
+
+tree::TerminalNode* TorqueParser::DiagnosticStatementContext::CHECK_TOKEN() {
+ return getToken(TorqueParser::CHECK_TOKEN, 0);
+}
+
tree::TerminalNode*
TorqueParser::DiagnosticStatementContext::UNREACHABLE_TOKEN() {
return getToken(TorqueParser::UNREACHABLE_TOKEN, 0);
@@ -4590,36 +4784,46 @@ antlrcpp::Any TorqueParser::DiagnosticStatementContext::accept(
TorqueParser::DiagnosticStatementContext* TorqueParser::diagnosticStatement() {
DiagnosticStatementContext* _localctx =
_tracker.createInstance<DiagnosticStatementContext>(_ctx, getState());
- enterRule(_localctx, 98, TorqueParser::RuleDiagnosticStatement);
+ enterRule(_localctx, 102, TorqueParser::RuleDiagnosticStatement);
+ size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
- setState(574);
+ setState(609);
_errHandler->sync(this);
switch (_input->LA(1)) {
- case TorqueParser::ASSERT: {
+ case TorqueParser::ASSERT_TOKEN:
+ case TorqueParser::CHECK_TOKEN: {
enterOuterAlt(_localctx, 1);
- setState(567);
- match(TorqueParser::ASSERT);
- setState(568);
+ setState(602);
+ _la = _input->LA(1);
+ if (!(_la == TorqueParser::ASSERT_TOKEN
+
+ || _la == TorqueParser::CHECK_TOKEN)) {
+ _errHandler->recoverInline(this);
+ } else {
+ _errHandler->reportMatch(this);
+ consume();
+ }
+ setState(603);
match(TorqueParser::T__0);
- setState(569);
+ setState(604);
expression();
- setState(570);
+ setState(605);
match(TorqueParser::T__1);
break;
}
case TorqueParser::UNREACHABLE_TOKEN: {
enterOuterAlt(_localctx, 2);
- setState(572);
+ setState(607);
match(TorqueParser::UNREACHABLE_TOKEN);
break;
}
case TorqueParser::DEBUG_TOKEN: {
enterOuterAlt(_localctx, 3);
- setState(573);
+ setState(608);
match(TorqueParser::DEBUG_TOKEN);
break;
}
@@ -4702,8 +4906,9 @@ TorqueParser::ForLoopContext* TorqueParser::StatementContext::forLoop() {
return getRuleContext<TorqueParser::ForLoopContext>(0);
}
-TorqueParser::TryCatchContext* TorqueParser::StatementContext::tryCatch() {
- return getRuleContext<TorqueParser::TryCatchContext>(0);
+TorqueParser::TryLabelStatementContext*
+TorqueParser::StatementContext::tryLabelStatement() {
+ return getRuleContext<TorqueParser::TryLabelStatementContext>(0);
}
size_t TorqueParser::StatementContext::getRuleIndex() const {
@@ -4733,118 +4938,118 @@ antlrcpp::Any TorqueParser::StatementContext::accept(
TorqueParser::StatementContext* TorqueParser::statement() {
StatementContext* _localctx =
_tracker.createInstance<StatementContext>(_ctx, getState());
- enterRule(_localctx, 100, TorqueParser::RuleStatement);
+ enterRule(_localctx, 104, TorqueParser::RuleStatement);
auto onExit = finally([=] { exitRule(); });
try {
- setState(605);
+ setState(640);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 55, _ctx)) {
+ _input, 59, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(576);
+ setState(611);
variableDeclarationWithInitialization();
- setState(577);
- match(TorqueParser::T__12);
+ setState(612);
+ match(TorqueParser::T__14);
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(579);
+ setState(614);
helperCallStatement();
- setState(580);
- match(TorqueParser::T__12);
+ setState(615);
+ match(TorqueParser::T__14);
break;
}
case 3: {
enterOuterAlt(_localctx, 3);
- setState(582);
+ setState(617);
expressionStatement();
- setState(583);
- match(TorqueParser::T__12);
+ setState(618);
+ match(TorqueParser::T__14);
break;
}
case 4: {
enterOuterAlt(_localctx, 4);
- setState(585);
+ setState(620);
returnStatement();
- setState(586);
- match(TorqueParser::T__12);
+ setState(621);
+ match(TorqueParser::T__14);
break;
}
case 5: {
enterOuterAlt(_localctx, 5);
- setState(588);
+ setState(623);
breakStatement();
- setState(589);
- match(TorqueParser::T__12);
+ setState(624);
+ match(TorqueParser::T__14);
break;
}
case 6: {
enterOuterAlt(_localctx, 6);
- setState(591);
+ setState(626);
continueStatement();
- setState(592);
- match(TorqueParser::T__12);
+ setState(627);
+ match(TorqueParser::T__14);
break;
}
case 7: {
enterOuterAlt(_localctx, 7);
- setState(594);
+ setState(629);
gotoStatement();
- setState(595);
- match(TorqueParser::T__12);
+ setState(630);
+ match(TorqueParser::T__14);
break;
}
case 8: {
enterOuterAlt(_localctx, 8);
- setState(597);
+ setState(632);
ifStatement();
break;
}
case 9: {
enterOuterAlt(_localctx, 9);
- setState(598);
+ setState(633);
diagnosticStatement();
- setState(599);
- match(TorqueParser::T__12);
+ setState(634);
+ match(TorqueParser::T__14);
break;
}
case 10: {
enterOuterAlt(_localctx, 10);
- setState(601);
+ setState(636);
whileLoop();
break;
}
case 11: {
enterOuterAlt(_localctx, 11);
- setState(602);
+ setState(637);
forOfLoop();
break;
}
case 12: {
enterOuterAlt(_localctx, 12);
- setState(603);
+ setState(638);
forLoop();
break;
}
case 13: {
enterOuterAlt(_localctx, 13);
- setState(604);
- tryCatch();
+ setState(639);
+ tryLabelStatement();
break;
}
}
@@ -4902,38 +5107,39 @@ antlrcpp::Any TorqueParser::StatementListContext::accept(
TorqueParser::StatementListContext* TorqueParser::statementList() {
StatementListContext* _localctx =
_tracker.createInstance<StatementListContext>(_ctx, getState());
- enterRule(_localctx, 102, TorqueParser::RuleStatementList);
+ enterRule(_localctx, 106, TorqueParser::RuleStatementList);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(610);
+ setState(645);
_errHandler->sync(this);
_la = _input->LA(1);
- while (((((_la - 29) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 29)) &
- ((1ULL << (TorqueParser::IF - 29)) |
- (1ULL << (TorqueParser::FOR - 29)) |
- (1ULL << (TorqueParser::WHILE - 29)) |
- (1ULL << (TorqueParser::RETURN - 29)) |
- (1ULL << (TorqueParser::CONTINUE - 29)) |
- (1ULL << (TorqueParser::BREAK - 29)) |
- (1ULL << (TorqueParser::GOTO - 29)) |
- (1ULL << (TorqueParser::TRY - 29)) |
- (1ULL << (TorqueParser::TAIL - 29)) |
- (1ULL << (TorqueParser::LET - 29)) |
- (1ULL << (TorqueParser::ASSERT - 29)) |
- (1ULL << (TorqueParser::UNREACHABLE_TOKEN - 29)) |
- (1ULL << (TorqueParser::DEBUG_TOKEN - 29)) |
- (1ULL << (TorqueParser::MAX - 29)) |
- (1ULL << (TorqueParser::MIN - 29)) |
- (1ULL << (TorqueParser::INCREMENT - 29)) |
- (1ULL << (TorqueParser::DECREMENT - 29)) |
- (1ULL << (TorqueParser::IDENTIFIER - 29)))) != 0)) {
- setState(607);
+ while (
+ (((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::IF) |
+ (1ULL << TorqueParser::FOR) | (1ULL << TorqueParser::WHILE) |
+ (1ULL << TorqueParser::RETURN) | (1ULL << TorqueParser::CONTINUE) |
+ (1ULL << TorqueParser::BREAK) | (1ULL << TorqueParser::GOTO) |
+ (1ULL << TorqueParser::TRY) | (1ULL << TorqueParser::TAIL) |
+ (1ULL << TorqueParser::LET) | (1ULL << TorqueParser::CONST) |
+ (1ULL << TorqueParser::ASSERT_TOKEN) |
+ (1ULL << TorqueParser::CHECK_TOKEN) |
+ (1ULL << TorqueParser::UNREACHABLE_TOKEN) |
+ (1ULL << TorqueParser::DEBUG_TOKEN) | (1ULL << TorqueParser::MAX) |
+ (1ULL << TorqueParser::MIN))) != 0) ||
+ ((((_la - 73) & ~0x3fULL) == 0) &&
+ ((1ULL << (_la - 73)) &
+ ((1ULL << (TorqueParser::INCREMENT - 73)) |
+ (1ULL << (TorqueParser::DECREMENT - 73)) |
+ (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
+ (1ULL << (TorqueParser::IDENTIFIER - 73)) |
+ (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
+ setState(642);
statement();
- setState(612);
+ setState(647);
_errHandler->sync(this);
_la = _input->LA(1);
}
@@ -4990,26 +5196,26 @@ antlrcpp::Any TorqueParser::StatementScopeContext::accept(
TorqueParser::StatementScopeContext* TorqueParser::statementScope() {
StatementScopeContext* _localctx =
_tracker.createInstance<StatementScopeContext>(_ctx, getState());
- enterRule(_localctx, 104, TorqueParser::RuleStatementScope);
+ enterRule(_localctx, 108, TorqueParser::RuleStatementScope);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(614);
+ setState(649);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::DEFERRED) {
- setState(613);
+ setState(648);
match(TorqueParser::DEFERRED);
}
- setState(616);
- match(TorqueParser::T__15);
- setState(617);
+ setState(651);
+ match(TorqueParser::T__12);
+ setState(652);
statementList();
- setState(618);
- match(TorqueParser::T__16);
+ setState(653);
+ match(TorqueParser::T__13);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5064,13 +5270,14 @@ antlrcpp::Any TorqueParser::StatementBlockContext::accept(
TorqueParser::StatementBlockContext* TorqueParser::statementBlock() {
StatementBlockContext* _localctx =
_tracker.createInstance<StatementBlockContext>(_ctx, getState());
- enterRule(_localctx, 106, TorqueParser::RuleStatementBlock);
+ enterRule(_localctx, 110, TorqueParser::RuleStatementBlock);
auto onExit = finally([=] { exitRule(); });
try {
- setState(622);
+ setState(657);
_errHandler->sync(this);
switch (_input->LA(1)) {
+ case TorqueParser::T__0:
case TorqueParser::IF:
case TorqueParser::FOR:
case TorqueParser::WHILE:
@@ -5081,24 +5288,28 @@ TorqueParser::StatementBlockContext* TorqueParser::statementBlock() {
case TorqueParser::TRY:
case TorqueParser::TAIL:
case TorqueParser::LET:
- case TorqueParser::ASSERT:
+ case TorqueParser::CONST:
+ case TorqueParser::ASSERT_TOKEN:
+ case TorqueParser::CHECK_TOKEN:
case TorqueParser::UNREACHABLE_TOKEN:
case TorqueParser::DEBUG_TOKEN:
case TorqueParser::MAX:
case TorqueParser::MIN:
case TorqueParser::INCREMENT:
case TorqueParser::DECREMENT:
- case TorqueParser::IDENTIFIER: {
+ case TorqueParser::STRING_LITERAL:
+ case TorqueParser::IDENTIFIER:
+ case TorqueParser::DECIMAL_LITERAL: {
enterOuterAlt(_localctx, 1);
- setState(620);
+ setState(655);
statement();
break;
}
- case TorqueParser::T__15:
+ case TorqueParser::T__12:
case TorqueParser::DEFERRED: {
enterOuterAlt(_localctx, 2);
- setState(621);
+ setState(656);
statementScope();
break;
}
@@ -5155,12 +5366,12 @@ antlrcpp::Any TorqueParser::HelperBodyContext::accept(
TorqueParser::HelperBodyContext* TorqueParser::helperBody() {
HelperBodyContext* _localctx =
_tracker.createInstance<HelperBodyContext>(_ctx, getState());
- enterRule(_localctx, 108, TorqueParser::RuleHelperBody);
+ enterRule(_localctx, 112, TorqueParser::RuleHelperBody);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(624);
+ setState(659);
statementScope();
} catch (RecognitionException& e) {
@@ -5172,6 +5383,143 @@ TorqueParser::HelperBodyContext* TorqueParser::helperBody() {
return _localctx;
}
+//----------------- FieldDeclarationContext
+//------------------------------------------------------------------
+
+TorqueParser::FieldDeclarationContext::FieldDeclarationContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode* TorqueParser::FieldDeclarationContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+TorqueParser::TypeContext* TorqueParser::FieldDeclarationContext::type() {
+ return getRuleContext<TorqueParser::TypeContext>(0);
+}
+
+size_t TorqueParser::FieldDeclarationContext::getRuleIndex() const {
+ return TorqueParser::RuleFieldDeclaration;
+}
+
+void TorqueParser::FieldDeclarationContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->enterFieldDeclaration(this);
+}
+
+void TorqueParser::FieldDeclarationContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->exitFieldDeclaration(this);
+}
+
+antlrcpp::Any TorqueParser::FieldDeclarationContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitFieldDeclaration(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::FieldDeclarationContext* TorqueParser::fieldDeclaration() {
+ FieldDeclarationContext* _localctx =
+ _tracker.createInstance<FieldDeclarationContext>(_ctx, getState());
+ enterRule(_localctx, 114, TorqueParser::RuleFieldDeclaration);
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(661);
+ match(TorqueParser::IDENTIFIER);
+ setState(662);
+ match(TorqueParser::T__4);
+ setState(663);
+ type(0);
+ setState(664);
+ match(TorqueParser::T__14);
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
+//----------------- FieldListDeclarationContext
+//------------------------------------------------------------------
+
+TorqueParser::FieldListDeclarationContext::FieldListDeclarationContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+std::vector<TorqueParser::FieldDeclarationContext*>
+TorqueParser::FieldListDeclarationContext::fieldDeclaration() {
+ return getRuleContexts<TorqueParser::FieldDeclarationContext>();
+}
+
+TorqueParser::FieldDeclarationContext*
+TorqueParser::FieldListDeclarationContext::fieldDeclaration(size_t i) {
+ return getRuleContext<TorqueParser::FieldDeclarationContext>(i);
+}
+
+size_t TorqueParser::FieldListDeclarationContext::getRuleIndex() const {
+ return TorqueParser::RuleFieldListDeclaration;
+}
+
+void TorqueParser::FieldListDeclarationContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->enterFieldListDeclaration(this);
+}
+
+void TorqueParser::FieldListDeclarationContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->exitFieldListDeclaration(this);
+}
+
+antlrcpp::Any TorqueParser::FieldListDeclarationContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitFieldListDeclaration(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::FieldListDeclarationContext*
+TorqueParser::fieldListDeclaration() {
+ FieldListDeclarationContext* _localctx =
+ _tracker.createInstance<FieldListDeclarationContext>(_ctx, getState());
+ enterRule(_localctx, 116, TorqueParser::RuleFieldListDeclaration);
+ size_t _la = 0;
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(669);
+ _errHandler->sync(this);
+ _la = _input->LA(1);
+ while (_la == TorqueParser::IDENTIFIER) {
+ setState(666);
+ fieldDeclaration();
+ setState(671);
+ _errHandler->sync(this);
+ _la = _input->LA(1);
+ }
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
//----------------- ExtendsDeclarationContext
//------------------------------------------------------------------
@@ -5210,14 +5558,14 @@ antlrcpp::Any TorqueParser::ExtendsDeclarationContext::accept(
TorqueParser::ExtendsDeclarationContext* TorqueParser::extendsDeclaration() {
ExtendsDeclarationContext* _localctx =
_tracker.createInstance<ExtendsDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 110, TorqueParser::RuleExtendsDeclaration);
+ enterRule(_localctx, 118, TorqueParser::RuleExtendsDeclaration);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(626);
+ setState(672);
match(TorqueParser::T__17);
- setState(627);
+ setState(673);
match(TorqueParser::IDENTIFIER);
} catch (RecognitionException& e) {
@@ -5270,14 +5618,14 @@ TorqueParser::GeneratesDeclarationContext*
TorqueParser::generatesDeclaration() {
GeneratesDeclarationContext* _localctx =
_tracker.createInstance<GeneratesDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 112, TorqueParser::RuleGeneratesDeclaration);
+ enterRule(_localctx, 120, TorqueParser::RuleGeneratesDeclaration);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(629);
+ setState(675);
match(TorqueParser::T__18);
- setState(630);
+ setState(676);
match(TorqueParser::STRING_LITERAL);
} catch (RecognitionException& e) {
@@ -5330,14 +5678,14 @@ TorqueParser::ConstexprDeclarationContext*
TorqueParser::constexprDeclaration() {
ConstexprDeclarationContext* _localctx =
_tracker.createInstance<ConstexprDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 114, TorqueParser::RuleConstexprDeclaration);
+ enterRule(_localctx, 122, TorqueParser::RuleConstexprDeclaration);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(632);
+ setState(678);
match(TorqueParser::CONSTEXPR);
- setState(633);
+ setState(679);
match(TorqueParser::STRING_LITERAL);
} catch (RecognitionException& e) {
@@ -5402,42 +5750,111 @@ antlrcpp::Any TorqueParser::TypeDeclarationContext::accept(
TorqueParser::TypeDeclarationContext* TorqueParser::typeDeclaration() {
TypeDeclarationContext* _localctx =
_tracker.createInstance<TypeDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 116, TorqueParser::RuleTypeDeclaration);
+ enterRule(_localctx, 124, TorqueParser::RuleTypeDeclaration);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(635);
+ setState(681);
match(TorqueParser::T__5);
- setState(636);
+ setState(682);
match(TorqueParser::IDENTIFIER);
- setState(638);
+ setState(684);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__17) {
- setState(637);
+ setState(683);
extendsDeclaration();
}
- setState(641);
+ setState(687);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::T__18) {
- setState(640);
+ setState(686);
generatesDeclaration();
}
- setState(644);
+ setState(690);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::CONSTEXPR) {
- setState(643);
+ setState(689);
constexprDeclaration();
}
- setState(646);
- match(TorqueParser::T__12);
+ setState(692);
+ match(TorqueParser::T__14);
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
+//----------------- TypeAliasDeclarationContext
+//------------------------------------------------------------------
+
+TorqueParser::TypeAliasDeclarationContext::TypeAliasDeclarationContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode* TorqueParser::TypeAliasDeclarationContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+TorqueParser::TypeContext* TorqueParser::TypeAliasDeclarationContext::type() {
+ return getRuleContext<TorqueParser::TypeContext>(0);
+}
+
+size_t TorqueParser::TypeAliasDeclarationContext::getRuleIndex() const {
+ return TorqueParser::RuleTypeAliasDeclaration;
+}
+
+void TorqueParser::TypeAliasDeclarationContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->enterTypeAliasDeclaration(this);
+}
+
+void TorqueParser::TypeAliasDeclarationContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->exitTypeAliasDeclaration(this);
+}
+
+antlrcpp::Any TorqueParser::TypeAliasDeclarationContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitTypeAliasDeclaration(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::TypeAliasDeclarationContext*
+TorqueParser::typeAliasDeclaration() {
+ TypeAliasDeclarationContext* _localctx =
+ _tracker.createInstance<TypeAliasDeclarationContext>(_ctx, getState());
+ enterRule(_localctx, 126, TorqueParser::RuleTypeAliasDeclaration);
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(694);
+ match(TorqueParser::T__5);
+ setState(695);
+ match(TorqueParser::IDENTIFIER);
+ setState(696);
+ match(TorqueParser::ASSIGNMENT);
+ setState(697);
+ type(0);
+ setState(698);
+ match(TorqueParser::T__14);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5513,38 +5930,38 @@ antlrcpp::Any TorqueParser::ExternalBuiltinContext::accept(
TorqueParser::ExternalBuiltinContext* TorqueParser::externalBuiltin() {
ExternalBuiltinContext* _localctx =
_tracker.createInstance<ExternalBuiltinContext>(_ctx, getState());
- enterRule(_localctx, 118, TorqueParser::RuleExternalBuiltin);
+ enterRule(_localctx, 128, TorqueParser::RuleExternalBuiltin);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(648);
+ setState(700);
match(TorqueParser::EXTERN);
- setState(650);
+ setState(702);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::JAVASCRIPT) {
- setState(649);
+ setState(701);
match(TorqueParser::JAVASCRIPT);
}
- setState(652);
+ setState(704);
match(TorqueParser::BUILTIN);
- setState(653);
+ setState(705);
match(TorqueParser::IDENTIFIER);
- setState(654);
+ setState(706);
optionalGenericTypeList();
- setState(655);
+ setState(707);
match(TorqueParser::T__0);
- setState(656);
+ setState(708);
typeList();
- setState(657);
+ setState(709);
match(TorqueParser::T__1);
- setState(658);
+ setState(710);
optionalType();
- setState(659);
- match(TorqueParser::T__12);
+ setState(711);
+ match(TorqueParser::T__14);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5598,9 +6015,6 @@ tree::TerminalNode* TorqueParser::ExternalMacroContext::STRING_LITERAL() {
return getToken(TorqueParser::STRING_LITERAL, 0);
}
-tree::TerminalNode* TorqueParser::ExternalMacroContext::IMPLICIT() {
- return getToken(TorqueParser::IMPLICIT, 0);
-}
size_t TorqueParser::ExternalMacroContext::getRuleIndex() const {
return TorqueParser::RuleExternalMacro;
@@ -5629,48 +6043,38 @@ antlrcpp::Any TorqueParser::ExternalMacroContext::accept(
TorqueParser::ExternalMacroContext* TorqueParser::externalMacro() {
ExternalMacroContext* _localctx =
_tracker.createInstance<ExternalMacroContext>(_ctx, getState());
- enterRule(_localctx, 120, TorqueParser::RuleExternalMacro);
+ enterRule(_localctx, 130, TorqueParser::RuleExternalMacro);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(661);
+ setState(713);
match(TorqueParser::EXTERN);
- setState(667);
+ setState(716);
_errHandler->sync(this);
_la = _input->LA(1);
- if (_la == TorqueParser::T__19
-
- || _la == TorqueParser::IMPLICIT) {
- setState(663);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::IMPLICIT) {
- setState(662);
- match(TorqueParser::IMPLICIT);
- }
- setState(665);
+ if (_la == TorqueParser::T__19) {
+ setState(714);
match(TorqueParser::T__19);
- setState(666);
+ setState(715);
match(TorqueParser::STRING_LITERAL);
}
- setState(669);
+ setState(718);
match(TorqueParser::MACRO);
- setState(670);
+ setState(719);
match(TorqueParser::IDENTIFIER);
- setState(671);
+ setState(720);
optionalGenericTypeList();
- setState(672);
+ setState(721);
typeListMaybeVarArgs();
- setState(673);
+ setState(722);
optionalType();
- setState(674);
+ setState(723);
optionalLabelList();
- setState(675);
- match(TorqueParser::T__12);
+ setState(724);
+ match(TorqueParser::T__14);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5737,23 +6141,23 @@ antlrcpp::Any TorqueParser::ExternalRuntimeContext::accept(
TorqueParser::ExternalRuntimeContext* TorqueParser::externalRuntime() {
ExternalRuntimeContext* _localctx =
_tracker.createInstance<ExternalRuntimeContext>(_ctx, getState());
- enterRule(_localctx, 122, TorqueParser::RuleExternalRuntime);
+ enterRule(_localctx, 132, TorqueParser::RuleExternalRuntime);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(677);
+ setState(726);
match(TorqueParser::EXTERN);
- setState(678);
+ setState(727);
match(TorqueParser::RUNTIME);
- setState(679);
+ setState(728);
match(TorqueParser::IDENTIFIER);
- setState(680);
+ setState(729);
typeListMaybeVarArgs();
- setState(681);
+ setState(730);
optionalType();
- setState(682);
- match(TorqueParser::T__12);
+ setState(731);
+ match(TorqueParser::T__14);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5830,32 +6234,49 @@ antlrcpp::Any TorqueParser::BuiltinDeclarationContext::accept(
TorqueParser::BuiltinDeclarationContext* TorqueParser::builtinDeclaration() {
BuiltinDeclarationContext* _localctx =
_tracker.createInstance<BuiltinDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 124, TorqueParser::RuleBuiltinDeclaration);
+ enterRule(_localctx, 134, TorqueParser::RuleBuiltinDeclaration);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(685);
+ setState(734);
_errHandler->sync(this);
_la = _input->LA(1);
if (_la == TorqueParser::JAVASCRIPT) {
- setState(684);
+ setState(733);
match(TorqueParser::JAVASCRIPT);
}
- setState(687);
+ setState(736);
match(TorqueParser::BUILTIN);
- setState(688);
+ setState(737);
match(TorqueParser::IDENTIFIER);
- setState(689);
+ setState(738);
optionalGenericTypeList();
- setState(690);
+ setState(739);
parameterList();
- setState(691);
+ setState(740);
optionalType();
- setState(692);
- helperBody();
+ setState(743);
+ _errHandler->sync(this);
+ switch (_input->LA(1)) {
+ case TorqueParser::T__12:
+ case TorqueParser::DEFERRED: {
+ setState(741);
+ helperBody();
+ break;
+ }
+
+ case TorqueParser::T__14: {
+ setState(742);
+ match(TorqueParser::T__14);
+ break;
+ }
+
+ default:
+ throw NoViableAltException(this);
+ }
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -5932,22 +6353,22 @@ TorqueParser::GenericSpecializationContext*
TorqueParser::genericSpecialization() {
GenericSpecializationContext* _localctx =
_tracker.createInstance<GenericSpecializationContext>(_ctx, getState());
- enterRule(_localctx, 126, TorqueParser::RuleGenericSpecialization);
+ enterRule(_localctx, 136, TorqueParser::RuleGenericSpecialization);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(694);
+ setState(745);
match(TorqueParser::IDENTIFIER);
- setState(695);
+ setState(746);
genericSpecializationTypeList();
- setState(696);
+ setState(747);
parameterList();
- setState(697);
+ setState(748);
optionalType();
- setState(698);
+ setState(749);
optionalLabelList();
- setState(699);
+ setState(750);
helperBody();
} catch (RecognitionException& e) {
@@ -5999,6 +6420,10 @@ TorqueParser::MacroDeclarationContext::helperBody() {
return getRuleContext<TorqueParser::HelperBodyContext>(0);
}
+tree::TerminalNode* TorqueParser::MacroDeclarationContext::STRING_LITERAL() {
+ return getToken(TorqueParser::STRING_LITERAL, 0);
+}
+
size_t TorqueParser::MacroDeclarationContext::getRuleIndex() const {
return TorqueParser::RuleMacroDeclaration;
}
@@ -6026,25 +6451,134 @@ antlrcpp::Any TorqueParser::MacroDeclarationContext::accept(
TorqueParser::MacroDeclarationContext* TorqueParser::macroDeclaration() {
MacroDeclarationContext* _localctx =
_tracker.createInstance<MacroDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 128, TorqueParser::RuleMacroDeclaration);
+ enterRule(_localctx, 138, TorqueParser::RuleMacroDeclaration);
+ size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(701);
+ setState(754);
+ _errHandler->sync(this);
+
+ _la = _input->LA(1);
+ if (_la == TorqueParser::T__19) {
+ setState(752);
+ match(TorqueParser::T__19);
+ setState(753);
+ match(TorqueParser::STRING_LITERAL);
+ }
+ setState(756);
match(TorqueParser::MACRO);
- setState(702);
+ setState(757);
match(TorqueParser::IDENTIFIER);
- setState(703);
+ setState(758);
optionalGenericTypeList();
- setState(704);
+ setState(759);
parameterList();
- setState(705);
+ setState(760);
optionalType();
- setState(706);
+ setState(761);
optionalLabelList();
- setState(707);
- helperBody();
+ setState(764);
+ _errHandler->sync(this);
+ switch (_input->LA(1)) {
+ case TorqueParser::T__12:
+ case TorqueParser::DEFERRED: {
+ setState(762);
+ helperBody();
+ break;
+ }
+
+ case TorqueParser::T__14: {
+ setState(763);
+ match(TorqueParser::T__14);
+ break;
+ }
+
+ default:
+ throw NoViableAltException(this);
+ }
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
+//----------------- ExternConstDeclarationContext
+//------------------------------------------------------------------
+
+TorqueParser::ExternConstDeclarationContext::ExternConstDeclarationContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode* TorqueParser::ExternConstDeclarationContext::CONST() {
+ return getToken(TorqueParser::CONST, 0);
+}
+
+tree::TerminalNode* TorqueParser::ExternConstDeclarationContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+TorqueParser::TypeContext* TorqueParser::ExternConstDeclarationContext::type() {
+ return getRuleContext<TorqueParser::TypeContext>(0);
+}
+
+TorqueParser::GeneratesDeclarationContext*
+TorqueParser::ExternConstDeclarationContext::generatesDeclaration() {
+ return getRuleContext<TorqueParser::GeneratesDeclarationContext>(0);
+}
+
+size_t TorqueParser::ExternConstDeclarationContext::getRuleIndex() const {
+ return TorqueParser::RuleExternConstDeclaration;
+}
+
+void TorqueParser::ExternConstDeclarationContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->enterExternConstDeclaration(this);
+}
+
+void TorqueParser::ExternConstDeclarationContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr)
+ parserListener->exitExternConstDeclaration(this);
+}
+
+antlrcpp::Any TorqueParser::ExternConstDeclarationContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitExternConstDeclaration(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::ExternConstDeclarationContext*
+TorqueParser::externConstDeclaration() {
+ ExternConstDeclarationContext* _localctx =
+ _tracker.createInstance<ExternConstDeclarationContext>(_ctx, getState());
+ enterRule(_localctx, 140, TorqueParser::RuleExternConstDeclaration);
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(766);
+ match(TorqueParser::CONST);
+ setState(767);
+ match(TorqueParser::IDENTIFIER);
+ setState(768);
+ match(TorqueParser::T__4);
+ setState(769);
+ type(0);
+ setState(770);
+ generatesDeclaration();
+ setState(771);
+ match(TorqueParser::T__14);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -6062,6 +6596,10 @@ TorqueParser::ConstDeclarationContext::ConstDeclarationContext(
ParserRuleContext* parent, size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
+tree::TerminalNode* TorqueParser::ConstDeclarationContext::CONST() {
+ return getToken(TorqueParser::CONST, 0);
+}
+
tree::TerminalNode* TorqueParser::ConstDeclarationContext::IDENTIFIER() {
return getToken(TorqueParser::IDENTIFIER, 0);
}
@@ -6070,8 +6608,13 @@ TorqueParser::TypeContext* TorqueParser::ConstDeclarationContext::type() {
return getRuleContext<TorqueParser::TypeContext>(0);
}
-tree::TerminalNode* TorqueParser::ConstDeclarationContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
+tree::TerminalNode* TorqueParser::ConstDeclarationContext::ASSIGNMENT() {
+ return getToken(TorqueParser::ASSIGNMENT, 0);
+}
+
+TorqueParser::ExpressionContext*
+TorqueParser::ConstDeclarationContext::expression() {
+ return getRuleContext<TorqueParser::ExpressionContext>(0);
}
size_t TorqueParser::ConstDeclarationContext::getRuleIndex() const {
@@ -6101,25 +6644,93 @@ antlrcpp::Any TorqueParser::ConstDeclarationContext::accept(
TorqueParser::ConstDeclarationContext* TorqueParser::constDeclaration() {
ConstDeclarationContext* _localctx =
_tracker.createInstance<ConstDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 130, TorqueParser::RuleConstDeclaration);
+ enterRule(_localctx, 142, TorqueParser::RuleConstDeclaration);
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(709);
- match(TorqueParser::T__20);
- setState(710);
+ setState(773);
+ match(TorqueParser::CONST);
+ setState(774);
match(TorqueParser::IDENTIFIER);
- setState(711);
+ setState(775);
match(TorqueParser::T__4);
- setState(712);
- type();
- setState(713);
+ setState(776);
+ type(0);
+ setState(777);
match(TorqueParser::ASSIGNMENT);
- setState(714);
- match(TorqueParser::STRING_LITERAL);
- setState(715);
+ setState(778);
+ expression();
+ setState(779);
+ match(TorqueParser::T__14);
+
+ } catch (RecognitionException& e) {
+ _errHandler->reportError(this, e);
+ _localctx->exception = std::current_exception();
+ _errHandler->recover(this, _localctx->exception);
+ }
+
+ return _localctx;
+}
+
+//----------------- StructDeclarationContext
+//------------------------------------------------------------------
+
+TorqueParser::StructDeclarationContext::StructDeclarationContext(
+ ParserRuleContext* parent, size_t invokingState)
+ : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode* TorqueParser::StructDeclarationContext::IDENTIFIER() {
+ return getToken(TorqueParser::IDENTIFIER, 0);
+}
+
+TorqueParser::FieldListDeclarationContext*
+TorqueParser::StructDeclarationContext::fieldListDeclaration() {
+ return getRuleContext<TorqueParser::FieldListDeclarationContext>(0);
+}
+
+size_t TorqueParser::StructDeclarationContext::getRuleIndex() const {
+ return TorqueParser::RuleStructDeclaration;
+}
+
+void TorqueParser::StructDeclarationContext::enterRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->enterStructDeclaration(this);
+}
+
+void TorqueParser::StructDeclarationContext::exitRule(
+ tree::ParseTreeListener* listener) {
+ auto parserListener = dynamic_cast<TorqueListener*>(listener);
+ if (parserListener != nullptr) parserListener->exitStructDeclaration(this);
+}
+
+antlrcpp::Any TorqueParser::StructDeclarationContext::accept(
+ tree::ParseTreeVisitor* visitor) {
+ if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
+ return parserVisitor->visitStructDeclaration(this);
+ else
+ return visitor->visitChildren(this);
+}
+
+TorqueParser::StructDeclarationContext* TorqueParser::structDeclaration() {
+ StructDeclarationContext* _localctx =
+ _tracker.createInstance<StructDeclarationContext>(_ctx, getState());
+ enterRule(_localctx, 144, TorqueParser::RuleStructDeclaration);
+
+ auto onExit = finally([=] { exitRule(); });
+ try {
+ enterOuterAlt(_localctx, 1);
+ setState(781);
+ match(TorqueParser::T__20);
+ setState(782);
+ match(TorqueParser::IDENTIFIER);
+ setState(783);
match(TorqueParser::T__12);
+ setState(784);
+ fieldListDeclaration();
+ setState(785);
+ match(TorqueParser::T__13);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -6137,11 +6748,21 @@ TorqueParser::DeclarationContext::DeclarationContext(ParserRuleContext* parent,
size_t invokingState)
: ParserRuleContext(parent, invokingState) {}
+TorqueParser::StructDeclarationContext*
+TorqueParser::DeclarationContext::structDeclaration() {
+ return getRuleContext<TorqueParser::StructDeclarationContext>(0);
+}
+
TorqueParser::TypeDeclarationContext*
TorqueParser::DeclarationContext::typeDeclaration() {
return getRuleContext<TorqueParser::TypeDeclarationContext>(0);
}
+TorqueParser::TypeAliasDeclarationContext*
+TorqueParser::DeclarationContext::typeAliasDeclaration() {
+ return getRuleContext<TorqueParser::TypeAliasDeclarationContext>(0);
+}
+
TorqueParser::BuiltinDeclarationContext*
TorqueParser::DeclarationContext::builtinDeclaration() {
return getRuleContext<TorqueParser::BuiltinDeclarationContext>(0);
@@ -6172,6 +6793,11 @@ TorqueParser::DeclarationContext::externalRuntime() {
return getRuleContext<TorqueParser::ExternalRuntimeContext>(0);
}
+TorqueParser::ExternConstDeclarationContext*
+TorqueParser::DeclarationContext::externConstDeclaration() {
+ return getRuleContext<TorqueParser::ExternConstDeclarationContext>(0);
+}
+
TorqueParser::ConstDeclarationContext*
TorqueParser::DeclarationContext::constDeclaration() {
return getRuleContext<TorqueParser::ConstDeclarationContext>(0);
@@ -6204,66 +6830,87 @@ antlrcpp::Any TorqueParser::DeclarationContext::accept(
TorqueParser::DeclarationContext* TorqueParser::declaration() {
DeclarationContext* _localctx =
_tracker.createInstance<DeclarationContext>(_ctx, getState());
- enterRule(_localctx, 132, TorqueParser::RuleDeclaration);
+ enterRule(_localctx, 146, TorqueParser::RuleDeclaration);
auto onExit = finally([=] { exitRule(); });
try {
- setState(725);
+ setState(798);
_errHandler->sync(this);
switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 66, _ctx)) {
+ _input, 73, _ctx)) {
case 1: {
enterOuterAlt(_localctx, 1);
- setState(717);
- typeDeclaration();
+ setState(787);
+ structDeclaration();
break;
}
case 2: {
enterOuterAlt(_localctx, 2);
- setState(718);
- builtinDeclaration();
+ setState(788);
+ typeDeclaration();
break;
}
case 3: {
enterOuterAlt(_localctx, 3);
- setState(719);
- genericSpecialization();
+ setState(789);
+ typeAliasDeclaration();
break;
}
case 4: {
enterOuterAlt(_localctx, 4);
- setState(720);
- macroDeclaration();
+ setState(790);
+ builtinDeclaration();
break;
}
case 5: {
enterOuterAlt(_localctx, 5);
- setState(721);
- externalMacro();
+ setState(791);
+ genericSpecialization();
break;
}
case 6: {
enterOuterAlt(_localctx, 6);
- setState(722);
- externalBuiltin();
+ setState(792);
+ macroDeclaration();
break;
}
case 7: {
enterOuterAlt(_localctx, 7);
- setState(723);
- externalRuntime();
+ setState(793);
+ externalMacro();
break;
}
case 8: {
enterOuterAlt(_localctx, 8);
- setState(724);
+ setState(794);
+ externalBuiltin();
+ break;
+ }
+
+ case 9: {
+ enterOuterAlt(_localctx, 9);
+ setState(795);
+ externalRuntime();
+ break;
+ }
+
+ case 10: {
+ enterOuterAlt(_localctx, 10);
+ setState(796);
+ externConstDeclaration();
+ break;
+ }
+
+ case 11: {
+ enterOuterAlt(_localctx, 11);
+ setState(797);
constDeclaration();
break;
}
@@ -6330,36 +6977,38 @@ antlrcpp::Any TorqueParser::ModuleDeclarationContext::accept(
TorqueParser::ModuleDeclarationContext* TorqueParser::moduleDeclaration() {
ModuleDeclarationContext* _localctx =
_tracker.createInstance<ModuleDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 134, TorqueParser::RuleModuleDeclaration);
+ enterRule(_localctx, 148, TorqueParser::RuleModuleDeclaration);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(727);
+ setState(800);
match(TorqueParser::MODULE);
- setState(728);
+ setState(801);
match(TorqueParser::IDENTIFIER);
- setState(729);
- match(TorqueParser::T__15);
- setState(733);
+ setState(802);
+ match(TorqueParser::T__12);
+ setState(806);
_errHandler->sync(this);
_la = _input->LA(1);
- while ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__20) |
- (1ULL << TorqueParser::MACRO) | (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::JAVASCRIPT) |
- (1ULL << TorqueParser::EXTERN))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(730);
+ while (
+ (((_la & ~0x3fULL) == 0) &&
+ ((1ULL << _la) &
+ ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__19) |
+ (1ULL << TorqueParser::T__20) | (1ULL << TorqueParser::MACRO) |
+ (1ULL << TorqueParser::BUILTIN) |
+ (1ULL << TorqueParser::JAVASCRIPT) | (1ULL << TorqueParser::CONST) |
+ (1ULL << TorqueParser::EXTERN))) != 0) ||
+ _la == TorqueParser::IDENTIFIER) {
+ setState(803);
declaration();
- setState(735);
+ setState(808);
_errHandler->sync(this);
_la = _input->LA(1);
}
- setState(736);
- match(TorqueParser::T__16);
+ setState(809);
+ match(TorqueParser::T__13);
} catch (RecognitionException& e) {
_errHandler->reportError(this, e);
@@ -6422,40 +7071,43 @@ antlrcpp::Any TorqueParser::FileContext::accept(
TorqueParser::FileContext* TorqueParser::file() {
FileContext* _localctx =
_tracker.createInstance<FileContext>(_ctx, getState());
- enterRule(_localctx, 136, TorqueParser::RuleFile);
+ enterRule(_localctx, 150, TorqueParser::RuleFile);
size_t _la = 0;
auto onExit = finally([=] { exitRule(); });
try {
enterOuterAlt(_localctx, 1);
- setState(742);
+ setState(815);
_errHandler->sync(this);
_la = _input->LA(1);
while (
(((_la & ~0x3fULL) == 0) &&
((1ULL << _la) &
- ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__20) |
- (1ULL << TorqueParser::MACRO) | (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::MODULE) | (1ULL << TorqueParser::JAVASCRIPT) |
+ ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__19) |
+ (1ULL << TorqueParser::T__20) | (1ULL << TorqueParser::MACRO) |
+ (1ULL << TorqueParser::BUILTIN) | (1ULL << TorqueParser::MODULE) |
+ (1ULL << TorqueParser::JAVASCRIPT) | (1ULL << TorqueParser::CONST) |
(1ULL << TorqueParser::EXTERN))) != 0) ||
_la == TorqueParser::IDENTIFIER) {
- setState(740);
+ setState(813);
_errHandler->sync(this);
switch (_input->LA(1)) {
case TorqueParser::MODULE: {
- setState(738);
+ setState(811);
moduleDeclaration();
break;
}
case TorqueParser::T__5:
+ case TorqueParser::T__19:
case TorqueParser::T__20:
case TorqueParser::MACRO:
case TorqueParser::BUILTIN:
case TorqueParser::JAVASCRIPT:
+ case TorqueParser::CONST:
case TorqueParser::EXTERN:
case TorqueParser::IDENTIFIER: {
- setState(739);
+ setState(812);
declaration();
break;
}
@@ -6463,7 +7115,7 @@ TorqueParser::FileContext* TorqueParser::file() {
default:
throw NoViableAltException(this);
}
- setState(744);
+ setState(817);
_errHandler->sync(this);
_la = _input->LA(1);
}
@@ -6480,6 +7132,8 @@ TorqueParser::FileContext* TorqueParser::file() {
bool TorqueParser::sempred(RuleContext* context, size_t ruleIndex,
size_t predicateIndex) {
switch (ruleIndex) {
+ case 0:
+ return typeSempred(dynamic_cast<TypeContext*>(context), predicateIndex);
case 13:
return conditionalExpressionSempred(
dynamic_cast<ConditionalExpressionContext*>(context), predicateIndex);
@@ -6518,10 +7172,21 @@ bool TorqueParser::sempred(RuleContext* context, size_t ruleIndex,
return true;
}
+bool TorqueParser::typeSempred(TypeContext* _localctx, size_t predicateIndex) {
+ switch (predicateIndex) {
+ case 0:
+ return precpred(_ctx, 2);
+
+ default:
+ break;
+ }
+ return true;
+}
+
bool TorqueParser::conditionalExpressionSempred(
ConditionalExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 0:
+ case 1:
return precpred(_ctx, 1);
default:
@@ -6533,7 +7198,7 @@ bool TorqueParser::conditionalExpressionSempred(
bool TorqueParser::logicalORExpressionSempred(
LogicalORExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 1:
+ case 2:
return precpred(_ctx, 1);
default:
@@ -6545,7 +7210,7 @@ bool TorqueParser::logicalORExpressionSempred(
bool TorqueParser::logicalANDExpressionSempred(
LogicalANDExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 2:
+ case 3:
return precpred(_ctx, 1);
default:
@@ -6557,7 +7222,7 @@ bool TorqueParser::logicalANDExpressionSempred(
bool TorqueParser::bitwiseExpressionSempred(BitwiseExpressionContext* _localctx,
size_t predicateIndex) {
switch (predicateIndex) {
- case 3:
+ case 4:
return precpred(_ctx, 1);
default:
@@ -6569,7 +7234,7 @@ bool TorqueParser::bitwiseExpressionSempred(BitwiseExpressionContext* _localctx,
bool TorqueParser::equalityExpressionSempred(
EqualityExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 4:
+ case 5:
return precpred(_ctx, 1);
default:
@@ -6581,7 +7246,7 @@ bool TorqueParser::equalityExpressionSempred(
bool TorqueParser::relationalExpressionSempred(
RelationalExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 5:
+ case 6:
return precpred(_ctx, 1);
default:
@@ -6593,7 +7258,7 @@ bool TorqueParser::relationalExpressionSempred(
bool TorqueParser::shiftExpressionSempred(ShiftExpressionContext* _localctx,
size_t predicateIndex) {
switch (predicateIndex) {
- case 6:
+ case 7:
return precpred(_ctx, 1);
default:
@@ -6605,7 +7270,7 @@ bool TorqueParser::shiftExpressionSempred(ShiftExpressionContext* _localctx,
bool TorqueParser::additiveExpressionSempred(
AdditiveExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 7:
+ case 8:
return precpred(_ctx, 1);
default:
@@ -6617,7 +7282,7 @@ bool TorqueParser::additiveExpressionSempred(
bool TorqueParser::multiplicativeExpressionSempred(
MultiplicativeExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 8:
+ case 9:
return precpred(_ctx, 1);
default:
@@ -6629,10 +7294,10 @@ bool TorqueParser::multiplicativeExpressionSempred(
bool TorqueParser::locationExpressionSempred(
LocationExpressionContext* _localctx, size_t predicateIndex) {
switch (predicateIndex) {
- case 9:
- return precpred(_ctx, 2);
case 10:
- return precpred(_ctx, 1);
+ return precpred(_ctx, 4);
+ case 11:
+ return precpred(_ctx, 2);
default:
break;
@@ -6676,6 +7341,8 @@ std::vector<std::string> TorqueParser::_ruleNames = {
"incrementDecrement",
"assignment",
"assignmentExpression",
+ "structExpression",
+ "functionPointerExpression",
"primaryExpression",
"forInitialization",
"forLoop",
@@ -6697,24 +7364,29 @@ std::vector<std::string> TorqueParser::_ruleNames = {
"continueStatement",
"gotoStatement",
"handlerWithStatement",
- "tryCatch",
+ "tryLabelStatement",
"diagnosticStatement",
"statement",
"statementList",
"statementScope",
"statementBlock",
"helperBody",
+ "fieldDeclaration",
+ "fieldListDeclaration",
"extendsDeclaration",
"generatesDeclaration",
"constexprDeclaration",
"typeDeclaration",
+ "typeAliasDeclaration",
"externalBuiltin",
"externalMacro",
"externalRuntime",
"builtinDeclaration",
"genericSpecialization",
"macroDeclaration",
+ "externConstDeclaration",
"constDeclaration",
+ "structDeclaration",
"declaration",
"moduleDeclaration",
"file"};
@@ -6732,25 +7404,22 @@ std::vector<std::string> TorqueParser::_literalNames = {"",
"'.'",
"'['",
"']'",
+ "'{'",
+ "'}'",
"';'",
"'of'",
"'else'",
- "'{'",
- "'}'",
"'extends'",
"'generates'",
"'operator'",
- "'const'",
+ "'struct'",
"'macro'",
"'builtin'",
"'runtime'",
"'module'",
"'javascript'",
- "'implicit'",
"'deferred'",
"'if'",
- "'cast'",
- "'convert'",
"'for'",
"'while'",
"'return'",
@@ -6760,15 +7429,16 @@ std::vector<std::string> TorqueParser::_literalNames = {"",
"'goto'",
"'otherwise'",
"'try'",
- "'catch'",
"'label'",
"'labels'",
"'tail'",
"'isnt'",
"'is'",
"'let'",
+ "'const'",
"'extern'",
"'assert'",
+ "'check'",
"'unreachable'",
"'debug'",
"'='",
@@ -6826,11 +7496,8 @@ std::vector<std::string> TorqueParser::_symbolicNames = {
"RUNTIME",
"MODULE",
"JAVASCRIPT",
- "IMPLICIT",
"DEFERRED",
"IF",
- "CAST_KEYWORD",
- "CONVERT_KEYWORD",
"FOR",
"WHILE",
"RETURN",
@@ -6840,15 +7507,16 @@ std::vector<std::string> TorqueParser::_symbolicNames = {
"GOTO",
"OTHERWISE",
"TRY",
- "CATCH",
"LABEL",
"LABELS",
"TAIL",
"ISNT",
"IS",
"LET",
+ "CONST",
"EXTERN",
- "ASSERT",
+ "ASSERT_TOKEN",
+ "CHECK_TOKEN",
"UNREACHABLE_TOKEN",
"DEBUG_TOKEN",
"ASSIGNMENT",
@@ -6904,7 +7572,7 @@ TorqueParser::Initializer::Initializer() {
_serializedATN = {
0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
- 0x3, 0x55, 0x2ec, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3,
+ 0x3, 0x53, 0x335, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3,
0x9, 0x3, 0x4, 0x4, 0x9, 0x4, 0x4, 0x5, 0x9,
0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, 0x9, 0x7,
0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x4,
@@ -6935,687 +7603,758 @@ TorqueParser::Initializer::Initializer() {
0x40, 0x9, 0x40, 0x4, 0x41, 0x9, 0x41, 0x4, 0x42,
0x9, 0x42, 0x4, 0x43, 0x9, 0x43, 0x4, 0x44, 0x9,
0x44, 0x4, 0x45, 0x9, 0x45, 0x4, 0x46, 0x9, 0x46,
- 0x3, 0x2, 0x5, 0x2, 0x8e, 0xa, 0x2, 0x3, 0x2,
+ 0x4, 0x47, 0x9, 0x47, 0x4, 0x48, 0x9, 0x48, 0x4,
+ 0x49, 0x9, 0x49, 0x4, 0x4a, 0x9, 0x4a, 0x4, 0x4b,
+ 0x9, 0x4b, 0x4, 0x4c, 0x9, 0x4c, 0x4, 0x4d, 0x9,
+ 0x4d, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2, 0x9d, 0xa,
+ 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2,
0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3,
- 0x2, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2, 0x98, 0xa,
- 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x7, 0x3,
- 0x9d, 0xa, 0x3, 0xc, 0x3, 0xe, 0x3, 0xa0, 0xb,
- 0x3, 0x5, 0x3, 0xa2, 0xa, 0x3, 0x3, 0x4, 0x3,
- 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5, 0x3, 0x5,
+ 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2,
+ 0xab, 0xa, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2,
+ 0x7, 0x2, 0xb0, 0xa, 0x2, 0xc, 0x2, 0xe, 0x2,
+ 0xb3, 0xb, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x7, 0x3, 0xb8, 0xa, 0x3, 0xc, 0x3, 0xe, 0x3,
+ 0xbb, 0xb, 0x3, 0x5, 0x3, 0xbd, 0xa, 0x3, 0x3,
+ 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5,
0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3,
- 0x5, 0x3, 0x5, 0x7, 0x5, 0xb0, 0xa, 0x5, 0xc,
- 0x5, 0xe, 0x5, 0xb3, 0xb, 0x5, 0x3, 0x5, 0x5,
- 0x5, 0xb6, 0xa, 0x5, 0x3, 0x6, 0x3, 0x6, 0x5,
- 0x6, 0xba, 0xa, 0x6, 0x3, 0x6, 0x3, 0x6, 0x7,
- 0x6, 0xbe, 0xa, 0x6, 0xc, 0x6, 0xe, 0x6, 0xc1,
- 0xb, 0x6, 0x3, 0x6, 0x3, 0x6, 0x5, 0x6, 0xc5,
- 0xa, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3,
- 0x6, 0x5, 0x6, 0xcb, 0xa, 0x6, 0x3, 0x7, 0x3,
- 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x5, 0x7,
- 0xd2, 0xa, 0x7, 0x3, 0x8, 0x3, 0x8, 0x5, 0x8,
- 0xd6, 0xa, 0x8, 0x3, 0x9, 0x3, 0x9, 0x3, 0x9,
- 0x3, 0x9, 0x7, 0x9, 0xdc, 0xa, 0x9, 0xc, 0x9,
- 0xe, 0x9, 0xdf, 0xb, 0x9, 0x5, 0x9, 0xe1, 0xa,
- 0x9, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa,
- 0x7, 0xa, 0xe7, 0xa, 0xa, 0xc, 0xa, 0xe, 0xa,
- 0xea, 0xb, 0xa, 0x5, 0xa, 0xec, 0xa, 0xa, 0x3,
- 0xb, 0x3, 0xb, 0x3, 0xb, 0x5, 0xb, 0xf1, 0xa,
- 0xb, 0x3, 0xc, 0x3, 0xc, 0x5, 0xc, 0xf5, 0xa,
- 0xc, 0x3, 0xc, 0x3, 0xc, 0x7, 0xc, 0xf9, 0xa,
- 0xc, 0xc, 0xc, 0xe, 0xc, 0xfc, 0xb, 0xc, 0x3,
+ 0x5, 0x3, 0x5, 0x3, 0x5, 0x7, 0x5, 0xcb, 0xa,
+ 0x5, 0xc, 0x5, 0xe, 0x5, 0xce, 0xb, 0x5, 0x3,
+ 0x5, 0x5, 0x5, 0xd1, 0xa, 0x5, 0x3, 0x6, 0x3,
+ 0x6, 0x5, 0x6, 0xd5, 0xa, 0x6, 0x3, 0x6, 0x3,
+ 0x6, 0x7, 0x6, 0xd9, 0xa, 0x6, 0xc, 0x6, 0xe,
+ 0x6, 0xdc, 0xb, 0x6, 0x3, 0x6, 0x3, 0x6, 0x5,
+ 0x6, 0xe0, 0xa, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3,
+ 0x6, 0x3, 0x6, 0x5, 0x6, 0xe6, 0xa, 0x6, 0x3,
+ 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7,
+ 0x5, 0x7, 0xed, 0xa, 0x7, 0x3, 0x8, 0x3, 0x8,
+ 0x5, 0x8, 0xf1, 0xa, 0x8, 0x3, 0x9, 0x3, 0x9,
+ 0x3, 0x9, 0x3, 0x9, 0x7, 0x9, 0xf7, 0xa, 0x9,
+ 0xc, 0x9, 0xe, 0x9, 0xfa, 0xb, 0x9, 0x5, 0x9,
+ 0xfc, 0xa, 0x9, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa,
+ 0x3, 0xa, 0x7, 0xa, 0x102, 0xa, 0xa, 0xc, 0xa,
+ 0xe, 0xa, 0x105, 0xb, 0xa, 0x5, 0xa, 0x107, 0xa,
+ 0xa, 0x3, 0xb, 0x3, 0xb, 0x3, 0xb, 0x5, 0xb,
+ 0x10c, 0xa, 0xb, 0x3, 0xc, 0x3, 0xc, 0x5, 0xc,
+ 0x110, 0xa, 0xc, 0x3, 0xc, 0x3, 0xc, 0x7, 0xc,
+ 0x114, 0xa, 0xc, 0xc, 0xc, 0xe, 0xc, 0x117, 0xb,
0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc,
0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3,
- 0xc, 0x5, 0xc, 0x108, 0xa, 0xc, 0x3, 0xd, 0x3,
- 0xd, 0x5, 0xd, 0x10c, 0xa, 0xd, 0x3, 0xe, 0x3,
- 0xe, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf,
+ 0xc, 0x3, 0xc, 0x5, 0xc, 0x123, 0xa, 0xc, 0x3,
+ 0xd, 0x3, 0xd, 0x5, 0xd, 0x127, 0xa, 0xd, 0x3,
+ 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf,
0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3,
- 0xf, 0x7, 0xf, 0x119, 0xa, 0xf, 0xc, 0xf, 0xe,
- 0xf, 0x11c, 0xb, 0xf, 0x3, 0x10, 0x3, 0x10, 0x3,
- 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x7, 0x10,
- 0x124, 0xa, 0x10, 0xc, 0x10, 0xe, 0x10, 0x127, 0xb,
- 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11,
- 0x3, 0x11, 0x3, 0x11, 0x7, 0x11, 0x12f, 0xa, 0x11,
- 0xc, 0x11, 0xe, 0x11, 0x132, 0xb, 0x11, 0x3, 0x12,
+ 0xf, 0x3, 0xf, 0x7, 0xf, 0x134, 0xa, 0xf, 0xc,
+ 0xf, 0xe, 0xf, 0x137, 0xb, 0xf, 0x3, 0x10, 0x3,
+ 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10,
+ 0x7, 0x10, 0x13f, 0xa, 0x10, 0xc, 0x10, 0xe, 0x10,
+ 0x142, 0xb, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11,
+ 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x7, 0x11, 0x14a,
+ 0xa, 0x11, 0xc, 0x11, 0xe, 0x11, 0x14d, 0xb, 0x11,
0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3,
- 0x12, 0x7, 0x12, 0x13a, 0xa, 0x12, 0xc, 0x12, 0xe,
- 0x12, 0x13d, 0xb, 0x12, 0x3, 0x13, 0x3, 0x13, 0x3,
- 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x7, 0x13,
- 0x145, 0xa, 0x13, 0xc, 0x13, 0xe, 0x13, 0x148, 0xb,
- 0x13, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14,
- 0x3, 0x14, 0x3, 0x14, 0x7, 0x14, 0x150, 0xa, 0x14,
- 0xc, 0x14, 0xe, 0x14, 0x153, 0xb, 0x14, 0x3, 0x15,
+ 0x12, 0x3, 0x12, 0x7, 0x12, 0x155, 0xa, 0x12, 0xc,
+ 0x12, 0xe, 0x12, 0x158, 0xb, 0x12, 0x3, 0x13, 0x3,
+ 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13,
+ 0x7, 0x13, 0x160, 0xa, 0x13, 0xc, 0x13, 0xe, 0x13,
+ 0x163, 0xb, 0x13, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14,
+ 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x7, 0x14, 0x16b,
+ 0xa, 0x14, 0xc, 0x14, 0xe, 0x14, 0x16e, 0xb, 0x14,
0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3,
- 0x15, 0x7, 0x15, 0x15b, 0xa, 0x15, 0xc, 0x15, 0xe,
- 0x15, 0x15e, 0xb, 0x15, 0x3, 0x16, 0x3, 0x16, 0x3,
- 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x7, 0x16,
- 0x166, 0xa, 0x16, 0xc, 0x16, 0xe, 0x16, 0x169, 0xb,
- 0x16, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
- 0x3, 0x17, 0x3, 0x17, 0x7, 0x17, 0x171, 0xa, 0x17,
- 0xc, 0x17, 0xe, 0x17, 0x174, 0xb, 0x17, 0x3, 0x18,
- 0x3, 0x18, 0x3, 0x18, 0x5, 0x18, 0x179, 0xa, 0x18,
- 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x5, 0x19, 0x17e,
+ 0x15, 0x3, 0x15, 0x7, 0x15, 0x176, 0xa, 0x15, 0xc,
+ 0x15, 0xe, 0x15, 0x179, 0xb, 0x15, 0x3, 0x16, 0x3,
+ 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16,
+ 0x7, 0x16, 0x181, 0xa, 0x16, 0xc, 0x16, 0xe, 0x16,
+ 0x184, 0xb, 0x16, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
+ 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x7, 0x17, 0x18c,
+ 0xa, 0x17, 0xc, 0x17, 0xe, 0x17, 0x18f, 0xb, 0x17,
+ 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x5, 0x18, 0x194,
+ 0xa, 0x18, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
+ 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19,
+ 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x5, 0x19, 0x1a1,
0xa, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19,
- 0x7, 0x19, 0x188, 0xa, 0x19, 0xc, 0x19, 0xe, 0x19,
- 0x18b, 0xb, 0x19, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a,
+ 0x7, 0x19, 0x1ab, 0xa, 0x19, 0xc, 0x19, 0xe, 0x19,
+ 0x1ae, 0xb, 0x19, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a,
0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3,
- 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x5, 0x1a, 0x197, 0xa,
+ 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x5, 0x1a, 0x1ba, 0xa,
0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b,
- 0x5, 0x1b, 0x19d, 0xa, 0x1b, 0x5, 0x1b, 0x19f, 0xa,
- 0x1b, 0x3, 0x1c, 0x3, 0x1c, 0x5, 0x1c, 0x1a3, 0xa,
+ 0x5, 0x1b, 0x1c0, 0xa, 0x1b, 0x5, 0x1b, 0x1c2, 0xa,
+ 0x1b, 0x3, 0x1c, 0x3, 0x1c, 0x5, 0x1c, 0x1c6, 0xa,
0x1c, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d,
- 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3,
- 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d,
- 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3,
- 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d,
- 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x5, 0x1d, 0x1be,
- 0xa, 0x1d, 0x3, 0x1e, 0x5, 0x1e, 0x1c1, 0xa, 0x1e,
+ 0x3, 0x1d, 0x7, 0x1d, 0x1cd, 0xa, 0x1d, 0xc, 0x1d,
+ 0xe, 0x1d, 0x1d0, 0xb, 0x1d, 0x5, 0x1d, 0x1d2, 0xa,
+ 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e,
+ 0x3, 0x1e, 0x5, 0x1e, 0x1d9, 0xa, 0x1e, 0x5, 0x1e,
+ 0x1db, 0xa, 0x1e, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3,
- 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
- 0x3, 0x1f, 0x3, 0x20, 0x3, 0x20, 0x5, 0x20, 0x1cf,
- 0xa, 0x20, 0x3, 0x20, 0x3, 0x20, 0x5, 0x20, 0x1d3,
- 0xa, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x21, 0x5,
- 0x21, 0x1d8, 0xa, 0x21, 0x3, 0x22, 0x3, 0x22, 0x3,
- 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22,
- 0x3, 0x22, 0x3, 0x22, 0x3, 0x23, 0x3, 0x23, 0x3,
- 0x24, 0x3, 0x24, 0x5, 0x24, 0x1e7, 0xa, 0x24, 0x3,
- 0x24, 0x3, 0x24, 0x7, 0x24, 0x1eb, 0xa, 0x24, 0xc,
- 0x24, 0xe, 0x24, 0x1ee, 0xb, 0x24, 0x3, 0x24, 0x3,
- 0x24, 0x3, 0x25, 0x3, 0x25, 0x5, 0x25, 0x1f4, 0xa,
- 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x26,
- 0x3, 0x26, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3,
- 0x27, 0x3, 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28,
- 0x5, 0x28, 0x203, 0xa, 0x28, 0x3, 0x29, 0x5, 0x29,
- 0x206, 0xa, 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a,
- 0x3, 0x2a, 0x3, 0x2b, 0x3, 0x2b, 0x5, 0x2b, 0x20e,
- 0xa, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3,
- 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x5, 0x2b, 0x216, 0xa,
- 0x2b, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c,
- 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d, 0x5,
- 0x2d, 0x220, 0xa, 0x2d, 0x3, 0x2e, 0x3, 0x2e, 0x3,
- 0x2f, 0x3, 0x2f, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30,
- 0x5, 0x30, 0x229, 0xa, 0x30, 0x3, 0x31, 0x3, 0x31,
- 0x3, 0x31, 0x3, 0x31, 0x5, 0x31, 0x22f, 0xa, 0x31,
- 0x3, 0x31, 0x3, 0x31, 0x3, 0x32, 0x3, 0x32, 0x3,
- 0x32, 0x6, 0x32, 0x236, 0xa, 0x32, 0xd, 0x32, 0xe,
- 0x32, 0x237, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3,
- 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x5, 0x33,
- 0x241, 0xa, 0x33, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x5, 0x34,
- 0x260, 0xa, 0x34, 0x3, 0x35, 0x7, 0x35, 0x263, 0xa,
- 0x35, 0xc, 0x35, 0xe, 0x35, 0x266, 0xb, 0x35, 0x3,
- 0x36, 0x5, 0x36, 0x269, 0xa, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x37, 0x3, 0x37,
- 0x5, 0x37, 0x271, 0xa, 0x37, 0x3, 0x38, 0x3, 0x38,
- 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x3a, 0x3,
- 0x3a, 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3b,
- 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3c, 0x5, 0x3c, 0x281,
- 0xa, 0x3c, 0x3, 0x3c, 0x5, 0x3c, 0x284, 0xa, 0x3c,
- 0x3, 0x3c, 0x5, 0x3c, 0x287, 0xa, 0x3c, 0x3, 0x3c,
- 0x3, 0x3c, 0x3, 0x3d, 0x3, 0x3d, 0x5, 0x3d, 0x28d,
- 0xa, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3,
- 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d,
- 0x3, 0x3d, 0x3, 0x3e, 0x3, 0x3e, 0x5, 0x3e, 0x29a,
- 0xa, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x5, 0x3e, 0x29e,
- 0xa, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3,
- 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e,
- 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f, 0x3,
- 0x3f, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x40, 0x5, 0x40,
- 0x2b0, 0xa, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40,
- 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3,
- 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41,
- 0x3, 0x41, 0x3, 0x41, 0x3, 0x42, 0x3, 0x42, 0x3,
+ 0x1f, 0x5, 0x1f, 0x1e5, 0xa, 0x1f, 0x3, 0x20, 0x5,
+ 0x20, 0x1e8, 0xa, 0x20, 0x3, 0x21, 0x3, 0x21, 0x3,
+ 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21,
+ 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x22, 0x3,
+ 0x22, 0x5, 0x22, 0x1f6, 0xa, 0x22, 0x3, 0x22, 0x3,
+ 0x22, 0x5, 0x22, 0x1fa, 0xa, 0x22, 0x3, 0x22, 0x3,
+ 0x22, 0x3, 0x23, 0x5, 0x23, 0x1ff, 0xa, 0x23, 0x3,
+ 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24,
+ 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3,
+ 0x25, 0x3, 0x25, 0x3, 0x26, 0x3, 0x26, 0x5, 0x26,
+ 0x20e, 0xa, 0x26, 0x3, 0x26, 0x3, 0x26, 0x7, 0x26,
+ 0x212, 0xa, 0x26, 0xc, 0x26, 0xe, 0x26, 0x215, 0xb,
+ 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x27, 0x3, 0x27,
+ 0x5, 0x27, 0x21b, 0xa, 0x27, 0x3, 0x27, 0x3, 0x27,
+ 0x3, 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3,
+ 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a,
+ 0x3, 0x2a, 0x3, 0x2a, 0x5, 0x2a, 0x22a, 0xa, 0x2a,
+ 0x3, 0x2b, 0x5, 0x2b, 0x22d, 0xa, 0x2b, 0x3, 0x2b,
+ 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3,
+ 0x2d, 0x5, 0x2d, 0x235, 0xa, 0x2d, 0x3, 0x2d, 0x3,
+ 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d,
+ 0x5, 0x2d, 0x23d, 0xa, 0x2d, 0x3, 0x2e, 0x3, 0x2e,
+ 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3,
+ 0x2f, 0x3, 0x2f, 0x5, 0x2f, 0x247, 0xa, 0x2f, 0x3,
+ 0x30, 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x32,
+ 0x3, 0x32, 0x3, 0x32, 0x5, 0x32, 0x250, 0xa, 0x32,
+ 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3,
+ 0x34, 0x3, 0x34, 0x3, 0x34, 0x6, 0x34, 0x259, 0xa,
+ 0x34, 0xd, 0x34, 0xe, 0x34, 0x25a, 0x3, 0x35, 0x3,
+ 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35,
+ 0x3, 0x35, 0x5, 0x35, 0x264, 0xa, 0x35, 0x3, 0x36,
+ 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
+ 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
+ 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
+ 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
+ 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
+ 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
+ 0x3, 0x36, 0x5, 0x36, 0x283, 0xa, 0x36, 0x3, 0x37,
+ 0x7, 0x37, 0x286, 0xa, 0x37, 0xc, 0x37, 0xe, 0x37,
+ 0x289, 0xb, 0x37, 0x3, 0x38, 0x5, 0x38, 0x28c, 0xa,
+ 0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38,
+ 0x3, 0x39, 0x3, 0x39, 0x5, 0x39, 0x294, 0xa, 0x39,
+ 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, 0x3,
+ 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3c, 0x7, 0x3c,
+ 0x29e, 0xa, 0x3c, 0xc, 0x3c, 0xe, 0x3c, 0x2a1, 0xb,
+ 0x3c, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3e,
+ 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3f, 0x3, 0x3f, 0x3,
+ 0x3f, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x5, 0x40,
+ 0x2af, 0xa, 0x40, 0x3, 0x40, 0x5, 0x40, 0x2b2, 0xa,
+ 0x40, 0x3, 0x40, 0x5, 0x40, 0x2b5, 0xa, 0x40, 0x3,
+ 0x40, 0x3, 0x40, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41,
+ 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x42, 0x3,
+ 0x42, 0x5, 0x42, 0x2c1, 0xa, 0x42, 0x3, 0x42, 0x3,
0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42,
- 0x3, 0x42, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3,
+ 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x43, 0x3,
+ 0x43, 0x3, 0x43, 0x5, 0x43, 0x2cf, 0xa, 0x43, 0x3,
0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43,
- 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3,
- 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x5, 0x44,
- 0x2d8, 0xa, 0x44, 0x3, 0x45, 0x3, 0x45, 0x3, 0x45,
- 0x3, 0x45, 0x7, 0x45, 0x2de, 0xa, 0x45, 0xc, 0x45,
- 0xe, 0x45, 0x2e1, 0xb, 0x45, 0x3, 0x45, 0x3, 0x45,
- 0x3, 0x46, 0x3, 0x46, 0x7, 0x46, 0x2e7, 0xa, 0x46,
- 0xc, 0x46, 0xe, 0x46, 0x2ea, 0xb, 0x46, 0x3, 0x46,
- 0x2, 0xc, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28,
- 0x2a, 0x2c, 0x30, 0x47, 0x2, 0x4, 0x6, 0x8, 0xa,
- 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c,
- 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e,
- 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40,
- 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52,
- 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64,
- 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76,
- 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88,
- 0x8a, 0x2, 0xb, 0x3, 0x2, 0x3e, 0x3f, 0x4, 0x2,
- 0x38, 0x38, 0x43, 0x43, 0x3, 0x2, 0x44, 0x47, 0x3,
- 0x2, 0x48, 0x4a, 0x3, 0x2, 0x39, 0x3a, 0x3, 0x2,
- 0x3b, 0x3d, 0x5, 0x2, 0x39, 0x3a, 0x40, 0x40, 0x4f,
- 0x4f, 0x3, 0x2, 0x36, 0x37, 0x4, 0x2, 0x41, 0x42,
- 0x51, 0x51, 0x2, 0x304, 0x2, 0x97, 0x3, 0x2, 0x2,
- 0x2, 0x4, 0xa1, 0x3, 0x2, 0x2, 0x2, 0x6, 0xa3,
- 0x3, 0x2, 0x2, 0x2, 0x8, 0xb5, 0x3, 0x2, 0x2,
- 0x2, 0xa, 0xca, 0x3, 0x2, 0x2, 0x2, 0xc, 0xcc,
- 0x3, 0x2, 0x2, 0x2, 0xe, 0xd5, 0x3, 0x2, 0x2,
- 0x2, 0x10, 0xe0, 0x3, 0x2, 0x2, 0x2, 0x12, 0xeb,
- 0x3, 0x2, 0x2, 0x2, 0x14, 0xed, 0x3, 0x2, 0x2,
- 0x2, 0x16, 0x107, 0x3, 0x2, 0x2, 0x2, 0x18, 0x109,
- 0x3, 0x2, 0x2, 0x2, 0x1a, 0x10d, 0x3, 0x2, 0x2,
- 0x2, 0x1c, 0x10f, 0x3, 0x2, 0x2, 0x2, 0x1e, 0x11d,
- 0x3, 0x2, 0x2, 0x2, 0x20, 0x128, 0x3, 0x2, 0x2,
- 0x2, 0x22, 0x133, 0x3, 0x2, 0x2, 0x2, 0x24, 0x13e,
- 0x3, 0x2, 0x2, 0x2, 0x26, 0x149, 0x3, 0x2, 0x2,
- 0x2, 0x28, 0x154, 0x3, 0x2, 0x2, 0x2, 0x2a, 0x15f,
- 0x3, 0x2, 0x2, 0x2, 0x2c, 0x16a, 0x3, 0x2, 0x2,
- 0x2, 0x2e, 0x178, 0x3, 0x2, 0x2, 0x2, 0x30, 0x17a,
- 0x3, 0x2, 0x2, 0x2, 0x32, 0x196, 0x3, 0x2, 0x2,
- 0x2, 0x34, 0x19e, 0x3, 0x2, 0x2, 0x2, 0x36, 0x1a2,
- 0x3, 0x2, 0x2, 0x2, 0x38, 0x1bd, 0x3, 0x2, 0x2,
- 0x2, 0x3a, 0x1c0, 0x3, 0x2, 0x2, 0x2, 0x3c, 0x1c2,
- 0x3, 0x2, 0x2, 0x2, 0x3e, 0x1cc, 0x3, 0x2, 0x2,
- 0x2, 0x40, 0x1d7, 0x3, 0x2, 0x2, 0x2, 0x42, 0x1d9,
- 0x3, 0x2, 0x2, 0x2, 0x44, 0x1e2, 0x3, 0x2, 0x2,
- 0x2, 0x46, 0x1e4, 0x3, 0x2, 0x2, 0x2, 0x48, 0x1f1,
- 0x3, 0x2, 0x2, 0x2, 0x4a, 0x1f8, 0x3, 0x2, 0x2,
- 0x2, 0x4c, 0x1fa, 0x3, 0x2, 0x2, 0x2, 0x4e, 0x1ff,
- 0x3, 0x2, 0x2, 0x2, 0x50, 0x205, 0x3, 0x2, 0x2,
- 0x2, 0x52, 0x209, 0x3, 0x2, 0x2, 0x2, 0x54, 0x20b,
- 0x3, 0x2, 0x2, 0x2, 0x56, 0x217, 0x3, 0x2, 0x2,
- 0x2, 0x58, 0x21d, 0x3, 0x2, 0x2, 0x2, 0x5a, 0x221,
- 0x3, 0x2, 0x2, 0x2, 0x5c, 0x223, 0x3, 0x2, 0x2,
- 0x2, 0x5e, 0x225, 0x3, 0x2, 0x2, 0x2, 0x60, 0x22e,
- 0x3, 0x2, 0x2, 0x2, 0x62, 0x232, 0x3, 0x2, 0x2,
- 0x2, 0x64, 0x240, 0x3, 0x2, 0x2, 0x2, 0x66, 0x25f,
- 0x3, 0x2, 0x2, 0x2, 0x68, 0x264, 0x3, 0x2, 0x2,
- 0x2, 0x6a, 0x268, 0x3, 0x2, 0x2, 0x2, 0x6c, 0x270,
- 0x3, 0x2, 0x2, 0x2, 0x6e, 0x272, 0x3, 0x2, 0x2,
- 0x2, 0x70, 0x274, 0x3, 0x2, 0x2, 0x2, 0x72, 0x277,
- 0x3, 0x2, 0x2, 0x2, 0x74, 0x27a, 0x3, 0x2, 0x2,
- 0x2, 0x76, 0x27d, 0x3, 0x2, 0x2, 0x2, 0x78, 0x28a,
- 0x3, 0x2, 0x2, 0x2, 0x7a, 0x297, 0x3, 0x2, 0x2,
- 0x2, 0x7c, 0x2a7, 0x3, 0x2, 0x2, 0x2, 0x7e, 0x2af,
- 0x3, 0x2, 0x2, 0x2, 0x80, 0x2b8, 0x3, 0x2, 0x2,
- 0x2, 0x82, 0x2bf, 0x3, 0x2, 0x2, 0x2, 0x84, 0x2c7,
- 0x3, 0x2, 0x2, 0x2, 0x86, 0x2d7, 0x3, 0x2, 0x2,
- 0x2, 0x88, 0x2d9, 0x3, 0x2, 0x2, 0x2, 0x8a, 0x2e8,
- 0x3, 0x2, 0x2, 0x2, 0x8c, 0x8e, 0x7, 0x25, 0x2,
- 0x2, 0x8d, 0x8c, 0x3, 0x2, 0x2, 0x2, 0x8d, 0x8e,
- 0x3, 0x2, 0x2, 0x2, 0x8e, 0x8f, 0x3, 0x2, 0x2,
- 0x2, 0x8f, 0x98, 0x7, 0x51, 0x2, 0x2, 0x90, 0x91,
- 0x7, 0x19, 0x2, 0x2, 0x91, 0x92, 0x7, 0x3, 0x2,
- 0x2, 0x92, 0x93, 0x5, 0x4, 0x3, 0x2, 0x93, 0x94,
- 0x7, 0x4, 0x2, 0x2, 0x94, 0x95, 0x7, 0x5, 0x2,
- 0x2, 0x95, 0x96, 0x5, 0x2, 0x2, 0x2, 0x96, 0x98,
- 0x3, 0x2, 0x2, 0x2, 0x97, 0x8d, 0x3, 0x2, 0x2,
- 0x2, 0x97, 0x90, 0x3, 0x2, 0x2, 0x2, 0x98, 0x3,
- 0x3, 0x2, 0x2, 0x2, 0x99, 0x9e, 0x5, 0x2, 0x2,
- 0x2, 0x9a, 0x9b, 0x7, 0x6, 0x2, 0x2, 0x9b, 0x9d,
- 0x5, 0x2, 0x2, 0x2, 0x9c, 0x9a, 0x3, 0x2, 0x2,
- 0x2, 0x9d, 0xa0, 0x3, 0x2, 0x2, 0x2, 0x9e, 0x9c,
- 0x3, 0x2, 0x2, 0x2, 0x9e, 0x9f, 0x3, 0x2, 0x2,
- 0x2, 0x9f, 0xa2, 0x3, 0x2, 0x2, 0x2, 0xa0, 0x9e,
- 0x3, 0x2, 0x2, 0x2, 0xa1, 0x99, 0x3, 0x2, 0x2,
- 0x2, 0xa1, 0xa2, 0x3, 0x2, 0x2, 0x2, 0xa2, 0x5,
- 0x3, 0x2, 0x2, 0x2, 0xa3, 0xa4, 0x7, 0x44, 0x2,
- 0x2, 0xa4, 0xa5, 0x5, 0x4, 0x3, 0x2, 0xa5, 0xa6,
- 0x7, 0x46, 0x2, 0x2, 0xa6, 0x7, 0x3, 0x2, 0x2,
- 0x2, 0xa7, 0xa8, 0x7, 0x44, 0x2, 0x2, 0xa8, 0xa9,
- 0x7, 0x51, 0x2, 0x2, 0xa9, 0xaa, 0x7, 0x7, 0x2,
- 0x2, 0xaa, 0xb1, 0x7, 0x8, 0x2, 0x2, 0xab, 0xac,
- 0x7, 0x6, 0x2, 0x2, 0xac, 0xad, 0x7, 0x51, 0x2,
- 0x2, 0xad, 0xae, 0x7, 0x7, 0x2, 0x2, 0xae, 0xb0,
- 0x7, 0x8, 0x2, 0x2, 0xaf, 0xab, 0x3, 0x2, 0x2,
- 0x2, 0xb0, 0xb3, 0x3, 0x2, 0x2, 0x2, 0xb1, 0xaf,
- 0x3, 0x2, 0x2, 0x2, 0xb1, 0xb2, 0x3, 0x2, 0x2,
- 0x2, 0xb2, 0xb4, 0x3, 0x2, 0x2, 0x2, 0xb3, 0xb1,
- 0x3, 0x2, 0x2, 0x2, 0xb4, 0xb6, 0x7, 0x46, 0x2,
- 0x2, 0xb5, 0xa7, 0x3, 0x2, 0x2, 0x2, 0xb5, 0xb6,
- 0x3, 0x2, 0x2, 0x2, 0xb6, 0x9, 0x3, 0x2, 0x2,
- 0x2, 0xb7, 0xb9, 0x7, 0x3, 0x2, 0x2, 0xb8, 0xba,
- 0x5, 0x2, 0x2, 0x2, 0xb9, 0xb8, 0x3, 0x2, 0x2,
- 0x2, 0xb9, 0xba, 0x3, 0x2, 0x2, 0x2, 0xba, 0xbf,
- 0x3, 0x2, 0x2, 0x2, 0xbb, 0xbc, 0x7, 0x6, 0x2,
- 0x2, 0xbc, 0xbe, 0x5, 0x2, 0x2, 0x2, 0xbd, 0xbb,
- 0x3, 0x2, 0x2, 0x2, 0xbe, 0xc1, 0x3, 0x2, 0x2,
- 0x2, 0xbf, 0xbd, 0x3, 0x2, 0x2, 0x2, 0xbf, 0xc0,
- 0x3, 0x2, 0x2, 0x2, 0xc0, 0xc4, 0x3, 0x2, 0x2,
- 0x2, 0xc1, 0xbf, 0x3, 0x2, 0x2, 0x2, 0xc2, 0xc3,
- 0x7, 0x6, 0x2, 0x2, 0xc3, 0xc5, 0x7, 0x4b, 0x2,
- 0x2, 0xc4, 0xc2, 0x3, 0x2, 0x2, 0x2, 0xc4, 0xc5,
- 0x3, 0x2, 0x2, 0x2, 0xc5, 0xc6, 0x3, 0x2, 0x2,
- 0x2, 0xc6, 0xcb, 0x7, 0x4, 0x2, 0x2, 0xc7, 0xc8,
- 0x7, 0x3, 0x2, 0x2, 0xc8, 0xc9, 0x7, 0x4b, 0x2,
- 0x2, 0xc9, 0xcb, 0x7, 0x4, 0x2, 0x2, 0xca, 0xb7,
- 0x3, 0x2, 0x2, 0x2, 0xca, 0xc7, 0x3, 0x2, 0x2,
- 0x2, 0xcb, 0xb, 0x3, 0x2, 0x2, 0x2, 0xcc, 0xd1,
- 0x7, 0x51, 0x2, 0x2, 0xcd, 0xce, 0x7, 0x3, 0x2,
- 0x2, 0xce, 0xcf, 0x5, 0x4, 0x3, 0x2, 0xcf, 0xd0,
- 0x7, 0x4, 0x2, 0x2, 0xd0, 0xd2, 0x3, 0x2, 0x2,
- 0x2, 0xd1, 0xcd, 0x3, 0x2, 0x2, 0x2, 0xd1, 0xd2,
- 0x3, 0x2, 0x2, 0x2, 0xd2, 0xd, 0x3, 0x2, 0x2,
- 0x2, 0xd3, 0xd4, 0x7, 0x7, 0x2, 0x2, 0xd4, 0xd6,
- 0x5, 0x2, 0x2, 0x2, 0xd5, 0xd3, 0x3, 0x2, 0x2,
- 0x2, 0xd5, 0xd6, 0x3, 0x2, 0x2, 0x2, 0xd6, 0xf,
- 0x3, 0x2, 0x2, 0x2, 0xd7, 0xd8, 0x7, 0x2d, 0x2,
- 0x2, 0xd8, 0xdd, 0x5, 0xc, 0x7, 0x2, 0xd9, 0xda,
- 0x7, 0x6, 0x2, 0x2, 0xda, 0xdc, 0x5, 0xc, 0x7,
- 0x2, 0xdb, 0xd9, 0x3, 0x2, 0x2, 0x2, 0xdc, 0xdf,
- 0x3, 0x2, 0x2, 0x2, 0xdd, 0xdb, 0x3, 0x2, 0x2,
- 0x2, 0xdd, 0xde, 0x3, 0x2, 0x2, 0x2, 0xde, 0xe1,
- 0x3, 0x2, 0x2, 0x2, 0xdf, 0xdd, 0x3, 0x2, 0x2,
- 0x2, 0xe0, 0xd7, 0x3, 0x2, 0x2, 0x2, 0xe0, 0xe1,
- 0x3, 0x2, 0x2, 0x2, 0xe1, 0x11, 0x3, 0x2, 0x2,
- 0x2, 0xe2, 0xe3, 0x7, 0x29, 0x2, 0x2, 0xe3, 0xe8,
- 0x7, 0x51, 0x2, 0x2, 0xe4, 0xe5, 0x7, 0x6, 0x2,
- 0x2, 0xe5, 0xe7, 0x7, 0x51, 0x2, 0x2, 0xe6, 0xe4,
- 0x3, 0x2, 0x2, 0x2, 0xe7, 0xea, 0x3, 0x2, 0x2,
- 0x2, 0xe8, 0xe6, 0x3, 0x2, 0x2, 0x2, 0xe8, 0xe9,
- 0x3, 0x2, 0x2, 0x2, 0xe9, 0xec, 0x3, 0x2, 0x2,
- 0x2, 0xea, 0xe8, 0x3, 0x2, 0x2, 0x2, 0xeb, 0xe2,
- 0x3, 0x2, 0x2, 0x2, 0xeb, 0xec, 0x3, 0x2, 0x2,
- 0x2, 0xec, 0x13, 0x3, 0x2, 0x2, 0x2, 0xed, 0xee,
- 0x7, 0x51, 0x2, 0x2, 0xee, 0xf0, 0x7, 0x7, 0x2,
- 0x2, 0xef, 0xf1, 0x5, 0x2, 0x2, 0x2, 0xf0, 0xef,
- 0x3, 0x2, 0x2, 0x2, 0xf0, 0xf1, 0x3, 0x2, 0x2,
- 0x2, 0xf1, 0x15, 0x3, 0x2, 0x2, 0x2, 0xf2, 0xf4,
- 0x7, 0x3, 0x2, 0x2, 0xf3, 0xf5, 0x5, 0x14, 0xb,
- 0x2, 0xf4, 0xf3, 0x3, 0x2, 0x2, 0x2, 0xf4, 0xf5,
- 0x3, 0x2, 0x2, 0x2, 0xf5, 0xfa, 0x3, 0x2, 0x2,
- 0x2, 0xf6, 0xf7, 0x7, 0x6, 0x2, 0x2, 0xf7, 0xf9,
- 0x5, 0x14, 0xb, 0x2, 0xf8, 0xf6, 0x3, 0x2, 0x2,
- 0x2, 0xf9, 0xfc, 0x3, 0x2, 0x2, 0x2, 0xfa, 0xf8,
- 0x3, 0x2, 0x2, 0x2, 0xfa, 0xfb, 0x3, 0x2, 0x2,
- 0x2, 0xfb, 0xfd, 0x3, 0x2, 0x2, 0x2, 0xfc, 0xfa,
- 0x3, 0x2, 0x2, 0x2, 0xfd, 0x108, 0x7, 0x4, 0x2,
- 0x2, 0xfe, 0xff, 0x7, 0x3, 0x2, 0x2, 0xff, 0x100,
- 0x5, 0x14, 0xb, 0x2, 0x100, 0x101, 0x7, 0x6, 0x2,
- 0x2, 0x101, 0x102, 0x5, 0x14, 0xb, 0x2, 0x102, 0x103,
- 0x7, 0x6, 0x2, 0x2, 0x103, 0x104, 0x7, 0x4b, 0x2,
- 0x2, 0x104, 0x105, 0x7, 0x51, 0x2, 0x2, 0x105, 0x106,
- 0x7, 0x4, 0x2, 0x2, 0x106, 0x108, 0x3, 0x2, 0x2,
- 0x2, 0x107, 0xf2, 0x3, 0x2, 0x2, 0x2, 0x107, 0xfe,
- 0x3, 0x2, 0x2, 0x2, 0x108, 0x17, 0x3, 0x2, 0x2,
- 0x2, 0x109, 0x10b, 0x7, 0x51, 0x2, 0x2, 0x10a, 0x10c,
- 0x5, 0x16, 0xc, 0x2, 0x10b, 0x10a, 0x3, 0x2, 0x2,
- 0x2, 0x10b, 0x10c, 0x3, 0x2, 0x2, 0x2, 0x10c, 0x19,
- 0x3, 0x2, 0x2, 0x2, 0x10d, 0x10e, 0x5, 0x1c, 0xf,
- 0x2, 0x10e, 0x1b, 0x3, 0x2, 0x2, 0x2, 0x10f, 0x110,
- 0x8, 0xf, 0x1, 0x2, 0x110, 0x111, 0x5, 0x1e, 0x10,
- 0x2, 0x111, 0x11a, 0x3, 0x2, 0x2, 0x2, 0x112, 0x113,
- 0xc, 0x3, 0x2, 0x2, 0x113, 0x114, 0x7, 0x9, 0x2,
- 0x2, 0x114, 0x115, 0x5, 0x1e, 0x10, 0x2, 0x115, 0x116,
- 0x7, 0x7, 0x2, 0x2, 0x116, 0x117, 0x5, 0x1e, 0x10,
- 0x2, 0x117, 0x119, 0x3, 0x2, 0x2, 0x2, 0x118, 0x112,
- 0x3, 0x2, 0x2, 0x2, 0x119, 0x11c, 0x3, 0x2, 0x2,
- 0x2, 0x11a, 0x118, 0x3, 0x2, 0x2, 0x2, 0x11a, 0x11b,
- 0x3, 0x2, 0x2, 0x2, 0x11b, 0x1d, 0x3, 0x2, 0x2,
- 0x2, 0x11c, 0x11a, 0x3, 0x2, 0x2, 0x2, 0x11d, 0x11e,
- 0x8, 0x10, 0x1, 0x2, 0x11e, 0x11f, 0x5, 0x20, 0x11,
- 0x2, 0x11f, 0x125, 0x3, 0x2, 0x2, 0x2, 0x120, 0x121,
- 0xc, 0x3, 0x2, 0x2, 0x121, 0x122, 0x7, 0xa, 0x2,
- 0x2, 0x122, 0x124, 0x5, 0x20, 0x11, 0x2, 0x123, 0x120,
- 0x3, 0x2, 0x2, 0x2, 0x124, 0x127, 0x3, 0x2, 0x2,
- 0x2, 0x125, 0x123, 0x3, 0x2, 0x2, 0x2, 0x125, 0x126,
- 0x3, 0x2, 0x2, 0x2, 0x126, 0x1f, 0x3, 0x2, 0x2,
- 0x2, 0x127, 0x125, 0x3, 0x2, 0x2, 0x2, 0x128, 0x129,
- 0x8, 0x11, 0x1, 0x2, 0x129, 0x12a, 0x5, 0x22, 0x12,
- 0x2, 0x12a, 0x130, 0x3, 0x2, 0x2, 0x2, 0x12b, 0x12c,
- 0xc, 0x3, 0x2, 0x2, 0x12c, 0x12d, 0x7, 0xb, 0x2,
- 0x2, 0x12d, 0x12f, 0x5, 0x22, 0x12, 0x2, 0x12e, 0x12b,
- 0x3, 0x2, 0x2, 0x2, 0x12f, 0x132, 0x3, 0x2, 0x2,
- 0x2, 0x130, 0x12e, 0x3, 0x2, 0x2, 0x2, 0x130, 0x131,
- 0x3, 0x2, 0x2, 0x2, 0x131, 0x21, 0x3, 0x2, 0x2,
- 0x2, 0x132, 0x130, 0x3, 0x2, 0x2, 0x2, 0x133, 0x134,
- 0x8, 0x12, 0x1, 0x2, 0x134, 0x135, 0x5, 0x24, 0x13,
- 0x2, 0x135, 0x13b, 0x3, 0x2, 0x2, 0x2, 0x136, 0x137,
- 0xc, 0x3, 0x2, 0x2, 0x137, 0x138, 0x9, 0x2, 0x2,
- 0x2, 0x138, 0x13a, 0x5, 0x24, 0x13, 0x2, 0x139, 0x136,
- 0x3, 0x2, 0x2, 0x2, 0x13a, 0x13d, 0x3, 0x2, 0x2,
- 0x2, 0x13b, 0x139, 0x3, 0x2, 0x2, 0x2, 0x13b, 0x13c,
- 0x3, 0x2, 0x2, 0x2, 0x13c, 0x23, 0x3, 0x2, 0x2,
- 0x2, 0x13d, 0x13b, 0x3, 0x2, 0x2, 0x2, 0x13e, 0x13f,
- 0x8, 0x13, 0x1, 0x2, 0x13f, 0x140, 0x5, 0x26, 0x14,
- 0x2, 0x140, 0x146, 0x3, 0x2, 0x2, 0x2, 0x141, 0x142,
- 0xc, 0x3, 0x2, 0x2, 0x142, 0x143, 0x9, 0x3, 0x2,
- 0x2, 0x143, 0x145, 0x5, 0x26, 0x14, 0x2, 0x144, 0x141,
- 0x3, 0x2, 0x2, 0x2, 0x145, 0x148, 0x3, 0x2, 0x2,
- 0x2, 0x146, 0x144, 0x3, 0x2, 0x2, 0x2, 0x146, 0x147,
- 0x3, 0x2, 0x2, 0x2, 0x147, 0x25, 0x3, 0x2, 0x2,
- 0x2, 0x148, 0x146, 0x3, 0x2, 0x2, 0x2, 0x149, 0x14a,
- 0x8, 0x14, 0x1, 0x2, 0x14a, 0x14b, 0x5, 0x28, 0x15,
- 0x2, 0x14b, 0x151, 0x3, 0x2, 0x2, 0x2, 0x14c, 0x14d,
- 0xc, 0x3, 0x2, 0x2, 0x14d, 0x14e, 0x9, 0x4, 0x2,
- 0x2, 0x14e, 0x150, 0x5, 0x28, 0x15, 0x2, 0x14f, 0x14c,
- 0x3, 0x2, 0x2, 0x2, 0x150, 0x153, 0x3, 0x2, 0x2,
- 0x2, 0x151, 0x14f, 0x3, 0x2, 0x2, 0x2, 0x151, 0x152,
- 0x3, 0x2, 0x2, 0x2, 0x152, 0x27, 0x3, 0x2, 0x2,
- 0x2, 0x153, 0x151, 0x3, 0x2, 0x2, 0x2, 0x154, 0x155,
- 0x8, 0x15, 0x1, 0x2, 0x155, 0x156, 0x5, 0x2a, 0x16,
- 0x2, 0x156, 0x15c, 0x3, 0x2, 0x2, 0x2, 0x157, 0x158,
- 0xc, 0x3, 0x2, 0x2, 0x158, 0x159, 0x9, 0x5, 0x2,
- 0x2, 0x159, 0x15b, 0x5, 0x2a, 0x16, 0x2, 0x15a, 0x157,
- 0x3, 0x2, 0x2, 0x2, 0x15b, 0x15e, 0x3, 0x2, 0x2,
- 0x2, 0x15c, 0x15a, 0x3, 0x2, 0x2, 0x2, 0x15c, 0x15d,
- 0x3, 0x2, 0x2, 0x2, 0x15d, 0x29, 0x3, 0x2, 0x2,
- 0x2, 0x15e, 0x15c, 0x3, 0x2, 0x2, 0x2, 0x15f, 0x160,
- 0x8, 0x16, 0x1, 0x2, 0x160, 0x161, 0x5, 0x2c, 0x17,
- 0x2, 0x161, 0x167, 0x3, 0x2, 0x2, 0x2, 0x162, 0x163,
- 0xc, 0x3, 0x2, 0x2, 0x163, 0x164, 0x9, 0x6, 0x2,
- 0x2, 0x164, 0x166, 0x5, 0x2c, 0x17, 0x2, 0x165, 0x162,
- 0x3, 0x2, 0x2, 0x2, 0x166, 0x169, 0x3, 0x2, 0x2,
- 0x2, 0x167, 0x165, 0x3, 0x2, 0x2, 0x2, 0x167, 0x168,
- 0x3, 0x2, 0x2, 0x2, 0x168, 0x2b, 0x3, 0x2, 0x2,
- 0x2, 0x169, 0x167, 0x3, 0x2, 0x2, 0x2, 0x16a, 0x16b,
- 0x8, 0x17, 0x1, 0x2, 0x16b, 0x16c, 0x5, 0x2e, 0x18,
- 0x2, 0x16c, 0x172, 0x3, 0x2, 0x2, 0x2, 0x16d, 0x16e,
- 0xc, 0x3, 0x2, 0x2, 0x16e, 0x16f, 0x9, 0x7, 0x2,
- 0x2, 0x16f, 0x171, 0x5, 0x2e, 0x18, 0x2, 0x170, 0x16d,
- 0x3, 0x2, 0x2, 0x2, 0x171, 0x174, 0x3, 0x2, 0x2,
- 0x2, 0x172, 0x170, 0x3, 0x2, 0x2, 0x2, 0x172, 0x173,
- 0x3, 0x2, 0x2, 0x2, 0x173, 0x2d, 0x3, 0x2, 0x2,
- 0x2, 0x174, 0x172, 0x3, 0x2, 0x2, 0x2, 0x175, 0x179,
- 0x5, 0x36, 0x1c, 0x2, 0x176, 0x177, 0x9, 0x8, 0x2,
- 0x2, 0x177, 0x179, 0x5, 0x2e, 0x18, 0x2, 0x178, 0x175,
- 0x3, 0x2, 0x2, 0x2, 0x178, 0x176, 0x3, 0x2, 0x2,
- 0x2, 0x179, 0x2f, 0x3, 0x2, 0x2, 0x2, 0x17a, 0x17b,
- 0x8, 0x19, 0x1, 0x2, 0x17b, 0x17d, 0x7, 0x51, 0x2,
- 0x2, 0x17c, 0x17e, 0x5, 0x6, 0x4, 0x2, 0x17d, 0x17c,
- 0x3, 0x2, 0x2, 0x2, 0x17d, 0x17e, 0x3, 0x2, 0x2,
- 0x2, 0x17e, 0x189, 0x3, 0x2, 0x2, 0x2, 0x17f, 0x180,
- 0xc, 0x4, 0x2, 0x2, 0x180, 0x181, 0x7, 0xc, 0x2,
- 0x2, 0x181, 0x188, 0x7, 0x51, 0x2, 0x2, 0x182, 0x183,
- 0xc, 0x3, 0x2, 0x2, 0x183, 0x184, 0x7, 0xd, 0x2,
- 0x2, 0x184, 0x185, 0x5, 0x1a, 0xe, 0x2, 0x185, 0x186,
- 0x7, 0xe, 0x2, 0x2, 0x186, 0x188, 0x3, 0x2, 0x2,
- 0x2, 0x187, 0x17f, 0x3, 0x2, 0x2, 0x2, 0x187, 0x182,
- 0x3, 0x2, 0x2, 0x2, 0x188, 0x18b, 0x3, 0x2, 0x2,
- 0x2, 0x189, 0x187, 0x3, 0x2, 0x2, 0x2, 0x189, 0x18a,
- 0x3, 0x2, 0x2, 0x2, 0x18a, 0x31, 0x3, 0x2, 0x2,
- 0x2, 0x18b, 0x189, 0x3, 0x2, 0x2, 0x2, 0x18c, 0x18d,
- 0x7, 0x4d, 0x2, 0x2, 0x18d, 0x197, 0x5, 0x30, 0x19,
- 0x2, 0x18e, 0x18f, 0x7, 0x4e, 0x2, 0x2, 0x18f, 0x197,
- 0x5, 0x30, 0x19, 0x2, 0x190, 0x191, 0x5, 0x30, 0x19,
- 0x2, 0x191, 0x192, 0x7, 0x4d, 0x2, 0x2, 0x192, 0x197,
- 0x3, 0x2, 0x2, 0x2, 0x193, 0x194, 0x5, 0x30, 0x19,
- 0x2, 0x194, 0x195, 0x7, 0x4e, 0x2, 0x2, 0x195, 0x197,
- 0x3, 0x2, 0x2, 0x2, 0x196, 0x18c, 0x3, 0x2, 0x2,
- 0x2, 0x196, 0x18e, 0x3, 0x2, 0x2, 0x2, 0x196, 0x190,
- 0x3, 0x2, 0x2, 0x2, 0x196, 0x193, 0x3, 0x2, 0x2,
- 0x2, 0x197, 0x33, 0x3, 0x2, 0x2, 0x2, 0x198, 0x19f,
- 0x5, 0x32, 0x1a, 0x2, 0x199, 0x19c, 0x5, 0x30, 0x19,
- 0x2, 0x19a, 0x19b, 0x9, 0x9, 0x2, 0x2, 0x19b, 0x19d,
- 0x5, 0x1a, 0xe, 0x2, 0x19c, 0x19a, 0x3, 0x2, 0x2,
- 0x2, 0x19c, 0x19d, 0x3, 0x2, 0x2, 0x2, 0x19d, 0x19f,
- 0x3, 0x2, 0x2, 0x2, 0x19e, 0x198, 0x3, 0x2, 0x2,
- 0x2, 0x19e, 0x199, 0x3, 0x2, 0x2, 0x2, 0x19f, 0x35,
- 0x3, 0x2, 0x2, 0x2, 0x1a0, 0x1a3, 0x5, 0x38, 0x1d,
- 0x2, 0x1a1, 0x1a3, 0x5, 0x34, 0x1b, 0x2, 0x1a2, 0x1a0,
- 0x3, 0x2, 0x2, 0x2, 0x1a2, 0x1a1, 0x3, 0x2, 0x2,
- 0x2, 0x1a3, 0x37, 0x3, 0x2, 0x2, 0x2, 0x1a4, 0x1be,
- 0x5, 0x48, 0x25, 0x2, 0x1a5, 0x1be, 0x7, 0x55, 0x2,
- 0x2, 0x1a6, 0x1be, 0x7, 0x50, 0x2, 0x2, 0x1a7, 0x1a8,
- 0x7, 0x20, 0x2, 0x2, 0x1a8, 0x1a9, 0x7, 0x44, 0x2,
- 0x2, 0x1a9, 0x1aa, 0x5, 0x2, 0x2, 0x2, 0x1aa, 0x1ab,
- 0x7, 0x46, 0x2, 0x2, 0x1ab, 0x1ac, 0x7, 0x3, 0x2,
- 0x2, 0x1ac, 0x1ad, 0x5, 0x1a, 0xe, 0x2, 0x1ad, 0x1ae,
- 0x7, 0x4, 0x2, 0x2, 0x1ae, 0x1af, 0x7, 0x29, 0x2,
- 0x2, 0x1af, 0x1b0, 0x7, 0x51, 0x2, 0x2, 0x1b0, 0x1be,
- 0x3, 0x2, 0x2, 0x2, 0x1b1, 0x1b2, 0x7, 0x21, 0x2,
- 0x2, 0x1b2, 0x1b3, 0x7, 0x44, 0x2, 0x2, 0x1b3, 0x1b4,
- 0x5, 0x2, 0x2, 0x2, 0x1b4, 0x1b5, 0x7, 0x46, 0x2,
- 0x2, 0x1b5, 0x1b6, 0x7, 0x3, 0x2, 0x2, 0x1b6, 0x1b7,
- 0x5, 0x1a, 0xe, 0x2, 0x1b7, 0x1b8, 0x7, 0x4, 0x2,
- 0x2, 0x1b8, 0x1be, 0x3, 0x2, 0x2, 0x2, 0x1b9, 0x1ba,
- 0x7, 0x3, 0x2, 0x2, 0x1ba, 0x1bb, 0x5, 0x1a, 0xe,
- 0x2, 0x1bb, 0x1bc, 0x7, 0x4, 0x2, 0x2, 0x1bc, 0x1be,
- 0x3, 0x2, 0x2, 0x2, 0x1bd, 0x1a4, 0x3, 0x2, 0x2,
- 0x2, 0x1bd, 0x1a5, 0x3, 0x2, 0x2, 0x2, 0x1bd, 0x1a6,
- 0x3, 0x2, 0x2, 0x2, 0x1bd, 0x1a7, 0x3, 0x2, 0x2,
- 0x2, 0x1bd, 0x1b1, 0x3, 0x2, 0x2, 0x2, 0x1bd, 0x1b9,
- 0x3, 0x2, 0x2, 0x2, 0x1be, 0x39, 0x3, 0x2, 0x2,
- 0x2, 0x1bf, 0x1c1, 0x5, 0x4e, 0x28, 0x2, 0x1c0, 0x1bf,
- 0x3, 0x2, 0x2, 0x2, 0x1c0, 0x1c1, 0x3, 0x2, 0x2,
- 0x2, 0x1c1, 0x3b, 0x3, 0x2, 0x2, 0x2, 0x1c2, 0x1c3,
- 0x7, 0x22, 0x2, 0x2, 0x1c3, 0x1c4, 0x7, 0x3, 0x2,
- 0x2, 0x1c4, 0x1c5, 0x5, 0x3a, 0x1e, 0x2, 0x1c5, 0x1c6,
- 0x7, 0xf, 0x2, 0x2, 0x1c6, 0x1c7, 0x5, 0x1a, 0xe,
- 0x2, 0x1c7, 0x1c8, 0x7, 0xf, 0x2, 0x2, 0x1c8, 0x1c9,
- 0x5, 0x34, 0x1b, 0x2, 0x1c9, 0x1ca, 0x7, 0x4, 0x2,
- 0x2, 0x1ca, 0x1cb, 0x5, 0x6c, 0x37, 0x2, 0x1cb, 0x3d,
- 0x3, 0x2, 0x2, 0x2, 0x1cc, 0x1ce, 0x7, 0xd, 0x2,
- 0x2, 0x1cd, 0x1cf, 0x5, 0x1a, 0xe, 0x2, 0x1ce, 0x1cd,
- 0x3, 0x2, 0x2, 0x2, 0x1ce, 0x1cf, 0x3, 0x2, 0x2,
- 0x2, 0x1cf, 0x1d0, 0x3, 0x2, 0x2, 0x2, 0x1d0, 0x1d2,
- 0x7, 0x7, 0x2, 0x2, 0x1d1, 0x1d3, 0x5, 0x1a, 0xe,
- 0x2, 0x1d2, 0x1d1, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1d3,
- 0x3, 0x2, 0x2, 0x2, 0x1d3, 0x1d4, 0x3, 0x2, 0x2,
- 0x2, 0x1d4, 0x1d5, 0x7, 0xe, 0x2, 0x2, 0x1d5, 0x3f,
- 0x3, 0x2, 0x2, 0x2, 0x1d6, 0x1d8, 0x5, 0x3e, 0x20,
- 0x2, 0x1d7, 0x1d6, 0x3, 0x2, 0x2, 0x2, 0x1d7, 0x1d8,
- 0x3, 0x2, 0x2, 0x2, 0x1d8, 0x41, 0x3, 0x2, 0x2,
- 0x2, 0x1d9, 0x1da, 0x7, 0x22, 0x2, 0x2, 0x1da, 0x1db,
- 0x7, 0x3, 0x2, 0x2, 0x1db, 0x1dc, 0x5, 0x4c, 0x27,
- 0x2, 0x1dc, 0x1dd, 0x7, 0x10, 0x2, 0x2, 0x1dd, 0x1de,
- 0x5, 0x1a, 0xe, 0x2, 0x1de, 0x1df, 0x5, 0x40, 0x21,
- 0x2, 0x1df, 0x1e0, 0x7, 0x4, 0x2, 0x2, 0x1e0, 0x1e1,
- 0x5, 0x6c, 0x37, 0x2, 0x1e1, 0x43, 0x3, 0x2, 0x2,
- 0x2, 0x1e2, 0x1e3, 0x5, 0x1a, 0xe, 0x2, 0x1e3, 0x45,
- 0x3, 0x2, 0x2, 0x2, 0x1e4, 0x1e6, 0x7, 0x3, 0x2,
- 0x2, 0x1e5, 0x1e7, 0x5, 0x44, 0x23, 0x2, 0x1e6, 0x1e5,
- 0x3, 0x2, 0x2, 0x2, 0x1e6, 0x1e7, 0x3, 0x2, 0x2,
- 0x2, 0x1e7, 0x1ec, 0x3, 0x2, 0x2, 0x2, 0x1e8, 0x1e9,
- 0x7, 0x6, 0x2, 0x2, 0x1e9, 0x1eb, 0x5, 0x44, 0x23,
- 0x2, 0x1ea, 0x1e8, 0x3, 0x2, 0x2, 0x2, 0x1eb, 0x1ee,
- 0x3, 0x2, 0x2, 0x2, 0x1ec, 0x1ea, 0x3, 0x2, 0x2,
- 0x2, 0x1ec, 0x1ed, 0x3, 0x2, 0x2, 0x2, 0x1ed, 0x1ef,
- 0x3, 0x2, 0x2, 0x2, 0x1ee, 0x1ec, 0x3, 0x2, 0x2,
- 0x2, 0x1ef, 0x1f0, 0x7, 0x4, 0x2, 0x2, 0x1f0, 0x47,
- 0x3, 0x2, 0x2, 0x2, 0x1f1, 0x1f3, 0x9, 0xa, 0x2,
- 0x2, 0x1f2, 0x1f4, 0x5, 0x6, 0x4, 0x2, 0x1f3, 0x1f2,
- 0x3, 0x2, 0x2, 0x2, 0x1f3, 0x1f4, 0x3, 0x2, 0x2,
- 0x2, 0x1f4, 0x1f5, 0x3, 0x2, 0x2, 0x2, 0x1f5, 0x1f6,
- 0x5, 0x46, 0x24, 0x2, 0x1f6, 0x1f7, 0x5, 0x12, 0xa,
- 0x2, 0x1f7, 0x49, 0x3, 0x2, 0x2, 0x2, 0x1f8, 0x1f9,
- 0x7, 0x51, 0x2, 0x2, 0x1f9, 0x4b, 0x3, 0x2, 0x2,
- 0x2, 0x1fa, 0x1fb, 0x7, 0x31, 0x2, 0x2, 0x1fb, 0x1fc,
- 0x7, 0x51, 0x2, 0x2, 0x1fc, 0x1fd, 0x7, 0x7, 0x2,
- 0x2, 0x1fd, 0x1fe, 0x5, 0x2, 0x2, 0x2, 0x1fe, 0x4d,
- 0x3, 0x2, 0x2, 0x2, 0x1ff, 0x202, 0x5, 0x4c, 0x27,
- 0x2, 0x200, 0x201, 0x7, 0x36, 0x2, 0x2, 0x201, 0x203,
- 0x5, 0x1a, 0xe, 0x2, 0x202, 0x200, 0x3, 0x2, 0x2,
- 0x2, 0x202, 0x203, 0x3, 0x2, 0x2, 0x2, 0x203, 0x4f,
- 0x3, 0x2, 0x2, 0x2, 0x204, 0x206, 0x7, 0x2e, 0x2,
- 0x2, 0x205, 0x204, 0x3, 0x2, 0x2, 0x2, 0x205, 0x206,
- 0x3, 0x2, 0x2, 0x2, 0x206, 0x207, 0x3, 0x2, 0x2,
- 0x2, 0x207, 0x208, 0x5, 0x48, 0x25, 0x2, 0x208, 0x51,
- 0x3, 0x2, 0x2, 0x2, 0x209, 0x20a, 0x5, 0x34, 0x1b,
- 0x2, 0x20a, 0x53, 0x3, 0x2, 0x2, 0x2, 0x20b, 0x20d,
- 0x7, 0x1f, 0x2, 0x2, 0x20c, 0x20e, 0x7, 0x25, 0x2,
- 0x2, 0x20d, 0x20c, 0x3, 0x2, 0x2, 0x2, 0x20d, 0x20e,
- 0x3, 0x2, 0x2, 0x2, 0x20e, 0x20f, 0x3, 0x2, 0x2,
- 0x2, 0x20f, 0x210, 0x7, 0x3, 0x2, 0x2, 0x210, 0x211,
- 0x5, 0x1a, 0xe, 0x2, 0x211, 0x212, 0x7, 0x4, 0x2,
- 0x2, 0x212, 0x215, 0x5, 0x6c, 0x37, 0x2, 0x213, 0x214,
- 0x7, 0x11, 0x2, 0x2, 0x214, 0x216, 0x5, 0x6c, 0x37,
- 0x2, 0x215, 0x213, 0x3, 0x2, 0x2, 0x2, 0x215, 0x216,
- 0x3, 0x2, 0x2, 0x2, 0x216, 0x55, 0x3, 0x2, 0x2,
- 0x2, 0x217, 0x218, 0x7, 0x23, 0x2, 0x2, 0x218, 0x219,
- 0x7, 0x3, 0x2, 0x2, 0x219, 0x21a, 0x5, 0x1a, 0xe,
- 0x2, 0x21a, 0x21b, 0x7, 0x4, 0x2, 0x2, 0x21b, 0x21c,
- 0x5, 0x6c, 0x37, 0x2, 0x21c, 0x57, 0x3, 0x2, 0x2,
- 0x2, 0x21d, 0x21f, 0x7, 0x24, 0x2, 0x2, 0x21e, 0x220,
- 0x5, 0x1a, 0xe, 0x2, 0x21f, 0x21e, 0x3, 0x2, 0x2,
- 0x2, 0x21f, 0x220, 0x3, 0x2, 0x2, 0x2, 0x220, 0x59,
- 0x3, 0x2, 0x2, 0x2, 0x221, 0x222, 0x7, 0x27, 0x2,
- 0x2, 0x222, 0x5b, 0x3, 0x2, 0x2, 0x2, 0x223, 0x224,
- 0x7, 0x26, 0x2, 0x2, 0x224, 0x5d, 0x3, 0x2, 0x2,
- 0x2, 0x225, 0x226, 0x7, 0x28, 0x2, 0x2, 0x226, 0x228,
- 0x5, 0x4a, 0x26, 0x2, 0x227, 0x229, 0x5, 0x46, 0x24,
- 0x2, 0x228, 0x227, 0x3, 0x2, 0x2, 0x2, 0x228, 0x229,
- 0x3, 0x2, 0x2, 0x2, 0x229, 0x5f, 0x3, 0x2, 0x2,
- 0x2, 0x22a, 0x22b, 0x7, 0x2b, 0x2, 0x2, 0x22b, 0x22f,
- 0x7, 0x51, 0x2, 0x2, 0x22c, 0x22d, 0x7, 0x2c, 0x2,
- 0x2, 0x22d, 0x22f, 0x5, 0x18, 0xd, 0x2, 0x22e, 0x22a,
- 0x3, 0x2, 0x2, 0x2, 0x22e, 0x22c, 0x3, 0x2, 0x2,
- 0x2, 0x22f, 0x230, 0x3, 0x2, 0x2, 0x2, 0x230, 0x231,
- 0x5, 0x6c, 0x37, 0x2, 0x231, 0x61, 0x3, 0x2, 0x2,
- 0x2, 0x232, 0x233, 0x7, 0x2a, 0x2, 0x2, 0x233, 0x235,
- 0x5, 0x6c, 0x37, 0x2, 0x234, 0x236, 0x5, 0x60, 0x31,
- 0x2, 0x235, 0x234, 0x3, 0x2, 0x2, 0x2, 0x236, 0x237,
- 0x3, 0x2, 0x2, 0x2, 0x237, 0x235, 0x3, 0x2, 0x2,
- 0x2, 0x237, 0x238, 0x3, 0x2, 0x2, 0x2, 0x238, 0x63,
- 0x3, 0x2, 0x2, 0x2, 0x239, 0x23a, 0x7, 0x33, 0x2,
- 0x2, 0x23a, 0x23b, 0x7, 0x3, 0x2, 0x2, 0x23b, 0x23c,
- 0x5, 0x1a, 0xe, 0x2, 0x23c, 0x23d, 0x7, 0x4, 0x2,
- 0x2, 0x23d, 0x241, 0x3, 0x2, 0x2, 0x2, 0x23e, 0x241,
- 0x7, 0x34, 0x2, 0x2, 0x23f, 0x241, 0x7, 0x35, 0x2,
- 0x2, 0x240, 0x239, 0x3, 0x2, 0x2, 0x2, 0x240, 0x23e,
- 0x3, 0x2, 0x2, 0x2, 0x240, 0x23f, 0x3, 0x2, 0x2,
- 0x2, 0x241, 0x65, 0x3, 0x2, 0x2, 0x2, 0x242, 0x243,
- 0x5, 0x4e, 0x28, 0x2, 0x243, 0x244, 0x7, 0xf, 0x2,
- 0x2, 0x244, 0x260, 0x3, 0x2, 0x2, 0x2, 0x245, 0x246,
- 0x5, 0x50, 0x29, 0x2, 0x246, 0x247, 0x7, 0xf, 0x2,
- 0x2, 0x247, 0x260, 0x3, 0x2, 0x2, 0x2, 0x248, 0x249,
- 0x5, 0x52, 0x2a, 0x2, 0x249, 0x24a, 0x7, 0xf, 0x2,
- 0x2, 0x24a, 0x260, 0x3, 0x2, 0x2, 0x2, 0x24b, 0x24c,
- 0x5, 0x58, 0x2d, 0x2, 0x24c, 0x24d, 0x7, 0xf, 0x2,
- 0x2, 0x24d, 0x260, 0x3, 0x2, 0x2, 0x2, 0x24e, 0x24f,
- 0x5, 0x5a, 0x2e, 0x2, 0x24f, 0x250, 0x7, 0xf, 0x2,
- 0x2, 0x250, 0x260, 0x3, 0x2, 0x2, 0x2, 0x251, 0x252,
- 0x5, 0x5c, 0x2f, 0x2, 0x252, 0x253, 0x7, 0xf, 0x2,
- 0x2, 0x253, 0x260, 0x3, 0x2, 0x2, 0x2, 0x254, 0x255,
- 0x5, 0x5e, 0x30, 0x2, 0x255, 0x256, 0x7, 0xf, 0x2,
- 0x2, 0x256, 0x260, 0x3, 0x2, 0x2, 0x2, 0x257, 0x260,
- 0x5, 0x54, 0x2b, 0x2, 0x258, 0x259, 0x5, 0x64, 0x33,
- 0x2, 0x259, 0x25a, 0x7, 0xf, 0x2, 0x2, 0x25a, 0x260,
- 0x3, 0x2, 0x2, 0x2, 0x25b, 0x260, 0x5, 0x56, 0x2c,
- 0x2, 0x25c, 0x260, 0x5, 0x42, 0x22, 0x2, 0x25d, 0x260,
- 0x5, 0x3c, 0x1f, 0x2, 0x25e, 0x260, 0x5, 0x62, 0x32,
- 0x2, 0x25f, 0x242, 0x3, 0x2, 0x2, 0x2, 0x25f, 0x245,
- 0x3, 0x2, 0x2, 0x2, 0x25f, 0x248, 0x3, 0x2, 0x2,
- 0x2, 0x25f, 0x24b, 0x3, 0x2, 0x2, 0x2, 0x25f, 0x24e,
- 0x3, 0x2, 0x2, 0x2, 0x25f, 0x251, 0x3, 0x2, 0x2,
- 0x2, 0x25f, 0x254, 0x3, 0x2, 0x2, 0x2, 0x25f, 0x257,
- 0x3, 0x2, 0x2, 0x2, 0x25f, 0x258, 0x3, 0x2, 0x2,
- 0x2, 0x25f, 0x25b, 0x3, 0x2, 0x2, 0x2, 0x25f, 0x25c,
- 0x3, 0x2, 0x2, 0x2, 0x25f, 0x25d, 0x3, 0x2, 0x2,
- 0x2, 0x25f, 0x25e, 0x3, 0x2, 0x2, 0x2, 0x260, 0x67,
- 0x3, 0x2, 0x2, 0x2, 0x261, 0x263, 0x5, 0x66, 0x34,
- 0x2, 0x262, 0x261, 0x3, 0x2, 0x2, 0x2, 0x263, 0x266,
- 0x3, 0x2, 0x2, 0x2, 0x264, 0x262, 0x3, 0x2, 0x2,
- 0x2, 0x264, 0x265, 0x3, 0x2, 0x2, 0x2, 0x265, 0x69,
- 0x3, 0x2, 0x2, 0x2, 0x266, 0x264, 0x3, 0x2, 0x2,
- 0x2, 0x267, 0x269, 0x7, 0x1e, 0x2, 0x2, 0x268, 0x267,
- 0x3, 0x2, 0x2, 0x2, 0x268, 0x269, 0x3, 0x2, 0x2,
- 0x2, 0x269, 0x26a, 0x3, 0x2, 0x2, 0x2, 0x26a, 0x26b,
- 0x7, 0x12, 0x2, 0x2, 0x26b, 0x26c, 0x5, 0x68, 0x35,
- 0x2, 0x26c, 0x26d, 0x7, 0x13, 0x2, 0x2, 0x26d, 0x6b,
- 0x3, 0x2, 0x2, 0x2, 0x26e, 0x271, 0x5, 0x66, 0x34,
- 0x2, 0x26f, 0x271, 0x5, 0x6a, 0x36, 0x2, 0x270, 0x26e,
- 0x3, 0x2, 0x2, 0x2, 0x270, 0x26f, 0x3, 0x2, 0x2,
- 0x2, 0x271, 0x6d, 0x3, 0x2, 0x2, 0x2, 0x272, 0x273,
- 0x5, 0x6a, 0x36, 0x2, 0x273, 0x6f, 0x3, 0x2, 0x2,
- 0x2, 0x274, 0x275, 0x7, 0x14, 0x2, 0x2, 0x275, 0x276,
- 0x7, 0x51, 0x2, 0x2, 0x276, 0x71, 0x3, 0x2, 0x2,
- 0x2, 0x277, 0x278, 0x7, 0x15, 0x2, 0x2, 0x278, 0x279,
- 0x7, 0x50, 0x2, 0x2, 0x279, 0x73, 0x3, 0x2, 0x2,
- 0x2, 0x27a, 0x27b, 0x7, 0x25, 0x2, 0x2, 0x27b, 0x27c,
- 0x7, 0x50, 0x2, 0x2, 0x27c, 0x75, 0x3, 0x2, 0x2,
- 0x2, 0x27d, 0x27e, 0x7, 0x8, 0x2, 0x2, 0x27e, 0x280,
- 0x7, 0x51, 0x2, 0x2, 0x27f, 0x281, 0x5, 0x70, 0x39,
- 0x2, 0x280, 0x27f, 0x3, 0x2, 0x2, 0x2, 0x280, 0x281,
- 0x3, 0x2, 0x2, 0x2, 0x281, 0x283, 0x3, 0x2, 0x2,
- 0x2, 0x282, 0x284, 0x5, 0x72, 0x3a, 0x2, 0x283, 0x282,
- 0x3, 0x2, 0x2, 0x2, 0x283, 0x284, 0x3, 0x2, 0x2,
- 0x2, 0x284, 0x286, 0x3, 0x2, 0x2, 0x2, 0x285, 0x287,
- 0x5, 0x74, 0x3b, 0x2, 0x286, 0x285, 0x3, 0x2, 0x2,
- 0x2, 0x286, 0x287, 0x3, 0x2, 0x2, 0x2, 0x287, 0x288,
- 0x3, 0x2, 0x2, 0x2, 0x288, 0x289, 0x7, 0xf, 0x2,
- 0x2, 0x289, 0x77, 0x3, 0x2, 0x2, 0x2, 0x28a, 0x28c,
- 0x7, 0x32, 0x2, 0x2, 0x28b, 0x28d, 0x7, 0x1c, 0x2,
- 0x2, 0x28c, 0x28b, 0x3, 0x2, 0x2, 0x2, 0x28c, 0x28d,
- 0x3, 0x2, 0x2, 0x2, 0x28d, 0x28e, 0x3, 0x2, 0x2,
- 0x2, 0x28e, 0x28f, 0x7, 0x19, 0x2, 0x2, 0x28f, 0x290,
- 0x7, 0x51, 0x2, 0x2, 0x290, 0x291, 0x5, 0x8, 0x5,
- 0x2, 0x291, 0x292, 0x7, 0x3, 0x2, 0x2, 0x292, 0x293,
- 0x5, 0x4, 0x3, 0x2, 0x293, 0x294, 0x7, 0x4, 0x2,
- 0x2, 0x294, 0x295, 0x5, 0xe, 0x8, 0x2, 0x295, 0x296,
- 0x7, 0xf, 0x2, 0x2, 0x296, 0x79, 0x3, 0x2, 0x2,
- 0x2, 0x297, 0x29d, 0x7, 0x32, 0x2, 0x2, 0x298, 0x29a,
- 0x7, 0x1d, 0x2, 0x2, 0x299, 0x298, 0x3, 0x2, 0x2,
- 0x2, 0x299, 0x29a, 0x3, 0x2, 0x2, 0x2, 0x29a, 0x29b,
- 0x3, 0x2, 0x2, 0x2, 0x29b, 0x29c, 0x7, 0x16, 0x2,
- 0x2, 0x29c, 0x29e, 0x7, 0x50, 0x2, 0x2, 0x29d, 0x299,
- 0x3, 0x2, 0x2, 0x2, 0x29d, 0x29e, 0x3, 0x2, 0x2,
- 0x2, 0x29e, 0x29f, 0x3, 0x2, 0x2, 0x2, 0x29f, 0x2a0,
- 0x7, 0x18, 0x2, 0x2, 0x2a0, 0x2a1, 0x7, 0x51, 0x2,
- 0x2, 0x2a1, 0x2a2, 0x5, 0x8, 0x5, 0x2, 0x2a2, 0x2a3,
- 0x5, 0xa, 0x6, 0x2, 0x2a3, 0x2a4, 0x5, 0xe, 0x8,
- 0x2, 0x2a4, 0x2a5, 0x5, 0x10, 0x9, 0x2, 0x2a5, 0x2a6,
- 0x7, 0xf, 0x2, 0x2, 0x2a6, 0x7b, 0x3, 0x2, 0x2,
- 0x2, 0x2a7, 0x2a8, 0x7, 0x32, 0x2, 0x2, 0x2a8, 0x2a9,
- 0x7, 0x1a, 0x2, 0x2, 0x2a9, 0x2aa, 0x7, 0x51, 0x2,
- 0x2, 0x2aa, 0x2ab, 0x5, 0xa, 0x6, 0x2, 0x2ab, 0x2ac,
- 0x5, 0xe, 0x8, 0x2, 0x2ac, 0x2ad, 0x7, 0xf, 0x2,
- 0x2, 0x2ad, 0x7d, 0x3, 0x2, 0x2, 0x2, 0x2ae, 0x2b0,
- 0x7, 0x1c, 0x2, 0x2, 0x2af, 0x2ae, 0x3, 0x2, 0x2,
- 0x2, 0x2af, 0x2b0, 0x3, 0x2, 0x2, 0x2, 0x2b0, 0x2b1,
- 0x3, 0x2, 0x2, 0x2, 0x2b1, 0x2b2, 0x7, 0x19, 0x2,
- 0x2, 0x2b2, 0x2b3, 0x7, 0x51, 0x2, 0x2, 0x2b3, 0x2b4,
- 0x5, 0x8, 0x5, 0x2, 0x2b4, 0x2b5, 0x5, 0x16, 0xc,
- 0x2, 0x2b5, 0x2b6, 0x5, 0xe, 0x8, 0x2, 0x2b6, 0x2b7,
- 0x5, 0x6e, 0x38, 0x2, 0x2b7, 0x7f, 0x3, 0x2, 0x2,
- 0x2, 0x2b8, 0x2b9, 0x7, 0x51, 0x2, 0x2, 0x2b9, 0x2ba,
- 0x5, 0x6, 0x4, 0x2, 0x2ba, 0x2bb, 0x5, 0x16, 0xc,
- 0x2, 0x2bb, 0x2bc, 0x5, 0xe, 0x8, 0x2, 0x2bc, 0x2bd,
- 0x5, 0x10, 0x9, 0x2, 0x2bd, 0x2be, 0x5, 0x6e, 0x38,
- 0x2, 0x2be, 0x81, 0x3, 0x2, 0x2, 0x2, 0x2bf, 0x2c0,
- 0x7, 0x18, 0x2, 0x2, 0x2c0, 0x2c1, 0x7, 0x51, 0x2,
- 0x2, 0x2c1, 0x2c2, 0x5, 0x8, 0x5, 0x2, 0x2c2, 0x2c3,
- 0x5, 0x16, 0xc, 0x2, 0x2c3, 0x2c4, 0x5, 0xe, 0x8,
- 0x2, 0x2c4, 0x2c5, 0x5, 0x10, 0x9, 0x2, 0x2c5, 0x2c6,
- 0x5, 0x6e, 0x38, 0x2, 0x2c6, 0x83, 0x3, 0x2, 0x2,
- 0x2, 0x2c7, 0x2c8, 0x7, 0x17, 0x2, 0x2, 0x2c8, 0x2c9,
- 0x7, 0x51, 0x2, 0x2, 0x2c9, 0x2ca, 0x7, 0x7, 0x2,
- 0x2, 0x2ca, 0x2cb, 0x5, 0x2, 0x2, 0x2, 0x2cb, 0x2cc,
- 0x7, 0x36, 0x2, 0x2, 0x2cc, 0x2cd, 0x7, 0x50, 0x2,
- 0x2, 0x2cd, 0x2ce, 0x7, 0xf, 0x2, 0x2, 0x2ce, 0x85,
- 0x3, 0x2, 0x2, 0x2, 0x2cf, 0x2d8, 0x5, 0x76, 0x3c,
- 0x2, 0x2d0, 0x2d8, 0x5, 0x7e, 0x40, 0x2, 0x2d1, 0x2d8,
- 0x5, 0x80, 0x41, 0x2, 0x2d2, 0x2d8, 0x5, 0x82, 0x42,
- 0x2, 0x2d3, 0x2d8, 0x5, 0x7a, 0x3e, 0x2, 0x2d4, 0x2d8,
- 0x5, 0x78, 0x3d, 0x2, 0x2d5, 0x2d8, 0x5, 0x7c, 0x3f,
- 0x2, 0x2d6, 0x2d8, 0x5, 0x84, 0x43, 0x2, 0x2d7, 0x2cf,
- 0x3, 0x2, 0x2, 0x2, 0x2d7, 0x2d0, 0x3, 0x2, 0x2,
- 0x2, 0x2d7, 0x2d1, 0x3, 0x2, 0x2, 0x2, 0x2d7, 0x2d2,
- 0x3, 0x2, 0x2, 0x2, 0x2d7, 0x2d3, 0x3, 0x2, 0x2,
- 0x2, 0x2d7, 0x2d4, 0x3, 0x2, 0x2, 0x2, 0x2d7, 0x2d5,
- 0x3, 0x2, 0x2, 0x2, 0x2d7, 0x2d6, 0x3, 0x2, 0x2,
- 0x2, 0x2d8, 0x87, 0x3, 0x2, 0x2, 0x2, 0x2d9, 0x2da,
- 0x7, 0x1b, 0x2, 0x2, 0x2da, 0x2db, 0x7, 0x51, 0x2,
- 0x2, 0x2db, 0x2df, 0x7, 0x12, 0x2, 0x2, 0x2dc, 0x2de,
- 0x5, 0x86, 0x44, 0x2, 0x2dd, 0x2dc, 0x3, 0x2, 0x2,
- 0x2, 0x2de, 0x2e1, 0x3, 0x2, 0x2, 0x2, 0x2df, 0x2dd,
- 0x3, 0x2, 0x2, 0x2, 0x2df, 0x2e0, 0x3, 0x2, 0x2,
- 0x2, 0x2e0, 0x2e2, 0x3, 0x2, 0x2, 0x2, 0x2e1, 0x2df,
- 0x3, 0x2, 0x2, 0x2, 0x2e2, 0x2e3, 0x7, 0x13, 0x2,
- 0x2, 0x2e3, 0x89, 0x3, 0x2, 0x2, 0x2, 0x2e4, 0x2e7,
- 0x5, 0x88, 0x45, 0x2, 0x2e5, 0x2e7, 0x5, 0x86, 0x44,
- 0x2, 0x2e6, 0x2e4, 0x3, 0x2, 0x2, 0x2, 0x2e6, 0x2e5,
- 0x3, 0x2, 0x2, 0x2, 0x2e7, 0x2ea, 0x3, 0x2, 0x2,
- 0x2, 0x2e8, 0x2e6, 0x3, 0x2, 0x2, 0x2, 0x2e8, 0x2e9,
- 0x3, 0x2, 0x2, 0x2, 0x2e9, 0x8b, 0x3, 0x2, 0x2,
- 0x2, 0x2ea, 0x2e8, 0x3, 0x2, 0x2, 0x2, 0x48, 0x8d,
- 0x97, 0x9e, 0xa1, 0xb1, 0xb5, 0xb9, 0xbf, 0xc4, 0xca,
- 0xd1, 0xd5, 0xdd, 0xe0, 0xe8, 0xeb, 0xf0, 0xf4, 0xfa,
- 0x107, 0x10b, 0x11a, 0x125, 0x130, 0x13b, 0x146, 0x151, 0x15c,
- 0x167, 0x172, 0x178, 0x17d, 0x187, 0x189, 0x196, 0x19c, 0x19e,
- 0x1a2, 0x1bd, 0x1c0, 0x1ce, 0x1d2, 0x1d7, 0x1e6, 0x1ec, 0x1f3,
- 0x202, 0x205, 0x20d, 0x215, 0x21f, 0x228, 0x22e, 0x237, 0x240,
- 0x25f, 0x264, 0x268, 0x270, 0x280, 0x283, 0x286, 0x28c, 0x299,
- 0x29d, 0x2af, 0x2d7, 0x2df, 0x2e6, 0x2e8,
+ 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x44, 0x3,
+ 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44,
+ 0x3, 0x44, 0x3, 0x45, 0x5, 0x45, 0x2e1, 0xa, 0x45,
+ 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3,
+ 0x45, 0x3, 0x45, 0x3, 0x45, 0x5, 0x45, 0x2ea, 0xa,
+ 0x45, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46,
+ 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x47, 0x3,
+ 0x47, 0x5, 0x47, 0x2f5, 0xa, 0x47, 0x3, 0x47, 0x3,
+ 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47,
+ 0x3, 0x47, 0x3, 0x47, 0x5, 0x47, 0x2ff, 0xa, 0x47,
+ 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3,
+ 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x49, 0x3, 0x49,
+ 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3,
+ 0x49, 0x3, 0x49, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a,
+ 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4b, 0x3,
+ 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b,
+ 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3,
+ 0x4b, 0x5, 0x4b, 0x321, 0xa, 0x4b, 0x3, 0x4c, 0x3,
+ 0x4c, 0x3, 0x4c, 0x3, 0x4c, 0x7, 0x4c, 0x327, 0xa,
+ 0x4c, 0xc, 0x4c, 0xe, 0x4c, 0x32a, 0xb, 0x4c, 0x3,
+ 0x4c, 0x3, 0x4c, 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d,
+ 0x330, 0xa, 0x4d, 0xc, 0x4d, 0xe, 0x4d, 0x333, 0xb,
+ 0x4d, 0x3, 0x4d, 0x2, 0xd, 0x2, 0x1c, 0x1e, 0x20,
+ 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x30, 0x4e, 0x2,
+ 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14,
+ 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26,
+ 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38,
+ 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a,
+ 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c,
+ 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
+ 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80,
+ 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92,
+ 0x94, 0x96, 0x98, 0x2, 0xd, 0x3, 0x2, 0x3c, 0x3d,
+ 0x4, 0x2, 0x36, 0x36, 0x41, 0x41, 0x3, 0x2, 0x42,
+ 0x45, 0x3, 0x2, 0x46, 0x48, 0x3, 0x2, 0x37, 0x38,
+ 0x3, 0x2, 0x39, 0x3b, 0x5, 0x2, 0x37, 0x38, 0x3e,
+ 0x3e, 0x4d, 0x4d, 0x3, 0x2, 0x34, 0x35, 0x4, 0x2,
+ 0x3f, 0x40, 0x4f, 0x4f, 0x3, 0x2, 0x2d, 0x2e, 0x3,
+ 0x2, 0x30, 0x31, 0x2, 0x351, 0x2, 0xaa, 0x3, 0x2,
+ 0x2, 0x2, 0x4, 0xbc, 0x3, 0x2, 0x2, 0x2, 0x6,
+ 0xbe, 0x3, 0x2, 0x2, 0x2, 0x8, 0xd0, 0x3, 0x2,
+ 0x2, 0x2, 0xa, 0xe5, 0x3, 0x2, 0x2, 0x2, 0xc,
+ 0xe7, 0x3, 0x2, 0x2, 0x2, 0xe, 0xf0, 0x3, 0x2,
+ 0x2, 0x2, 0x10, 0xfb, 0x3, 0x2, 0x2, 0x2, 0x12,
+ 0x106, 0x3, 0x2, 0x2, 0x2, 0x14, 0x108, 0x3, 0x2,
+ 0x2, 0x2, 0x16, 0x122, 0x3, 0x2, 0x2, 0x2, 0x18,
+ 0x124, 0x3, 0x2, 0x2, 0x2, 0x1a, 0x128, 0x3, 0x2,
+ 0x2, 0x2, 0x1c, 0x12a, 0x3, 0x2, 0x2, 0x2, 0x1e,
+ 0x138, 0x3, 0x2, 0x2, 0x2, 0x20, 0x143, 0x3, 0x2,
+ 0x2, 0x2, 0x22, 0x14e, 0x3, 0x2, 0x2, 0x2, 0x24,
+ 0x159, 0x3, 0x2, 0x2, 0x2, 0x26, 0x164, 0x3, 0x2,
+ 0x2, 0x2, 0x28, 0x16f, 0x3, 0x2, 0x2, 0x2, 0x2a,
+ 0x17a, 0x3, 0x2, 0x2, 0x2, 0x2c, 0x185, 0x3, 0x2,
+ 0x2, 0x2, 0x2e, 0x193, 0x3, 0x2, 0x2, 0x2, 0x30,
+ 0x1a0, 0x3, 0x2, 0x2, 0x2, 0x32, 0x1b9, 0x3, 0x2,
+ 0x2, 0x2, 0x34, 0x1c1, 0x3, 0x2, 0x2, 0x2, 0x36,
+ 0x1c5, 0x3, 0x2, 0x2, 0x2, 0x38, 0x1c7, 0x3, 0x2,
+ 0x2, 0x2, 0x3a, 0x1da, 0x3, 0x2, 0x2, 0x2, 0x3c,
+ 0x1e4, 0x3, 0x2, 0x2, 0x2, 0x3e, 0x1e7, 0x3, 0x2,
+ 0x2, 0x2, 0x40, 0x1e9, 0x3, 0x2, 0x2, 0x2, 0x42,
+ 0x1f3, 0x3, 0x2, 0x2, 0x2, 0x44, 0x1fe, 0x3, 0x2,
+ 0x2, 0x2, 0x46, 0x200, 0x3, 0x2, 0x2, 0x2, 0x48,
+ 0x209, 0x3, 0x2, 0x2, 0x2, 0x4a, 0x20b, 0x3, 0x2,
+ 0x2, 0x2, 0x4c, 0x218, 0x3, 0x2, 0x2, 0x2, 0x4e,
+ 0x21f, 0x3, 0x2, 0x2, 0x2, 0x50, 0x221, 0x3, 0x2,
+ 0x2, 0x2, 0x52, 0x226, 0x3, 0x2, 0x2, 0x2, 0x54,
+ 0x22c, 0x3, 0x2, 0x2, 0x2, 0x56, 0x230, 0x3, 0x2,
+ 0x2, 0x2, 0x58, 0x232, 0x3, 0x2, 0x2, 0x2, 0x5a,
+ 0x23e, 0x3, 0x2, 0x2, 0x2, 0x5c, 0x244, 0x3, 0x2,
+ 0x2, 0x2, 0x5e, 0x248, 0x3, 0x2, 0x2, 0x2, 0x60,
+ 0x24a, 0x3, 0x2, 0x2, 0x2, 0x62, 0x24c, 0x3, 0x2,
+ 0x2, 0x2, 0x64, 0x251, 0x3, 0x2, 0x2, 0x2, 0x66,
+ 0x255, 0x3, 0x2, 0x2, 0x2, 0x68, 0x263, 0x3, 0x2,
+ 0x2, 0x2, 0x6a, 0x282, 0x3, 0x2, 0x2, 0x2, 0x6c,
+ 0x287, 0x3, 0x2, 0x2, 0x2, 0x6e, 0x28b, 0x3, 0x2,
+ 0x2, 0x2, 0x70, 0x293, 0x3, 0x2, 0x2, 0x2, 0x72,
+ 0x295, 0x3, 0x2, 0x2, 0x2, 0x74, 0x297, 0x3, 0x2,
+ 0x2, 0x2, 0x76, 0x29f, 0x3, 0x2, 0x2, 0x2, 0x78,
+ 0x2a2, 0x3, 0x2, 0x2, 0x2, 0x7a, 0x2a5, 0x3, 0x2,
+ 0x2, 0x2, 0x7c, 0x2a8, 0x3, 0x2, 0x2, 0x2, 0x7e,
+ 0x2ab, 0x3, 0x2, 0x2, 0x2, 0x80, 0x2b8, 0x3, 0x2,
+ 0x2, 0x2, 0x82, 0x2be, 0x3, 0x2, 0x2, 0x2, 0x84,
+ 0x2cb, 0x3, 0x2, 0x2, 0x2, 0x86, 0x2d8, 0x3, 0x2,
+ 0x2, 0x2, 0x88, 0x2e0, 0x3, 0x2, 0x2, 0x2, 0x8a,
+ 0x2eb, 0x3, 0x2, 0x2, 0x2, 0x8c, 0x2f4, 0x3, 0x2,
+ 0x2, 0x2, 0x8e, 0x300, 0x3, 0x2, 0x2, 0x2, 0x90,
+ 0x307, 0x3, 0x2, 0x2, 0x2, 0x92, 0x30f, 0x3, 0x2,
+ 0x2, 0x2, 0x94, 0x320, 0x3, 0x2, 0x2, 0x2, 0x96,
+ 0x322, 0x3, 0x2, 0x2, 0x2, 0x98, 0x331, 0x3, 0x2,
+ 0x2, 0x2, 0x9a, 0x9c, 0x8, 0x2, 0x1, 0x2, 0x9b,
+ 0x9d, 0x7, 0x22, 0x2, 0x2, 0x9c, 0x9b, 0x3, 0x2,
+ 0x2, 0x2, 0x9c, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x9d,
+ 0x9e, 0x3, 0x2, 0x2, 0x2, 0x9e, 0xab, 0x7, 0x4f,
+ 0x2, 0x2, 0x9f, 0xa0, 0x7, 0x19, 0x2, 0x2, 0xa0,
+ 0xa1, 0x7, 0x3, 0x2, 0x2, 0xa1, 0xa2, 0x5, 0x4,
+ 0x3, 0x2, 0xa2, 0xa3, 0x7, 0x4, 0x2, 0x2, 0xa3,
+ 0xa4, 0x7, 0x5, 0x2, 0x2, 0xa4, 0xa5, 0x5, 0x2,
+ 0x2, 0x5, 0xa5, 0xab, 0x3, 0x2, 0x2, 0x2, 0xa6,
+ 0xa7, 0x7, 0x3, 0x2, 0x2, 0xa7, 0xa8, 0x5, 0x2,
+ 0x2, 0x2, 0xa8, 0xa9, 0x7, 0x4, 0x2, 0x2, 0xa9,
+ 0xab, 0x3, 0x2, 0x2, 0x2, 0xaa, 0x9a, 0x3, 0x2,
+ 0x2, 0x2, 0xaa, 0x9f, 0x3, 0x2, 0x2, 0x2, 0xaa,
+ 0xa6, 0x3, 0x2, 0x2, 0x2, 0xab, 0xb1, 0x3, 0x2,
+ 0x2, 0x2, 0xac, 0xad, 0xc, 0x4, 0x2, 0x2, 0xad,
+ 0xae, 0x7, 0x3c, 0x2, 0x2, 0xae, 0xb0, 0x5, 0x2,
+ 0x2, 0x5, 0xaf, 0xac, 0x3, 0x2, 0x2, 0x2, 0xb0,
+ 0xb3, 0x3, 0x2, 0x2, 0x2, 0xb1, 0xaf, 0x3, 0x2,
+ 0x2, 0x2, 0xb1, 0xb2, 0x3, 0x2, 0x2, 0x2, 0xb2,
+ 0x3, 0x3, 0x2, 0x2, 0x2, 0xb3, 0xb1, 0x3, 0x2,
+ 0x2, 0x2, 0xb4, 0xb9, 0x5, 0x2, 0x2, 0x2, 0xb5,
+ 0xb6, 0x7, 0x6, 0x2, 0x2, 0xb6, 0xb8, 0x5, 0x2,
+ 0x2, 0x2, 0xb7, 0xb5, 0x3, 0x2, 0x2, 0x2, 0xb8,
+ 0xbb, 0x3, 0x2, 0x2, 0x2, 0xb9, 0xb7, 0x3, 0x2,
+ 0x2, 0x2, 0xb9, 0xba, 0x3, 0x2, 0x2, 0x2, 0xba,
+ 0xbd, 0x3, 0x2, 0x2, 0x2, 0xbb, 0xb9, 0x3, 0x2,
+ 0x2, 0x2, 0xbc, 0xb4, 0x3, 0x2, 0x2, 0x2, 0xbc,
+ 0xbd, 0x3, 0x2, 0x2, 0x2, 0xbd, 0x5, 0x3, 0x2,
+ 0x2, 0x2, 0xbe, 0xbf, 0x7, 0x42, 0x2, 0x2, 0xbf,
+ 0xc0, 0x5, 0x4, 0x3, 0x2, 0xc0, 0xc1, 0x7, 0x44,
+ 0x2, 0x2, 0xc1, 0x7, 0x3, 0x2, 0x2, 0x2, 0xc2,
+ 0xc3, 0x7, 0x42, 0x2, 0x2, 0xc3, 0xc4, 0x7, 0x4f,
+ 0x2, 0x2, 0xc4, 0xc5, 0x7, 0x7, 0x2, 0x2, 0xc5,
+ 0xcc, 0x7, 0x8, 0x2, 0x2, 0xc6, 0xc7, 0x7, 0x6,
+ 0x2, 0x2, 0xc7, 0xc8, 0x7, 0x4f, 0x2, 0x2, 0xc8,
+ 0xc9, 0x7, 0x7, 0x2, 0x2, 0xc9, 0xcb, 0x7, 0x8,
+ 0x2, 0x2, 0xca, 0xc6, 0x3, 0x2, 0x2, 0x2, 0xcb,
+ 0xce, 0x3, 0x2, 0x2, 0x2, 0xcc, 0xca, 0x3, 0x2,
+ 0x2, 0x2, 0xcc, 0xcd, 0x3, 0x2, 0x2, 0x2, 0xcd,
+ 0xcf, 0x3, 0x2, 0x2, 0x2, 0xce, 0xcc, 0x3, 0x2,
+ 0x2, 0x2, 0xcf, 0xd1, 0x7, 0x44, 0x2, 0x2, 0xd0,
+ 0xc2, 0x3, 0x2, 0x2, 0x2, 0xd0, 0xd1, 0x3, 0x2,
+ 0x2, 0x2, 0xd1, 0x9, 0x3, 0x2, 0x2, 0x2, 0xd2,
+ 0xd4, 0x7, 0x3, 0x2, 0x2, 0xd3, 0xd5, 0x5, 0x2,
+ 0x2, 0x2, 0xd4, 0xd3, 0x3, 0x2, 0x2, 0x2, 0xd4,
+ 0xd5, 0x3, 0x2, 0x2, 0x2, 0xd5, 0xda, 0x3, 0x2,
+ 0x2, 0x2, 0xd6, 0xd7, 0x7, 0x6, 0x2, 0x2, 0xd7,
+ 0xd9, 0x5, 0x2, 0x2, 0x2, 0xd8, 0xd6, 0x3, 0x2,
+ 0x2, 0x2, 0xd9, 0xdc, 0x3, 0x2, 0x2, 0x2, 0xda,
+ 0xd8, 0x3, 0x2, 0x2, 0x2, 0xda, 0xdb, 0x3, 0x2,
+ 0x2, 0x2, 0xdb, 0xdf, 0x3, 0x2, 0x2, 0x2, 0xdc,
+ 0xda, 0x3, 0x2, 0x2, 0x2, 0xdd, 0xde, 0x7, 0x6,
+ 0x2, 0x2, 0xde, 0xe0, 0x7, 0x49, 0x2, 0x2, 0xdf,
+ 0xdd, 0x3, 0x2, 0x2, 0x2, 0xdf, 0xe0, 0x3, 0x2,
+ 0x2, 0x2, 0xe0, 0xe1, 0x3, 0x2, 0x2, 0x2, 0xe1,
+ 0xe6, 0x7, 0x4, 0x2, 0x2, 0xe2, 0xe3, 0x7, 0x3,
+ 0x2, 0x2, 0xe3, 0xe4, 0x7, 0x49, 0x2, 0x2, 0xe4,
+ 0xe6, 0x7, 0x4, 0x2, 0x2, 0xe5, 0xd2, 0x3, 0x2,
+ 0x2, 0x2, 0xe5, 0xe2, 0x3, 0x2, 0x2, 0x2, 0xe6,
+ 0xb, 0x3, 0x2, 0x2, 0x2, 0xe7, 0xec, 0x7, 0x4f,
+ 0x2, 0x2, 0xe8, 0xe9, 0x7, 0x3, 0x2, 0x2, 0xe9,
+ 0xea, 0x5, 0x4, 0x3, 0x2, 0xea, 0xeb, 0x7, 0x4,
+ 0x2, 0x2, 0xeb, 0xed, 0x3, 0x2, 0x2, 0x2, 0xec,
+ 0xe8, 0x3, 0x2, 0x2, 0x2, 0xec, 0xed, 0x3, 0x2,
+ 0x2, 0x2, 0xed, 0xd, 0x3, 0x2, 0x2, 0x2, 0xee,
+ 0xef, 0x7, 0x7, 0x2, 0x2, 0xef, 0xf1, 0x5, 0x2,
+ 0x2, 0x2, 0xf0, 0xee, 0x3, 0x2, 0x2, 0x2, 0xf0,
+ 0xf1, 0x3, 0x2, 0x2, 0x2, 0xf1, 0xf, 0x3, 0x2,
+ 0x2, 0x2, 0xf2, 0xf3, 0x7, 0x29, 0x2, 0x2, 0xf3,
+ 0xf8, 0x5, 0xc, 0x7, 0x2, 0xf4, 0xf5, 0x7, 0x6,
+ 0x2, 0x2, 0xf5, 0xf7, 0x5, 0xc, 0x7, 0x2, 0xf6,
+ 0xf4, 0x3, 0x2, 0x2, 0x2, 0xf7, 0xfa, 0x3, 0x2,
+ 0x2, 0x2, 0xf8, 0xf6, 0x3, 0x2, 0x2, 0x2, 0xf8,
+ 0xf9, 0x3, 0x2, 0x2, 0x2, 0xf9, 0xfc, 0x3, 0x2,
+ 0x2, 0x2, 0xfa, 0xf8, 0x3, 0x2, 0x2, 0x2, 0xfb,
+ 0xf2, 0x3, 0x2, 0x2, 0x2, 0xfb, 0xfc, 0x3, 0x2,
+ 0x2, 0x2, 0xfc, 0x11, 0x3, 0x2, 0x2, 0x2, 0xfd,
+ 0xfe, 0x7, 0x26, 0x2, 0x2, 0xfe, 0x103, 0x7, 0x4f,
+ 0x2, 0x2, 0xff, 0x100, 0x7, 0x6, 0x2, 0x2, 0x100,
+ 0x102, 0x7, 0x4f, 0x2, 0x2, 0x101, 0xff, 0x3, 0x2,
+ 0x2, 0x2, 0x102, 0x105, 0x3, 0x2, 0x2, 0x2, 0x103,
+ 0x101, 0x3, 0x2, 0x2, 0x2, 0x103, 0x104, 0x3, 0x2,
+ 0x2, 0x2, 0x104, 0x107, 0x3, 0x2, 0x2, 0x2, 0x105,
+ 0x103, 0x3, 0x2, 0x2, 0x2, 0x106, 0xfd, 0x3, 0x2,
+ 0x2, 0x2, 0x106, 0x107, 0x3, 0x2, 0x2, 0x2, 0x107,
+ 0x13, 0x3, 0x2, 0x2, 0x2, 0x108, 0x109, 0x7, 0x4f,
+ 0x2, 0x2, 0x109, 0x10b, 0x7, 0x7, 0x2, 0x2, 0x10a,
+ 0x10c, 0x5, 0x2, 0x2, 0x2, 0x10b, 0x10a, 0x3, 0x2,
+ 0x2, 0x2, 0x10b, 0x10c, 0x3, 0x2, 0x2, 0x2, 0x10c,
+ 0x15, 0x3, 0x2, 0x2, 0x2, 0x10d, 0x10f, 0x7, 0x3,
+ 0x2, 0x2, 0x10e, 0x110, 0x5, 0x14, 0xb, 0x2, 0x10f,
+ 0x10e, 0x3, 0x2, 0x2, 0x2, 0x10f, 0x110, 0x3, 0x2,
+ 0x2, 0x2, 0x110, 0x115, 0x3, 0x2, 0x2, 0x2, 0x111,
+ 0x112, 0x7, 0x6, 0x2, 0x2, 0x112, 0x114, 0x5, 0x14,
+ 0xb, 0x2, 0x113, 0x111, 0x3, 0x2, 0x2, 0x2, 0x114,
+ 0x117, 0x3, 0x2, 0x2, 0x2, 0x115, 0x113, 0x3, 0x2,
+ 0x2, 0x2, 0x115, 0x116, 0x3, 0x2, 0x2, 0x2, 0x116,
+ 0x118, 0x3, 0x2, 0x2, 0x2, 0x117, 0x115, 0x3, 0x2,
+ 0x2, 0x2, 0x118, 0x123, 0x7, 0x4, 0x2, 0x2, 0x119,
+ 0x11a, 0x7, 0x3, 0x2, 0x2, 0x11a, 0x11b, 0x5, 0x14,
+ 0xb, 0x2, 0x11b, 0x11c, 0x7, 0x6, 0x2, 0x2, 0x11c,
+ 0x11d, 0x5, 0x14, 0xb, 0x2, 0x11d, 0x11e, 0x7, 0x6,
+ 0x2, 0x2, 0x11e, 0x11f, 0x7, 0x49, 0x2, 0x2, 0x11f,
+ 0x120, 0x7, 0x4f, 0x2, 0x2, 0x120, 0x121, 0x7, 0x4,
+ 0x2, 0x2, 0x121, 0x123, 0x3, 0x2, 0x2, 0x2, 0x122,
+ 0x10d, 0x3, 0x2, 0x2, 0x2, 0x122, 0x119, 0x3, 0x2,
+ 0x2, 0x2, 0x123, 0x17, 0x3, 0x2, 0x2, 0x2, 0x124,
+ 0x126, 0x7, 0x4f, 0x2, 0x2, 0x125, 0x127, 0x5, 0x16,
+ 0xc, 0x2, 0x126, 0x125, 0x3, 0x2, 0x2, 0x2, 0x126,
+ 0x127, 0x3, 0x2, 0x2, 0x2, 0x127, 0x19, 0x3, 0x2,
+ 0x2, 0x2, 0x128, 0x129, 0x5, 0x1c, 0xf, 0x2, 0x129,
+ 0x1b, 0x3, 0x2, 0x2, 0x2, 0x12a, 0x12b, 0x8, 0xf,
+ 0x1, 0x2, 0x12b, 0x12c, 0x5, 0x1e, 0x10, 0x2, 0x12c,
+ 0x135, 0x3, 0x2, 0x2, 0x2, 0x12d, 0x12e, 0xc, 0x3,
+ 0x2, 0x2, 0x12e, 0x12f, 0x7, 0x9, 0x2, 0x2, 0x12f,
+ 0x130, 0x5, 0x1e, 0x10, 0x2, 0x130, 0x131, 0x7, 0x7,
+ 0x2, 0x2, 0x131, 0x132, 0x5, 0x1e, 0x10, 0x2, 0x132,
+ 0x134, 0x3, 0x2, 0x2, 0x2, 0x133, 0x12d, 0x3, 0x2,
+ 0x2, 0x2, 0x134, 0x137, 0x3, 0x2, 0x2, 0x2, 0x135,
+ 0x133, 0x3, 0x2, 0x2, 0x2, 0x135, 0x136, 0x3, 0x2,
+ 0x2, 0x2, 0x136, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x137,
+ 0x135, 0x3, 0x2, 0x2, 0x2, 0x138, 0x139, 0x8, 0x10,
+ 0x1, 0x2, 0x139, 0x13a, 0x5, 0x20, 0x11, 0x2, 0x13a,
+ 0x140, 0x3, 0x2, 0x2, 0x2, 0x13b, 0x13c, 0xc, 0x3,
+ 0x2, 0x2, 0x13c, 0x13d, 0x7, 0xa, 0x2, 0x2, 0x13d,
+ 0x13f, 0x5, 0x20, 0x11, 0x2, 0x13e, 0x13b, 0x3, 0x2,
+ 0x2, 0x2, 0x13f, 0x142, 0x3, 0x2, 0x2, 0x2, 0x140,
+ 0x13e, 0x3, 0x2, 0x2, 0x2, 0x140, 0x141, 0x3, 0x2,
+ 0x2, 0x2, 0x141, 0x1f, 0x3, 0x2, 0x2, 0x2, 0x142,
+ 0x140, 0x3, 0x2, 0x2, 0x2, 0x143, 0x144, 0x8, 0x11,
+ 0x1, 0x2, 0x144, 0x145, 0x5, 0x22, 0x12, 0x2, 0x145,
+ 0x14b, 0x3, 0x2, 0x2, 0x2, 0x146, 0x147, 0xc, 0x3,
+ 0x2, 0x2, 0x147, 0x148, 0x7, 0xb, 0x2, 0x2, 0x148,
+ 0x14a, 0x5, 0x22, 0x12, 0x2, 0x149, 0x146, 0x3, 0x2,
+ 0x2, 0x2, 0x14a, 0x14d, 0x3, 0x2, 0x2, 0x2, 0x14b,
+ 0x149, 0x3, 0x2, 0x2, 0x2, 0x14b, 0x14c, 0x3, 0x2,
+ 0x2, 0x2, 0x14c, 0x21, 0x3, 0x2, 0x2, 0x2, 0x14d,
+ 0x14b, 0x3, 0x2, 0x2, 0x2, 0x14e, 0x14f, 0x8, 0x12,
+ 0x1, 0x2, 0x14f, 0x150, 0x5, 0x24, 0x13, 0x2, 0x150,
+ 0x156, 0x3, 0x2, 0x2, 0x2, 0x151, 0x152, 0xc, 0x3,
+ 0x2, 0x2, 0x152, 0x153, 0x9, 0x2, 0x2, 0x2, 0x153,
+ 0x155, 0x5, 0x24, 0x13, 0x2, 0x154, 0x151, 0x3, 0x2,
+ 0x2, 0x2, 0x155, 0x158, 0x3, 0x2, 0x2, 0x2, 0x156,
+ 0x154, 0x3, 0x2, 0x2, 0x2, 0x156, 0x157, 0x3, 0x2,
+ 0x2, 0x2, 0x157, 0x23, 0x3, 0x2, 0x2, 0x2, 0x158,
+ 0x156, 0x3, 0x2, 0x2, 0x2, 0x159, 0x15a, 0x8, 0x13,
+ 0x1, 0x2, 0x15a, 0x15b, 0x5, 0x26, 0x14, 0x2, 0x15b,
+ 0x161, 0x3, 0x2, 0x2, 0x2, 0x15c, 0x15d, 0xc, 0x3,
+ 0x2, 0x2, 0x15d, 0x15e, 0x9, 0x3, 0x2, 0x2, 0x15e,
+ 0x160, 0x5, 0x26, 0x14, 0x2, 0x15f, 0x15c, 0x3, 0x2,
+ 0x2, 0x2, 0x160, 0x163, 0x3, 0x2, 0x2, 0x2, 0x161,
+ 0x15f, 0x3, 0x2, 0x2, 0x2, 0x161, 0x162, 0x3, 0x2,
+ 0x2, 0x2, 0x162, 0x25, 0x3, 0x2, 0x2, 0x2, 0x163,
+ 0x161, 0x3, 0x2, 0x2, 0x2, 0x164, 0x165, 0x8, 0x14,
+ 0x1, 0x2, 0x165, 0x166, 0x5, 0x28, 0x15, 0x2, 0x166,
+ 0x16c, 0x3, 0x2, 0x2, 0x2, 0x167, 0x168, 0xc, 0x3,
+ 0x2, 0x2, 0x168, 0x169, 0x9, 0x4, 0x2, 0x2, 0x169,
+ 0x16b, 0x5, 0x28, 0x15, 0x2, 0x16a, 0x167, 0x3, 0x2,
+ 0x2, 0x2, 0x16b, 0x16e, 0x3, 0x2, 0x2, 0x2, 0x16c,
+ 0x16a, 0x3, 0x2, 0x2, 0x2, 0x16c, 0x16d, 0x3, 0x2,
+ 0x2, 0x2, 0x16d, 0x27, 0x3, 0x2, 0x2, 0x2, 0x16e,
+ 0x16c, 0x3, 0x2, 0x2, 0x2, 0x16f, 0x170, 0x8, 0x15,
+ 0x1, 0x2, 0x170, 0x171, 0x5, 0x2a, 0x16, 0x2, 0x171,
+ 0x177, 0x3, 0x2, 0x2, 0x2, 0x172, 0x173, 0xc, 0x3,
+ 0x2, 0x2, 0x173, 0x174, 0x9, 0x5, 0x2, 0x2, 0x174,
+ 0x176, 0x5, 0x2a, 0x16, 0x2, 0x175, 0x172, 0x3, 0x2,
+ 0x2, 0x2, 0x176, 0x179, 0x3, 0x2, 0x2, 0x2, 0x177,
+ 0x175, 0x3, 0x2, 0x2, 0x2, 0x177, 0x178, 0x3, 0x2,
+ 0x2, 0x2, 0x178, 0x29, 0x3, 0x2, 0x2, 0x2, 0x179,
+ 0x177, 0x3, 0x2, 0x2, 0x2, 0x17a, 0x17b, 0x8, 0x16,
+ 0x1, 0x2, 0x17b, 0x17c, 0x5, 0x2c, 0x17, 0x2, 0x17c,
+ 0x182, 0x3, 0x2, 0x2, 0x2, 0x17d, 0x17e, 0xc, 0x3,
+ 0x2, 0x2, 0x17e, 0x17f, 0x9, 0x6, 0x2, 0x2, 0x17f,
+ 0x181, 0x5, 0x2c, 0x17, 0x2, 0x180, 0x17d, 0x3, 0x2,
+ 0x2, 0x2, 0x181, 0x184, 0x3, 0x2, 0x2, 0x2, 0x182,
+ 0x180, 0x3, 0x2, 0x2, 0x2, 0x182, 0x183, 0x3, 0x2,
+ 0x2, 0x2, 0x183, 0x2b, 0x3, 0x2, 0x2, 0x2, 0x184,
+ 0x182, 0x3, 0x2, 0x2, 0x2, 0x185, 0x186, 0x8, 0x17,
+ 0x1, 0x2, 0x186, 0x187, 0x5, 0x2e, 0x18, 0x2, 0x187,
+ 0x18d, 0x3, 0x2, 0x2, 0x2, 0x188, 0x189, 0xc, 0x3,
+ 0x2, 0x2, 0x189, 0x18a, 0x9, 0x7, 0x2, 0x2, 0x18a,
+ 0x18c, 0x5, 0x2e, 0x18, 0x2, 0x18b, 0x188, 0x3, 0x2,
+ 0x2, 0x2, 0x18c, 0x18f, 0x3, 0x2, 0x2, 0x2, 0x18d,
+ 0x18b, 0x3, 0x2, 0x2, 0x2, 0x18d, 0x18e, 0x3, 0x2,
+ 0x2, 0x2, 0x18e, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x18f,
+ 0x18d, 0x3, 0x2, 0x2, 0x2, 0x190, 0x194, 0x5, 0x36,
+ 0x1c, 0x2, 0x191, 0x192, 0x9, 0x8, 0x2, 0x2, 0x192,
+ 0x194, 0x5, 0x2e, 0x18, 0x2, 0x193, 0x190, 0x3, 0x2,
+ 0x2, 0x2, 0x193, 0x191, 0x3, 0x2, 0x2, 0x2, 0x194,
+ 0x2f, 0x3, 0x2, 0x2, 0x2, 0x195, 0x196, 0x8, 0x19,
+ 0x1, 0x2, 0x196, 0x1a1, 0x7, 0x4f, 0x2, 0x2, 0x197,
+ 0x198, 0x5, 0x3c, 0x1f, 0x2, 0x198, 0x199, 0x7, 0xc,
+ 0x2, 0x2, 0x199, 0x19a, 0x7, 0x4f, 0x2, 0x2, 0x19a,
+ 0x1a1, 0x3, 0x2, 0x2, 0x2, 0x19b, 0x19c, 0x5, 0x3c,
+ 0x1f, 0x2, 0x19c, 0x19d, 0x7, 0xd, 0x2, 0x2, 0x19d,
+ 0x19e, 0x5, 0x1a, 0xe, 0x2, 0x19e, 0x19f, 0x7, 0xe,
+ 0x2, 0x2, 0x19f, 0x1a1, 0x3, 0x2, 0x2, 0x2, 0x1a0,
+ 0x195, 0x3, 0x2, 0x2, 0x2, 0x1a0, 0x197, 0x3, 0x2,
+ 0x2, 0x2, 0x1a0, 0x19b, 0x3, 0x2, 0x2, 0x2, 0x1a1,
+ 0x1ac, 0x3, 0x2, 0x2, 0x2, 0x1a2, 0x1a3, 0xc, 0x6,
+ 0x2, 0x2, 0x1a3, 0x1a4, 0x7, 0xc, 0x2, 0x2, 0x1a4,
+ 0x1ab, 0x7, 0x4f, 0x2, 0x2, 0x1a5, 0x1a6, 0xc, 0x4,
+ 0x2, 0x2, 0x1a6, 0x1a7, 0x7, 0xd, 0x2, 0x2, 0x1a7,
+ 0x1a8, 0x5, 0x1a, 0xe, 0x2, 0x1a8, 0x1a9, 0x7, 0xe,
+ 0x2, 0x2, 0x1a9, 0x1ab, 0x3, 0x2, 0x2, 0x2, 0x1aa,
+ 0x1a2, 0x3, 0x2, 0x2, 0x2, 0x1aa, 0x1a5, 0x3, 0x2,
+ 0x2, 0x2, 0x1ab, 0x1ae, 0x3, 0x2, 0x2, 0x2, 0x1ac,
+ 0x1aa, 0x3, 0x2, 0x2, 0x2, 0x1ac, 0x1ad, 0x3, 0x2,
+ 0x2, 0x2, 0x1ad, 0x31, 0x3, 0x2, 0x2, 0x2, 0x1ae,
+ 0x1ac, 0x3, 0x2, 0x2, 0x2, 0x1af, 0x1b0, 0x7, 0x4b,
+ 0x2, 0x2, 0x1b0, 0x1ba, 0x5, 0x30, 0x19, 0x2, 0x1b1,
+ 0x1b2, 0x7, 0x4c, 0x2, 0x2, 0x1b2, 0x1ba, 0x5, 0x30,
+ 0x19, 0x2, 0x1b3, 0x1b4, 0x5, 0x30, 0x19, 0x2, 0x1b4,
+ 0x1b5, 0x7, 0x4b, 0x2, 0x2, 0x1b5, 0x1ba, 0x3, 0x2,
+ 0x2, 0x2, 0x1b6, 0x1b7, 0x5, 0x30, 0x19, 0x2, 0x1b7,
+ 0x1b8, 0x7, 0x4c, 0x2, 0x2, 0x1b8, 0x1ba, 0x3, 0x2,
+ 0x2, 0x2, 0x1b9, 0x1af, 0x3, 0x2, 0x2, 0x2, 0x1b9,
+ 0x1b1, 0x3, 0x2, 0x2, 0x2, 0x1b9, 0x1b3, 0x3, 0x2,
+ 0x2, 0x2, 0x1b9, 0x1b6, 0x3, 0x2, 0x2, 0x2, 0x1ba,
+ 0x33, 0x3, 0x2, 0x2, 0x2, 0x1bb, 0x1c2, 0x5, 0x32,
+ 0x1a, 0x2, 0x1bc, 0x1bf, 0x5, 0x30, 0x19, 0x2, 0x1bd,
+ 0x1be, 0x9, 0x9, 0x2, 0x2, 0x1be, 0x1c0, 0x5, 0x1a,
+ 0xe, 0x2, 0x1bf, 0x1bd, 0x3, 0x2, 0x2, 0x2, 0x1bf,
+ 0x1c0, 0x3, 0x2, 0x2, 0x2, 0x1c0, 0x1c2, 0x3, 0x2,
+ 0x2, 0x2, 0x1c1, 0x1bb, 0x3, 0x2, 0x2, 0x2, 0x1c1,
+ 0x1bc, 0x3, 0x2, 0x2, 0x2, 0x1c2, 0x35, 0x3, 0x2,
+ 0x2, 0x2, 0x1c3, 0x1c6, 0x5, 0x3a, 0x1e, 0x2, 0x1c4,
+ 0x1c6, 0x5, 0x34, 0x1b, 0x2, 0x1c5, 0x1c3, 0x3, 0x2,
+ 0x2, 0x2, 0x1c5, 0x1c4, 0x3, 0x2, 0x2, 0x2, 0x1c6,
+ 0x37, 0x3, 0x2, 0x2, 0x2, 0x1c7, 0x1c8, 0x7, 0x4f,
+ 0x2, 0x2, 0x1c8, 0x1d1, 0x7, 0xf, 0x2, 0x2, 0x1c9,
+ 0x1ce, 0x5, 0x1a, 0xe, 0x2, 0x1ca, 0x1cb, 0x7, 0x6,
+ 0x2, 0x2, 0x1cb, 0x1cd, 0x5, 0x1a, 0xe, 0x2, 0x1cc,
+ 0x1ca, 0x3, 0x2, 0x2, 0x2, 0x1cd, 0x1d0, 0x3, 0x2,
+ 0x2, 0x2, 0x1ce, 0x1cc, 0x3, 0x2, 0x2, 0x2, 0x1ce,
+ 0x1cf, 0x3, 0x2, 0x2, 0x2, 0x1cf, 0x1d2, 0x3, 0x2,
+ 0x2, 0x2, 0x1d0, 0x1ce, 0x3, 0x2, 0x2, 0x2, 0x1d1,
+ 0x1c9, 0x3, 0x2, 0x2, 0x2, 0x1d1, 0x1d2, 0x3, 0x2,
+ 0x2, 0x2, 0x1d2, 0x1d3, 0x3, 0x2, 0x2, 0x2, 0x1d3,
+ 0x1d4, 0x7, 0x10, 0x2, 0x2, 0x1d4, 0x39, 0x3, 0x2,
+ 0x2, 0x2, 0x1d5, 0x1db, 0x5, 0x3c, 0x1f, 0x2, 0x1d6,
+ 0x1d8, 0x7, 0x4f, 0x2, 0x2, 0x1d7, 0x1d9, 0x5, 0x6,
+ 0x4, 0x2, 0x1d8, 0x1d7, 0x3, 0x2, 0x2, 0x2, 0x1d8,
+ 0x1d9, 0x3, 0x2, 0x2, 0x2, 0x1d9, 0x1db, 0x3, 0x2,
+ 0x2, 0x2, 0x1da, 0x1d5, 0x3, 0x2, 0x2, 0x2, 0x1da,
+ 0x1d6, 0x3, 0x2, 0x2, 0x2, 0x1db, 0x3b, 0x3, 0x2,
+ 0x2, 0x2, 0x1dc, 0x1e5, 0x5, 0x4c, 0x27, 0x2, 0x1dd,
+ 0x1e5, 0x5, 0x38, 0x1d, 0x2, 0x1de, 0x1e5, 0x7, 0x53,
+ 0x2, 0x2, 0x1df, 0x1e5, 0x7, 0x4e, 0x2, 0x2, 0x1e0,
+ 0x1e1, 0x7, 0x3, 0x2, 0x2, 0x1e1, 0x1e2, 0x5, 0x1a,
+ 0xe, 0x2, 0x1e2, 0x1e3, 0x7, 0x4, 0x2, 0x2, 0x1e3,
+ 0x1e5, 0x3, 0x2, 0x2, 0x2, 0x1e4, 0x1dc, 0x3, 0x2,
+ 0x2, 0x2, 0x1e4, 0x1dd, 0x3, 0x2, 0x2, 0x2, 0x1e4,
+ 0x1de, 0x3, 0x2, 0x2, 0x2, 0x1e4, 0x1df, 0x3, 0x2,
+ 0x2, 0x2, 0x1e4, 0x1e0, 0x3, 0x2, 0x2, 0x2, 0x1e5,
+ 0x3d, 0x3, 0x2, 0x2, 0x2, 0x1e6, 0x1e8, 0x5, 0x52,
+ 0x2a, 0x2, 0x1e7, 0x1e6, 0x3, 0x2, 0x2, 0x2, 0x1e7,
+ 0x1e8, 0x3, 0x2, 0x2, 0x2, 0x1e8, 0x3f, 0x3, 0x2,
+ 0x2, 0x2, 0x1e9, 0x1ea, 0x7, 0x1f, 0x2, 0x2, 0x1ea,
+ 0x1eb, 0x7, 0x3, 0x2, 0x2, 0x1eb, 0x1ec, 0x5, 0x3e,
+ 0x20, 0x2, 0x1ec, 0x1ed, 0x7, 0x11, 0x2, 0x2, 0x1ed,
+ 0x1ee, 0x5, 0x1a, 0xe, 0x2, 0x1ee, 0x1ef, 0x7, 0x11,
+ 0x2, 0x2, 0x1ef, 0x1f0, 0x5, 0x34, 0x1b, 0x2, 0x1f0,
+ 0x1f1, 0x7, 0x4, 0x2, 0x2, 0x1f1, 0x1f2, 0x5, 0x70,
+ 0x39, 0x2, 0x1f2, 0x41, 0x3, 0x2, 0x2, 0x2, 0x1f3,
+ 0x1f5, 0x7, 0xd, 0x2, 0x2, 0x1f4, 0x1f6, 0x5, 0x1a,
+ 0xe, 0x2, 0x1f5, 0x1f4, 0x3, 0x2, 0x2, 0x2, 0x1f5,
+ 0x1f6, 0x3, 0x2, 0x2, 0x2, 0x1f6, 0x1f7, 0x3, 0x2,
+ 0x2, 0x2, 0x1f7, 0x1f9, 0x7, 0x7, 0x2, 0x2, 0x1f8,
+ 0x1fa, 0x5, 0x1a, 0xe, 0x2, 0x1f9, 0x1f8, 0x3, 0x2,
+ 0x2, 0x2, 0x1f9, 0x1fa, 0x3, 0x2, 0x2, 0x2, 0x1fa,
+ 0x1fb, 0x3, 0x2, 0x2, 0x2, 0x1fb, 0x1fc, 0x7, 0xe,
+ 0x2, 0x2, 0x1fc, 0x43, 0x3, 0x2, 0x2, 0x2, 0x1fd,
+ 0x1ff, 0x5, 0x42, 0x22, 0x2, 0x1fe, 0x1fd, 0x3, 0x2,
+ 0x2, 0x2, 0x1fe, 0x1ff, 0x3, 0x2, 0x2, 0x2, 0x1ff,
+ 0x45, 0x3, 0x2, 0x2, 0x2, 0x200, 0x201, 0x7, 0x1f,
+ 0x2, 0x2, 0x201, 0x202, 0x7, 0x3, 0x2, 0x2, 0x202,
+ 0x203, 0x5, 0x50, 0x29, 0x2, 0x203, 0x204, 0x7, 0x12,
+ 0x2, 0x2, 0x204, 0x205, 0x5, 0x1a, 0xe, 0x2, 0x205,
+ 0x206, 0x5, 0x44, 0x23, 0x2, 0x206, 0x207, 0x7, 0x4,
+ 0x2, 0x2, 0x207, 0x208, 0x5, 0x70, 0x39, 0x2, 0x208,
+ 0x47, 0x3, 0x2, 0x2, 0x2, 0x209, 0x20a, 0x5, 0x1a,
+ 0xe, 0x2, 0x20a, 0x49, 0x3, 0x2, 0x2, 0x2, 0x20b,
+ 0x20d, 0x7, 0x3, 0x2, 0x2, 0x20c, 0x20e, 0x5, 0x48,
+ 0x25, 0x2, 0x20d, 0x20c, 0x3, 0x2, 0x2, 0x2, 0x20d,
+ 0x20e, 0x3, 0x2, 0x2, 0x2, 0x20e, 0x213, 0x3, 0x2,
+ 0x2, 0x2, 0x20f, 0x210, 0x7, 0x6, 0x2, 0x2, 0x210,
+ 0x212, 0x5, 0x48, 0x25, 0x2, 0x211, 0x20f, 0x3, 0x2,
+ 0x2, 0x2, 0x212, 0x215, 0x3, 0x2, 0x2, 0x2, 0x213,
+ 0x211, 0x3, 0x2, 0x2, 0x2, 0x213, 0x214, 0x3, 0x2,
+ 0x2, 0x2, 0x214, 0x216, 0x3, 0x2, 0x2, 0x2, 0x215,
+ 0x213, 0x3, 0x2, 0x2, 0x2, 0x216, 0x217, 0x7, 0x4,
+ 0x2, 0x2, 0x217, 0x4b, 0x3, 0x2, 0x2, 0x2, 0x218,
+ 0x21a, 0x9, 0xa, 0x2, 0x2, 0x219, 0x21b, 0x5, 0x6,
+ 0x4, 0x2, 0x21a, 0x219, 0x3, 0x2, 0x2, 0x2, 0x21a,
+ 0x21b, 0x3, 0x2, 0x2, 0x2, 0x21b, 0x21c, 0x3, 0x2,
+ 0x2, 0x2, 0x21c, 0x21d, 0x5, 0x4a, 0x26, 0x2, 0x21d,
+ 0x21e, 0x5, 0x12, 0xa, 0x2, 0x21e, 0x4d, 0x3, 0x2,
+ 0x2, 0x2, 0x21f, 0x220, 0x7, 0x4f, 0x2, 0x2, 0x220,
+ 0x4f, 0x3, 0x2, 0x2, 0x2, 0x221, 0x222, 0x9, 0xb,
+ 0x2, 0x2, 0x222, 0x223, 0x7, 0x4f, 0x2, 0x2, 0x223,
+ 0x224, 0x7, 0x7, 0x2, 0x2, 0x224, 0x225, 0x5, 0x2,
+ 0x2, 0x2, 0x225, 0x51, 0x3, 0x2, 0x2, 0x2, 0x226,
+ 0x229, 0x5, 0x50, 0x29, 0x2, 0x227, 0x228, 0x7, 0x34,
+ 0x2, 0x2, 0x228, 0x22a, 0x5, 0x1a, 0xe, 0x2, 0x229,
+ 0x227, 0x3, 0x2, 0x2, 0x2, 0x229, 0x22a, 0x3, 0x2,
+ 0x2, 0x2, 0x22a, 0x53, 0x3, 0x2, 0x2, 0x2, 0x22b,
+ 0x22d, 0x7, 0x2a, 0x2, 0x2, 0x22c, 0x22b, 0x3, 0x2,
+ 0x2, 0x2, 0x22c, 0x22d, 0x3, 0x2, 0x2, 0x2, 0x22d,
+ 0x22e, 0x3, 0x2, 0x2, 0x2, 0x22e, 0x22f, 0x5, 0x4c,
+ 0x27, 0x2, 0x22f, 0x55, 0x3, 0x2, 0x2, 0x2, 0x230,
+ 0x231, 0x5, 0x34, 0x1b, 0x2, 0x231, 0x57, 0x3, 0x2,
+ 0x2, 0x2, 0x232, 0x234, 0x7, 0x1e, 0x2, 0x2, 0x233,
+ 0x235, 0x7, 0x22, 0x2, 0x2, 0x234, 0x233, 0x3, 0x2,
+ 0x2, 0x2, 0x234, 0x235, 0x3, 0x2, 0x2, 0x2, 0x235,
+ 0x236, 0x3, 0x2, 0x2, 0x2, 0x236, 0x237, 0x7, 0x3,
+ 0x2, 0x2, 0x237, 0x238, 0x5, 0x1a, 0xe, 0x2, 0x238,
+ 0x239, 0x7, 0x4, 0x2, 0x2, 0x239, 0x23c, 0x5, 0x70,
+ 0x39, 0x2, 0x23a, 0x23b, 0x7, 0x13, 0x2, 0x2, 0x23b,
+ 0x23d, 0x5, 0x70, 0x39, 0x2, 0x23c, 0x23a, 0x3, 0x2,
+ 0x2, 0x2, 0x23c, 0x23d, 0x3, 0x2, 0x2, 0x2, 0x23d,
+ 0x59, 0x3, 0x2, 0x2, 0x2, 0x23e, 0x23f, 0x7, 0x20,
+ 0x2, 0x2, 0x23f, 0x240, 0x7, 0x3, 0x2, 0x2, 0x240,
+ 0x241, 0x5, 0x1a, 0xe, 0x2, 0x241, 0x242, 0x7, 0x4,
+ 0x2, 0x2, 0x242, 0x243, 0x5, 0x70, 0x39, 0x2, 0x243,
+ 0x5b, 0x3, 0x2, 0x2, 0x2, 0x244, 0x246, 0x7, 0x21,
+ 0x2, 0x2, 0x245, 0x247, 0x5, 0x1a, 0xe, 0x2, 0x246,
+ 0x245, 0x3, 0x2, 0x2, 0x2, 0x246, 0x247, 0x3, 0x2,
+ 0x2, 0x2, 0x247, 0x5d, 0x3, 0x2, 0x2, 0x2, 0x248,
+ 0x249, 0x7, 0x24, 0x2, 0x2, 0x249, 0x5f, 0x3, 0x2,
+ 0x2, 0x2, 0x24a, 0x24b, 0x7, 0x23, 0x2, 0x2, 0x24b,
+ 0x61, 0x3, 0x2, 0x2, 0x2, 0x24c, 0x24d, 0x7, 0x25,
+ 0x2, 0x2, 0x24d, 0x24f, 0x5, 0x4e, 0x28, 0x2, 0x24e,
+ 0x250, 0x5, 0x4a, 0x26, 0x2, 0x24f, 0x24e, 0x3, 0x2,
+ 0x2, 0x2, 0x24f, 0x250, 0x3, 0x2, 0x2, 0x2, 0x250,
+ 0x63, 0x3, 0x2, 0x2, 0x2, 0x251, 0x252, 0x7, 0x28,
+ 0x2, 0x2, 0x252, 0x253, 0x5, 0x18, 0xd, 0x2, 0x253,
+ 0x254, 0x5, 0x70, 0x39, 0x2, 0x254, 0x65, 0x3, 0x2,
+ 0x2, 0x2, 0x255, 0x256, 0x7, 0x27, 0x2, 0x2, 0x256,
+ 0x258, 0x5, 0x70, 0x39, 0x2, 0x257, 0x259, 0x5, 0x64,
+ 0x33, 0x2, 0x258, 0x257, 0x3, 0x2, 0x2, 0x2, 0x259,
+ 0x25a, 0x3, 0x2, 0x2, 0x2, 0x25a, 0x258, 0x3, 0x2,
+ 0x2, 0x2, 0x25a, 0x25b, 0x3, 0x2, 0x2, 0x2, 0x25b,
+ 0x67, 0x3, 0x2, 0x2, 0x2, 0x25c, 0x25d, 0x9, 0xc,
+ 0x2, 0x2, 0x25d, 0x25e, 0x7, 0x3, 0x2, 0x2, 0x25e,
+ 0x25f, 0x5, 0x1a, 0xe, 0x2, 0x25f, 0x260, 0x7, 0x4,
+ 0x2, 0x2, 0x260, 0x264, 0x3, 0x2, 0x2, 0x2, 0x261,
+ 0x264, 0x7, 0x32, 0x2, 0x2, 0x262, 0x264, 0x7, 0x33,
+ 0x2, 0x2, 0x263, 0x25c, 0x3, 0x2, 0x2, 0x2, 0x263,
+ 0x261, 0x3, 0x2, 0x2, 0x2, 0x263, 0x262, 0x3, 0x2,
+ 0x2, 0x2, 0x264, 0x69, 0x3, 0x2, 0x2, 0x2, 0x265,
+ 0x266, 0x5, 0x52, 0x2a, 0x2, 0x266, 0x267, 0x7, 0x11,
+ 0x2, 0x2, 0x267, 0x283, 0x3, 0x2, 0x2, 0x2, 0x268,
+ 0x269, 0x5, 0x54, 0x2b, 0x2, 0x269, 0x26a, 0x7, 0x11,
+ 0x2, 0x2, 0x26a, 0x283, 0x3, 0x2, 0x2, 0x2, 0x26b,
+ 0x26c, 0x5, 0x56, 0x2c, 0x2, 0x26c, 0x26d, 0x7, 0x11,
+ 0x2, 0x2, 0x26d, 0x283, 0x3, 0x2, 0x2, 0x2, 0x26e,
+ 0x26f, 0x5, 0x5c, 0x2f, 0x2, 0x26f, 0x270, 0x7, 0x11,
+ 0x2, 0x2, 0x270, 0x283, 0x3, 0x2, 0x2, 0x2, 0x271,
+ 0x272, 0x5, 0x5e, 0x30, 0x2, 0x272, 0x273, 0x7, 0x11,
+ 0x2, 0x2, 0x273, 0x283, 0x3, 0x2, 0x2, 0x2, 0x274,
+ 0x275, 0x5, 0x60, 0x31, 0x2, 0x275, 0x276, 0x7, 0x11,
+ 0x2, 0x2, 0x276, 0x283, 0x3, 0x2, 0x2, 0x2, 0x277,
+ 0x278, 0x5, 0x62, 0x32, 0x2, 0x278, 0x279, 0x7, 0x11,
+ 0x2, 0x2, 0x279, 0x283, 0x3, 0x2, 0x2, 0x2, 0x27a,
+ 0x283, 0x5, 0x58, 0x2d, 0x2, 0x27b, 0x27c, 0x5, 0x68,
+ 0x35, 0x2, 0x27c, 0x27d, 0x7, 0x11, 0x2, 0x2, 0x27d,
+ 0x283, 0x3, 0x2, 0x2, 0x2, 0x27e, 0x283, 0x5, 0x5a,
+ 0x2e, 0x2, 0x27f, 0x283, 0x5, 0x46, 0x24, 0x2, 0x280,
+ 0x283, 0x5, 0x40, 0x21, 0x2, 0x281, 0x283, 0x5, 0x66,
+ 0x34, 0x2, 0x282, 0x265, 0x3, 0x2, 0x2, 0x2, 0x282,
+ 0x268, 0x3, 0x2, 0x2, 0x2, 0x282, 0x26b, 0x3, 0x2,
+ 0x2, 0x2, 0x282, 0x26e, 0x3, 0x2, 0x2, 0x2, 0x282,
+ 0x271, 0x3, 0x2, 0x2, 0x2, 0x282, 0x274, 0x3, 0x2,
+ 0x2, 0x2, 0x282, 0x277, 0x3, 0x2, 0x2, 0x2, 0x282,
+ 0x27a, 0x3, 0x2, 0x2, 0x2, 0x282, 0x27b, 0x3, 0x2,
+ 0x2, 0x2, 0x282, 0x27e, 0x3, 0x2, 0x2, 0x2, 0x282,
+ 0x27f, 0x3, 0x2, 0x2, 0x2, 0x282, 0x280, 0x3, 0x2,
+ 0x2, 0x2, 0x282, 0x281, 0x3, 0x2, 0x2, 0x2, 0x283,
+ 0x6b, 0x3, 0x2, 0x2, 0x2, 0x284, 0x286, 0x5, 0x6a,
+ 0x36, 0x2, 0x285, 0x284, 0x3, 0x2, 0x2, 0x2, 0x286,
+ 0x289, 0x3, 0x2, 0x2, 0x2, 0x287, 0x285, 0x3, 0x2,
+ 0x2, 0x2, 0x287, 0x288, 0x3, 0x2, 0x2, 0x2, 0x288,
+ 0x6d, 0x3, 0x2, 0x2, 0x2, 0x289, 0x287, 0x3, 0x2,
+ 0x2, 0x2, 0x28a, 0x28c, 0x7, 0x1d, 0x2, 0x2, 0x28b,
+ 0x28a, 0x3, 0x2, 0x2, 0x2, 0x28b, 0x28c, 0x3, 0x2,
+ 0x2, 0x2, 0x28c, 0x28d, 0x3, 0x2, 0x2, 0x2, 0x28d,
+ 0x28e, 0x7, 0xf, 0x2, 0x2, 0x28e, 0x28f, 0x5, 0x6c,
+ 0x37, 0x2, 0x28f, 0x290, 0x7, 0x10, 0x2, 0x2, 0x290,
+ 0x6f, 0x3, 0x2, 0x2, 0x2, 0x291, 0x294, 0x5, 0x6a,
+ 0x36, 0x2, 0x292, 0x294, 0x5, 0x6e, 0x38, 0x2, 0x293,
+ 0x291, 0x3, 0x2, 0x2, 0x2, 0x293, 0x292, 0x3, 0x2,
+ 0x2, 0x2, 0x294, 0x71, 0x3, 0x2, 0x2, 0x2, 0x295,
+ 0x296, 0x5, 0x6e, 0x38, 0x2, 0x296, 0x73, 0x3, 0x2,
+ 0x2, 0x2, 0x297, 0x298, 0x7, 0x4f, 0x2, 0x2, 0x298,
+ 0x299, 0x7, 0x7, 0x2, 0x2, 0x299, 0x29a, 0x5, 0x2,
+ 0x2, 0x2, 0x29a, 0x29b, 0x7, 0x11, 0x2, 0x2, 0x29b,
+ 0x75, 0x3, 0x2, 0x2, 0x2, 0x29c, 0x29e, 0x5, 0x74,
+ 0x3b, 0x2, 0x29d, 0x29c, 0x3, 0x2, 0x2, 0x2, 0x29e,
+ 0x2a1, 0x3, 0x2, 0x2, 0x2, 0x29f, 0x29d, 0x3, 0x2,
+ 0x2, 0x2, 0x29f, 0x2a0, 0x3, 0x2, 0x2, 0x2, 0x2a0,
+ 0x77, 0x3, 0x2, 0x2, 0x2, 0x2a1, 0x29f, 0x3, 0x2,
+ 0x2, 0x2, 0x2a2, 0x2a3, 0x7, 0x14, 0x2, 0x2, 0x2a3,
+ 0x2a4, 0x7, 0x4f, 0x2, 0x2, 0x2a4, 0x79, 0x3, 0x2,
+ 0x2, 0x2, 0x2a5, 0x2a6, 0x7, 0x15, 0x2, 0x2, 0x2a6,
+ 0x2a7, 0x7, 0x4e, 0x2, 0x2, 0x2a7, 0x7b, 0x3, 0x2,
+ 0x2, 0x2, 0x2a8, 0x2a9, 0x7, 0x22, 0x2, 0x2, 0x2a9,
+ 0x2aa, 0x7, 0x4e, 0x2, 0x2, 0x2aa, 0x7d, 0x3, 0x2,
+ 0x2, 0x2, 0x2ab, 0x2ac, 0x7, 0x8, 0x2, 0x2, 0x2ac,
+ 0x2ae, 0x7, 0x4f, 0x2, 0x2, 0x2ad, 0x2af, 0x5, 0x78,
+ 0x3d, 0x2, 0x2ae, 0x2ad, 0x3, 0x2, 0x2, 0x2, 0x2ae,
+ 0x2af, 0x3, 0x2, 0x2, 0x2, 0x2af, 0x2b1, 0x3, 0x2,
+ 0x2, 0x2, 0x2b0, 0x2b2, 0x5, 0x7a, 0x3e, 0x2, 0x2b1,
+ 0x2b0, 0x3, 0x2, 0x2, 0x2, 0x2b1, 0x2b2, 0x3, 0x2,
+ 0x2, 0x2, 0x2b2, 0x2b4, 0x3, 0x2, 0x2, 0x2, 0x2b3,
+ 0x2b5, 0x5, 0x7c, 0x3f, 0x2, 0x2b4, 0x2b3, 0x3, 0x2,
+ 0x2, 0x2, 0x2b4, 0x2b5, 0x3, 0x2, 0x2, 0x2, 0x2b5,
+ 0x2b6, 0x3, 0x2, 0x2, 0x2, 0x2b6, 0x2b7, 0x7, 0x11,
+ 0x2, 0x2, 0x2b7, 0x7f, 0x3, 0x2, 0x2, 0x2, 0x2b8,
+ 0x2b9, 0x7, 0x8, 0x2, 0x2, 0x2b9, 0x2ba, 0x7, 0x4f,
+ 0x2, 0x2, 0x2ba, 0x2bb, 0x7, 0x34, 0x2, 0x2, 0x2bb,
+ 0x2bc, 0x5, 0x2, 0x2, 0x2, 0x2bc, 0x2bd, 0x7, 0x11,
+ 0x2, 0x2, 0x2bd, 0x81, 0x3, 0x2, 0x2, 0x2, 0x2be,
+ 0x2c0, 0x7, 0x2f, 0x2, 0x2, 0x2bf, 0x2c1, 0x7, 0x1c,
+ 0x2, 0x2, 0x2c0, 0x2bf, 0x3, 0x2, 0x2, 0x2, 0x2c0,
+ 0x2c1, 0x3, 0x2, 0x2, 0x2, 0x2c1, 0x2c2, 0x3, 0x2,
+ 0x2, 0x2, 0x2c2, 0x2c3, 0x7, 0x19, 0x2, 0x2, 0x2c3,
+ 0x2c4, 0x7, 0x4f, 0x2, 0x2, 0x2c4, 0x2c5, 0x5, 0x8,
+ 0x5, 0x2, 0x2c5, 0x2c6, 0x7, 0x3, 0x2, 0x2, 0x2c6,
+ 0x2c7, 0x5, 0x4, 0x3, 0x2, 0x2c7, 0x2c8, 0x7, 0x4,
+ 0x2, 0x2, 0x2c8, 0x2c9, 0x5, 0xe, 0x8, 0x2, 0x2c9,
+ 0x2ca, 0x7, 0x11, 0x2, 0x2, 0x2ca, 0x83, 0x3, 0x2,
+ 0x2, 0x2, 0x2cb, 0x2ce, 0x7, 0x2f, 0x2, 0x2, 0x2cc,
+ 0x2cd, 0x7, 0x16, 0x2, 0x2, 0x2cd, 0x2cf, 0x7, 0x4e,
+ 0x2, 0x2, 0x2ce, 0x2cc, 0x3, 0x2, 0x2, 0x2, 0x2ce,
+ 0x2cf, 0x3, 0x2, 0x2, 0x2, 0x2cf, 0x2d0, 0x3, 0x2,
+ 0x2, 0x2, 0x2d0, 0x2d1, 0x7, 0x18, 0x2, 0x2, 0x2d1,
+ 0x2d2, 0x7, 0x4f, 0x2, 0x2, 0x2d2, 0x2d3, 0x5, 0x8,
+ 0x5, 0x2, 0x2d3, 0x2d4, 0x5, 0xa, 0x6, 0x2, 0x2d4,
+ 0x2d5, 0x5, 0xe, 0x8, 0x2, 0x2d5, 0x2d6, 0x5, 0x10,
+ 0x9, 0x2, 0x2d6, 0x2d7, 0x7, 0x11, 0x2, 0x2, 0x2d7,
+ 0x85, 0x3, 0x2, 0x2, 0x2, 0x2d8, 0x2d9, 0x7, 0x2f,
+ 0x2, 0x2, 0x2d9, 0x2da, 0x7, 0x1a, 0x2, 0x2, 0x2da,
+ 0x2db, 0x7, 0x4f, 0x2, 0x2, 0x2db, 0x2dc, 0x5, 0xa,
+ 0x6, 0x2, 0x2dc, 0x2dd, 0x5, 0xe, 0x8, 0x2, 0x2dd,
+ 0x2de, 0x7, 0x11, 0x2, 0x2, 0x2de, 0x87, 0x3, 0x2,
+ 0x2, 0x2, 0x2df, 0x2e1, 0x7, 0x1c, 0x2, 0x2, 0x2e0,
+ 0x2df, 0x3, 0x2, 0x2, 0x2, 0x2e0, 0x2e1, 0x3, 0x2,
+ 0x2, 0x2, 0x2e1, 0x2e2, 0x3, 0x2, 0x2, 0x2, 0x2e2,
+ 0x2e3, 0x7, 0x19, 0x2, 0x2, 0x2e3, 0x2e4, 0x7, 0x4f,
+ 0x2, 0x2, 0x2e4, 0x2e5, 0x5, 0x8, 0x5, 0x2, 0x2e5,
+ 0x2e6, 0x5, 0x16, 0xc, 0x2, 0x2e6, 0x2e9, 0x5, 0xe,
+ 0x8, 0x2, 0x2e7, 0x2ea, 0x5, 0x72, 0x3a, 0x2, 0x2e8,
+ 0x2ea, 0x7, 0x11, 0x2, 0x2, 0x2e9, 0x2e7, 0x3, 0x2,
+ 0x2, 0x2, 0x2e9, 0x2e8, 0x3, 0x2, 0x2, 0x2, 0x2ea,
+ 0x89, 0x3, 0x2, 0x2, 0x2, 0x2eb, 0x2ec, 0x7, 0x4f,
+ 0x2, 0x2, 0x2ec, 0x2ed, 0x5, 0x6, 0x4, 0x2, 0x2ed,
+ 0x2ee, 0x5, 0x16, 0xc, 0x2, 0x2ee, 0x2ef, 0x5, 0xe,
+ 0x8, 0x2, 0x2ef, 0x2f0, 0x5, 0x10, 0x9, 0x2, 0x2f0,
+ 0x2f1, 0x5, 0x72, 0x3a, 0x2, 0x2f1, 0x8b, 0x3, 0x2,
+ 0x2, 0x2, 0x2f2, 0x2f3, 0x7, 0x16, 0x2, 0x2, 0x2f3,
+ 0x2f5, 0x7, 0x4e, 0x2, 0x2, 0x2f4, 0x2f2, 0x3, 0x2,
+ 0x2, 0x2, 0x2f4, 0x2f5, 0x3, 0x2, 0x2, 0x2, 0x2f5,
+ 0x2f6, 0x3, 0x2, 0x2, 0x2, 0x2f6, 0x2f7, 0x7, 0x18,
+ 0x2, 0x2, 0x2f7, 0x2f8, 0x7, 0x4f, 0x2, 0x2, 0x2f8,
+ 0x2f9, 0x5, 0x8, 0x5, 0x2, 0x2f9, 0x2fa, 0x5, 0x16,
+ 0xc, 0x2, 0x2fa, 0x2fb, 0x5, 0xe, 0x8, 0x2, 0x2fb,
+ 0x2fe, 0x5, 0x10, 0x9, 0x2, 0x2fc, 0x2ff, 0x5, 0x72,
+ 0x3a, 0x2, 0x2fd, 0x2ff, 0x7, 0x11, 0x2, 0x2, 0x2fe,
+ 0x2fc, 0x3, 0x2, 0x2, 0x2, 0x2fe, 0x2fd, 0x3, 0x2,
+ 0x2, 0x2, 0x2ff, 0x8d, 0x3, 0x2, 0x2, 0x2, 0x300,
+ 0x301, 0x7, 0x2e, 0x2, 0x2, 0x301, 0x302, 0x7, 0x4f,
+ 0x2, 0x2, 0x302, 0x303, 0x7, 0x7, 0x2, 0x2, 0x303,
+ 0x304, 0x5, 0x2, 0x2, 0x2, 0x304, 0x305, 0x5, 0x7a,
+ 0x3e, 0x2, 0x305, 0x306, 0x7, 0x11, 0x2, 0x2, 0x306,
+ 0x8f, 0x3, 0x2, 0x2, 0x2, 0x307, 0x308, 0x7, 0x2e,
+ 0x2, 0x2, 0x308, 0x309, 0x7, 0x4f, 0x2, 0x2, 0x309,
+ 0x30a, 0x7, 0x7, 0x2, 0x2, 0x30a, 0x30b, 0x5, 0x2,
+ 0x2, 0x2, 0x30b, 0x30c, 0x7, 0x34, 0x2, 0x2, 0x30c,
+ 0x30d, 0x5, 0x1a, 0xe, 0x2, 0x30d, 0x30e, 0x7, 0x11,
+ 0x2, 0x2, 0x30e, 0x91, 0x3, 0x2, 0x2, 0x2, 0x30f,
+ 0x310, 0x7, 0x17, 0x2, 0x2, 0x310, 0x311, 0x7, 0x4f,
+ 0x2, 0x2, 0x311, 0x312, 0x7, 0xf, 0x2, 0x2, 0x312,
+ 0x313, 0x5, 0x76, 0x3c, 0x2, 0x313, 0x314, 0x7, 0x10,
+ 0x2, 0x2, 0x314, 0x93, 0x3, 0x2, 0x2, 0x2, 0x315,
+ 0x321, 0x5, 0x92, 0x4a, 0x2, 0x316, 0x321, 0x5, 0x7e,
+ 0x40, 0x2, 0x317, 0x321, 0x5, 0x80, 0x41, 0x2, 0x318,
+ 0x321, 0x5, 0x88, 0x45, 0x2, 0x319, 0x321, 0x5, 0x8a,
+ 0x46, 0x2, 0x31a, 0x321, 0x5, 0x8c, 0x47, 0x2, 0x31b,
+ 0x321, 0x5, 0x84, 0x43, 0x2, 0x31c, 0x321, 0x5, 0x82,
+ 0x42, 0x2, 0x31d, 0x321, 0x5, 0x86, 0x44, 0x2, 0x31e,
+ 0x321, 0x5, 0x8e, 0x48, 0x2, 0x31f, 0x321, 0x5, 0x90,
+ 0x49, 0x2, 0x320, 0x315, 0x3, 0x2, 0x2, 0x2, 0x320,
+ 0x316, 0x3, 0x2, 0x2, 0x2, 0x320, 0x317, 0x3, 0x2,
+ 0x2, 0x2, 0x320, 0x318, 0x3, 0x2, 0x2, 0x2, 0x320,
+ 0x319, 0x3, 0x2, 0x2, 0x2, 0x320, 0x31a, 0x3, 0x2,
+ 0x2, 0x2, 0x320, 0x31b, 0x3, 0x2, 0x2, 0x2, 0x320,
+ 0x31c, 0x3, 0x2, 0x2, 0x2, 0x320, 0x31d, 0x3, 0x2,
+ 0x2, 0x2, 0x320, 0x31e, 0x3, 0x2, 0x2, 0x2, 0x320,
+ 0x31f, 0x3, 0x2, 0x2, 0x2, 0x321, 0x95, 0x3, 0x2,
+ 0x2, 0x2, 0x322, 0x323, 0x7, 0x1b, 0x2, 0x2, 0x323,
+ 0x324, 0x7, 0x4f, 0x2, 0x2, 0x324, 0x328, 0x7, 0xf,
+ 0x2, 0x2, 0x325, 0x327, 0x5, 0x94, 0x4b, 0x2, 0x326,
+ 0x325, 0x3, 0x2, 0x2, 0x2, 0x327, 0x32a, 0x3, 0x2,
+ 0x2, 0x2, 0x328, 0x326, 0x3, 0x2, 0x2, 0x2, 0x328,
+ 0x329, 0x3, 0x2, 0x2, 0x2, 0x329, 0x32b, 0x3, 0x2,
+ 0x2, 0x2, 0x32a, 0x328, 0x3, 0x2, 0x2, 0x2, 0x32b,
+ 0x32c, 0x7, 0x10, 0x2, 0x2, 0x32c, 0x97, 0x3, 0x2,
+ 0x2, 0x2, 0x32d, 0x330, 0x5, 0x96, 0x4c, 0x2, 0x32e,
+ 0x330, 0x5, 0x94, 0x4b, 0x2, 0x32f, 0x32d, 0x3, 0x2,
+ 0x2, 0x2, 0x32f, 0x32e, 0x3, 0x2, 0x2, 0x2, 0x330,
+ 0x333, 0x3, 0x2, 0x2, 0x2, 0x331, 0x32f, 0x3, 0x2,
+ 0x2, 0x2, 0x331, 0x332, 0x3, 0x2, 0x2, 0x2, 0x332,
+ 0x99, 0x3, 0x2, 0x2, 0x2, 0x333, 0x331, 0x3, 0x2,
+ 0x2, 0x2, 0x4f, 0x9c, 0xaa, 0xb1, 0xb9, 0xbc, 0xcc,
+ 0xd0, 0xd4, 0xda, 0xdf, 0xe5, 0xec, 0xf0, 0xf8, 0xfb,
+ 0x103, 0x106, 0x10b, 0x10f, 0x115, 0x122, 0x126, 0x135, 0x140,
+ 0x14b, 0x156, 0x161, 0x16c, 0x177, 0x182, 0x18d, 0x193, 0x1a0,
+ 0x1aa, 0x1ac, 0x1b9, 0x1bf, 0x1c1, 0x1c5, 0x1ce, 0x1d1, 0x1d8,
+ 0x1da, 0x1e4, 0x1e7, 0x1f5, 0x1f9, 0x1fe, 0x20d, 0x213, 0x21a,
+ 0x229, 0x22c, 0x234, 0x23c, 0x246, 0x24f, 0x25a, 0x263, 0x282,
+ 0x287, 0x28b, 0x293, 0x29f, 0x2ae, 0x2b1, 0x2b4, 0x2c0, 0x2ce,
+ 0x2e0, 0x2e9, 0x2f4, 0x2fe, 0x320, 0x328, 0x32f, 0x331,
};
atn::ATNDeserializer deserializer;
diff --git a/deps/v8/src/torque/TorqueParser.h b/deps/v8/src/torque/TorqueParser.h
index 1604b1b780..bca847835f 100644
--- a/deps/v8/src/torque/TorqueParser.h
+++ b/deps/v8/src/torque/TorqueParser.h
@@ -39,63 +39,61 @@ class TorqueParser : public antlr4::Parser {
RUNTIME = 24,
MODULE = 25,
JAVASCRIPT = 26,
- IMPLICIT = 27,
- DEFERRED = 28,
- IF = 29,
- CAST_KEYWORD = 30,
- CONVERT_KEYWORD = 31,
- FOR = 32,
- WHILE = 33,
- RETURN = 34,
- CONSTEXPR = 35,
- CONTINUE = 36,
- BREAK = 37,
- GOTO = 38,
- OTHERWISE = 39,
- TRY = 40,
- CATCH = 41,
- LABEL = 42,
- LABELS = 43,
- TAIL = 44,
- ISNT = 45,
- IS = 46,
- LET = 47,
- EXTERN = 48,
- ASSERT = 49,
- UNREACHABLE_TOKEN = 50,
- DEBUG_TOKEN = 51,
- ASSIGNMENT = 52,
- ASSIGNMENT_OPERATOR = 53,
- EQUAL = 54,
- PLUS = 55,
- MINUS = 56,
- MULTIPLY = 57,
- DIVIDE = 58,
- MODULO = 59,
- BIT_OR = 60,
- BIT_AND = 61,
- BIT_NOT = 62,
- MAX = 63,
- MIN = 64,
- NOT_EQUAL = 65,
- LESS_THAN = 66,
- LESS_THAN_EQUAL = 67,
- GREATER_THAN = 68,
- GREATER_THAN_EQUAL = 69,
- SHIFT_LEFT = 70,
- SHIFT_RIGHT = 71,
- SHIFT_RIGHT_ARITHMETIC = 72,
- VARARGS = 73,
- EQUALITY_OPERATOR = 74,
- INCREMENT = 75,
- DECREMENT = 76,
- NOT = 77,
- STRING_LITERAL = 78,
- IDENTIFIER = 79,
- WS = 80,
- BLOCK_COMMENT = 81,
- LINE_COMMENT = 82,
- DECIMAL_LITERAL = 83
+ DEFERRED = 27,
+ IF = 28,
+ FOR = 29,
+ WHILE = 30,
+ RETURN = 31,
+ CONSTEXPR = 32,
+ CONTINUE = 33,
+ BREAK = 34,
+ GOTO = 35,
+ OTHERWISE = 36,
+ TRY = 37,
+ LABEL = 38,
+ LABELS = 39,
+ TAIL = 40,
+ ISNT = 41,
+ IS = 42,
+ LET = 43,
+ CONST = 44,
+ EXTERN = 45,
+ ASSERT_TOKEN = 46,
+ CHECK_TOKEN = 47,
+ UNREACHABLE_TOKEN = 48,
+ DEBUG_TOKEN = 49,
+ ASSIGNMENT = 50,
+ ASSIGNMENT_OPERATOR = 51,
+ EQUAL = 52,
+ PLUS = 53,
+ MINUS = 54,
+ MULTIPLY = 55,
+ DIVIDE = 56,
+ MODULO = 57,
+ BIT_OR = 58,
+ BIT_AND = 59,
+ BIT_NOT = 60,
+ MAX = 61,
+ MIN = 62,
+ NOT_EQUAL = 63,
+ LESS_THAN = 64,
+ LESS_THAN_EQUAL = 65,
+ GREATER_THAN = 66,
+ GREATER_THAN_EQUAL = 67,
+ SHIFT_LEFT = 68,
+ SHIFT_RIGHT = 69,
+ SHIFT_RIGHT_ARITHMETIC = 70,
+ VARARGS = 71,
+ EQUALITY_OPERATOR = 72,
+ INCREMENT = 73,
+ DECREMENT = 74,
+ NOT = 75,
+ STRING_LITERAL = 76,
+ IDENTIFIER = 77,
+ WS = 78,
+ BLOCK_COMMENT = 79,
+ LINE_COMMENT = 80,
+ DECIMAL_LITERAL = 81
};
enum {
@@ -126,48 +124,55 @@ class TorqueParser : public antlr4::Parser {
RuleIncrementDecrement = 24,
RuleAssignment = 25,
RuleAssignmentExpression = 26,
- RulePrimaryExpression = 27,
- RuleForInitialization = 28,
- RuleForLoop = 29,
- RuleRangeSpecifier = 30,
- RuleForOfRange = 31,
- RuleForOfLoop = 32,
- RuleArgument = 33,
- RuleArgumentList = 34,
- RuleHelperCall = 35,
- RuleLabelReference = 36,
- RuleVariableDeclaration = 37,
- RuleVariableDeclarationWithInitialization = 38,
- RuleHelperCallStatement = 39,
- RuleExpressionStatement = 40,
- RuleIfStatement = 41,
- RuleWhileLoop = 42,
- RuleReturnStatement = 43,
- RuleBreakStatement = 44,
- RuleContinueStatement = 45,
- RuleGotoStatement = 46,
- RuleHandlerWithStatement = 47,
- RuleTryCatch = 48,
- RuleDiagnosticStatement = 49,
- RuleStatement = 50,
- RuleStatementList = 51,
- RuleStatementScope = 52,
- RuleStatementBlock = 53,
- RuleHelperBody = 54,
- RuleExtendsDeclaration = 55,
- RuleGeneratesDeclaration = 56,
- RuleConstexprDeclaration = 57,
- RuleTypeDeclaration = 58,
- RuleExternalBuiltin = 59,
- RuleExternalMacro = 60,
- RuleExternalRuntime = 61,
- RuleBuiltinDeclaration = 62,
- RuleGenericSpecialization = 63,
- RuleMacroDeclaration = 64,
- RuleConstDeclaration = 65,
- RuleDeclaration = 66,
- RuleModuleDeclaration = 67,
- RuleFile = 68
+ RuleStructExpression = 27,
+ RuleFunctionPointerExpression = 28,
+ RulePrimaryExpression = 29,
+ RuleForInitialization = 30,
+ RuleForLoop = 31,
+ RuleRangeSpecifier = 32,
+ RuleForOfRange = 33,
+ RuleForOfLoop = 34,
+ RuleArgument = 35,
+ RuleArgumentList = 36,
+ RuleHelperCall = 37,
+ RuleLabelReference = 38,
+ RuleVariableDeclaration = 39,
+ RuleVariableDeclarationWithInitialization = 40,
+ RuleHelperCallStatement = 41,
+ RuleExpressionStatement = 42,
+ RuleIfStatement = 43,
+ RuleWhileLoop = 44,
+ RuleReturnStatement = 45,
+ RuleBreakStatement = 46,
+ RuleContinueStatement = 47,
+ RuleGotoStatement = 48,
+ RuleHandlerWithStatement = 49,
+ RuleTryLabelStatement = 50,
+ RuleDiagnosticStatement = 51,
+ RuleStatement = 52,
+ RuleStatementList = 53,
+ RuleStatementScope = 54,
+ RuleStatementBlock = 55,
+ RuleHelperBody = 56,
+ RuleFieldDeclaration = 57,
+ RuleFieldListDeclaration = 58,
+ RuleExtendsDeclaration = 59,
+ RuleGeneratesDeclaration = 60,
+ RuleConstexprDeclaration = 61,
+ RuleTypeDeclaration = 62,
+ RuleTypeAliasDeclaration = 63,
+ RuleExternalBuiltin = 64,
+ RuleExternalMacro = 65,
+ RuleExternalRuntime = 66,
+ RuleBuiltinDeclaration = 67,
+ RuleGenericSpecialization = 68,
+ RuleMacroDeclaration = 69,
+ RuleExternConstDeclaration = 70,
+ RuleConstDeclaration = 71,
+ RuleStructDeclaration = 72,
+ RuleDeclaration = 73,
+ RuleModuleDeclaration = 74,
+ RuleFile = 75
};
explicit TorqueParser(antlr4::TokenStream* input);
@@ -208,6 +213,8 @@ class TorqueParser : public antlr4::Parser {
class IncrementDecrementContext;
class AssignmentContext;
class AssignmentExpressionContext;
+ class StructExpressionContext;
+ class FunctionPointerExpressionContext;
class PrimaryExpressionContext;
class ForInitializationContext;
class ForLoopContext;
@@ -229,24 +236,29 @@ class TorqueParser : public antlr4::Parser {
class ContinueStatementContext;
class GotoStatementContext;
class HandlerWithStatementContext;
- class TryCatchContext;
+ class TryLabelStatementContext;
class DiagnosticStatementContext;
class StatementContext;
class StatementListContext;
class StatementScopeContext;
class StatementBlockContext;
class HelperBodyContext;
+ class FieldDeclarationContext;
+ class FieldListDeclarationContext;
class ExtendsDeclarationContext;
class GeneratesDeclarationContext;
class ConstexprDeclarationContext;
class TypeDeclarationContext;
+ class TypeAliasDeclarationContext;
class ExternalBuiltinContext;
class ExternalMacroContext;
class ExternalRuntimeContext;
class BuiltinDeclarationContext;
class GenericSpecializationContext;
class MacroDeclarationContext;
+ class ExternConstDeclarationContext;
class ConstDeclarationContext;
+ class StructDeclarationContext;
class DeclarationContext;
class ModuleDeclarationContext;
class FileContext;
@@ -259,7 +271,9 @@ class TorqueParser : public antlr4::Parser {
antlr4::tree::TerminalNode* CONSTEXPR();
antlr4::tree::TerminalNode* BUILTIN();
TypeListContext* typeList();
- TypeContext* type();
+ std::vector<TypeContext*> type();
+ TypeContext* type(size_t i);
+ antlr4::tree::TerminalNode* BIT_OR();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -268,7 +282,7 @@ class TorqueParser : public antlr4::Parser {
};
TypeContext* type();
-
+ TypeContext* type(int precedence);
class TypeListContext : public antlr4::ParserRuleContext {
public:
TypeListContext(antlr4::ParserRuleContext* parent, size_t invokingState);
@@ -655,9 +669,9 @@ class TorqueParser : public antlr4::Parser {
size_t invokingState);
size_t getRuleIndex() const override;
antlr4::tree::TerminalNode* IDENTIFIER();
- GenericSpecializationTypeListContext* genericSpecializationTypeList();
- LocationExpressionContext* locationExpression();
+ PrimaryExpressionContext* primaryExpression();
ExpressionContext* expression();
+ LocationExpressionContext* locationExpression();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -708,7 +722,7 @@ class TorqueParser : public antlr4::Parser {
AssignmentExpressionContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
- PrimaryExpressionContext* primaryExpression();
+ FunctionPointerExpressionContext* functionPointerExpression();
AssignmentContext* assignment();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -719,20 +733,50 @@ class TorqueParser : public antlr4::Parser {
AssignmentExpressionContext* assignmentExpression();
+ class StructExpressionContext : public antlr4::ParserRuleContext {
+ public:
+ StructExpressionContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ std::vector<ExpressionContext*> expression();
+ ExpressionContext* expression(size_t i);
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ StructExpressionContext* structExpression();
+
+ class FunctionPointerExpressionContext : public antlr4::ParserRuleContext {
+ public:
+ FunctionPointerExpressionContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ PrimaryExpressionContext* primaryExpression();
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ GenericSpecializationTypeListContext* genericSpecializationTypeList();
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ FunctionPointerExpressionContext* functionPointerExpression();
+
class PrimaryExpressionContext : public antlr4::ParserRuleContext {
public:
PrimaryExpressionContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
HelperCallContext* helperCall();
+ StructExpressionContext* structExpression();
antlr4::tree::TerminalNode* DECIMAL_LITERAL();
antlr4::tree::TerminalNode* STRING_LITERAL();
- antlr4::tree::TerminalNode* CAST_KEYWORD();
- TypeContext* type();
ExpressionContext* expression();
- antlr4::tree::TerminalNode* OTHERWISE();
- antlr4::tree::TerminalNode* IDENTIFIER();
- antlr4::tree::TerminalNode* CONVERT_KEYWORD();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -895,9 +939,10 @@ class TorqueParser : public antlr4::Parser {
VariableDeclarationContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* LET();
antlr4::tree::TerminalNode* IDENTIFIER();
TypeContext* type();
+ antlr4::tree::TerminalNode* LET();
+ antlr4::tree::TerminalNode* CONST();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1059,11 +1104,9 @@ class TorqueParser : public antlr4::Parser {
HandlerWithStatementContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
- StatementBlockContext* statementBlock();
- antlr4::tree::TerminalNode* CATCH();
- antlr4::tree::TerminalNode* IDENTIFIER();
antlr4::tree::TerminalNode* LABEL();
LabelDeclarationContext* labelDeclaration();
+ StatementBlockContext* statementBlock();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1073,9 +1116,10 @@ class TorqueParser : public antlr4::Parser {
HandlerWithStatementContext* handlerWithStatement();
- class TryCatchContext : public antlr4::ParserRuleContext {
+ class TryLabelStatementContext : public antlr4::ParserRuleContext {
public:
- TryCatchContext(antlr4::ParserRuleContext* parent, size_t invokingState);
+ TryLabelStatementContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
size_t getRuleIndex() const override;
antlr4::tree::TerminalNode* TRY();
StatementBlockContext* statementBlock();
@@ -1088,15 +1132,16 @@ class TorqueParser : public antlr4::Parser {
antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
};
- TryCatchContext* tryCatch();
+ TryLabelStatementContext* tryLabelStatement();
class DiagnosticStatementContext : public antlr4::ParserRuleContext {
public:
DiagnosticStatementContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* ASSERT();
ExpressionContext* expression();
+ antlr4::tree::TerminalNode* ASSERT_TOKEN();
+ antlr4::tree::TerminalNode* CHECK_TOKEN();
antlr4::tree::TerminalNode* UNREACHABLE_TOKEN();
antlr4::tree::TerminalNode* DEBUG_TOKEN();
@@ -1125,7 +1170,7 @@ class TorqueParser : public antlr4::Parser {
WhileLoopContext* whileLoop();
ForOfLoopContext* forOfLoop();
ForLoopContext* forLoop();
- TryCatchContext* tryCatch();
+ TryLabelStatementContext* tryLabelStatement();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1197,6 +1242,38 @@ class TorqueParser : public antlr4::Parser {
HelperBodyContext* helperBody();
+ class FieldDeclarationContext : public antlr4::ParserRuleContext {
+ public:
+ FieldDeclarationContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ TypeContext* type();
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ FieldDeclarationContext* fieldDeclaration();
+
+ class FieldListDeclarationContext : public antlr4::ParserRuleContext {
+ public:
+ FieldListDeclarationContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ std::vector<FieldDeclarationContext*> fieldDeclaration();
+ FieldDeclarationContext* fieldDeclaration(size_t i);
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ FieldListDeclarationContext* fieldListDeclaration();
+
class ExtendsDeclarationContext : public antlr4::ParserRuleContext {
public:
ExtendsDeclarationContext(antlr4::ParserRuleContext* parent,
@@ -1260,6 +1337,22 @@ class TorqueParser : public antlr4::Parser {
TypeDeclarationContext* typeDeclaration();
+ class TypeAliasDeclarationContext : public antlr4::ParserRuleContext {
+ public:
+ TypeAliasDeclarationContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ TypeContext* type();
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ TypeAliasDeclarationContext* typeAliasDeclaration();
+
class ExternalBuiltinContext : public antlr4::ParserRuleContext {
public:
ExternalBuiltinContext(antlr4::ParserRuleContext* parent,
@@ -1294,7 +1387,6 @@ class TorqueParser : public antlr4::Parser {
OptionalTypeContext* optionalType();
OptionalLabelListContext* optionalLabelList();
antlr4::tree::TerminalNode* STRING_LITERAL();
- antlr4::tree::TerminalNode* IMPLICIT();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1376,6 +1468,7 @@ class TorqueParser : public antlr4::Parser {
OptionalTypeContext* optionalType();
OptionalLabelListContext* optionalLabelList();
HelperBodyContext* helperBody();
+ antlr4::tree::TerminalNode* STRING_LITERAL();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1385,14 +1478,34 @@ class TorqueParser : public antlr4::Parser {
MacroDeclarationContext* macroDeclaration();
+ class ExternConstDeclarationContext : public antlr4::ParserRuleContext {
+ public:
+ ExternConstDeclarationContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* CONST();
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ TypeContext* type();
+ GeneratesDeclarationContext* generatesDeclaration();
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ ExternConstDeclarationContext* externConstDeclaration();
+
class ConstDeclarationContext : public antlr4::ParserRuleContext {
public:
ConstDeclarationContext(antlr4::ParserRuleContext* parent,
size_t invokingState);
size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* CONST();
antlr4::tree::TerminalNode* IDENTIFIER();
TypeContext* type();
- antlr4::tree::TerminalNode* STRING_LITERAL();
+ antlr4::tree::TerminalNode* ASSIGNMENT();
+ ExpressionContext* expression();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
void exitRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1402,17 +1515,36 @@ class TorqueParser : public antlr4::Parser {
ConstDeclarationContext* constDeclaration();
+ class StructDeclarationContext : public antlr4::ParserRuleContext {
+ public:
+ StructDeclarationContext(antlr4::ParserRuleContext* parent,
+ size_t invokingState);
+ size_t getRuleIndex() const override;
+ antlr4::tree::TerminalNode* IDENTIFIER();
+ FieldListDeclarationContext* fieldListDeclaration();
+
+ void enterRule(antlr4::tree::ParseTreeListener* listener) override;
+ void exitRule(antlr4::tree::ParseTreeListener* listener) override;
+
+ antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
+ };
+
+ StructDeclarationContext* structDeclaration();
+
class DeclarationContext : public antlr4::ParserRuleContext {
public:
DeclarationContext(antlr4::ParserRuleContext* parent, size_t invokingState);
size_t getRuleIndex() const override;
+ StructDeclarationContext* structDeclaration();
TypeDeclarationContext* typeDeclaration();
+ TypeAliasDeclarationContext* typeAliasDeclaration();
BuiltinDeclarationContext* builtinDeclaration();
GenericSpecializationContext* genericSpecialization();
MacroDeclarationContext* macroDeclaration();
ExternalMacroContext* externalMacro();
ExternalBuiltinContext* externalBuiltin();
ExternalRuntimeContext* externalRuntime();
+ ExternConstDeclarationContext* externConstDeclaration();
ConstDeclarationContext* constDeclaration();
void enterRule(antlr4::tree::ParseTreeListener* listener) override;
@@ -1460,6 +1592,7 @@ class TorqueParser : public antlr4::Parser {
bool sempred(antlr4::RuleContext* _localctx, size_t ruleIndex,
size_t predicateIndex) override;
+ bool typeSempred(TypeContext* _localctx, size_t predicateIndex);
bool conditionalExpressionSempred(ConditionalExpressionContext* _localctx,
size_t predicateIndex);
bool logicalORExpressionSempred(LogicalORExpressionContext* _localctx,
diff --git a/deps/v8/src/torque/TorqueVisitor.h b/deps/v8/src/torque/TorqueVisitor.h
index fe97810e50..7b1ee2754e 100644
--- a/deps/v8/src/torque/TorqueVisitor.h
+++ b/deps/v8/src/torque/TorqueVisitor.h
@@ -100,6 +100,12 @@ class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
virtual antlrcpp::Any visitAssignmentExpression(
TorqueParser::AssignmentExpressionContext* context) = 0;
+ virtual antlrcpp::Any visitStructExpression(
+ TorqueParser::StructExpressionContext* context) = 0;
+
+ virtual antlrcpp::Any visitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* context) = 0;
+
virtual antlrcpp::Any visitPrimaryExpression(
TorqueParser::PrimaryExpressionContext* context) = 0;
@@ -162,8 +168,8 @@ class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
virtual antlrcpp::Any visitHandlerWithStatement(
TorqueParser::HandlerWithStatementContext* context) = 0;
- virtual antlrcpp::Any visitTryCatch(
- TorqueParser::TryCatchContext* context) = 0;
+ virtual antlrcpp::Any visitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* context) = 0;
virtual antlrcpp::Any visitDiagnosticStatement(
TorqueParser::DiagnosticStatementContext* context) = 0;
@@ -183,6 +189,12 @@ class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
virtual antlrcpp::Any visitHelperBody(
TorqueParser::HelperBodyContext* context) = 0;
+ virtual antlrcpp::Any visitFieldDeclaration(
+ TorqueParser::FieldDeclarationContext* context) = 0;
+
+ virtual antlrcpp::Any visitFieldListDeclaration(
+ TorqueParser::FieldListDeclarationContext* context) = 0;
+
virtual antlrcpp::Any visitExtendsDeclaration(
TorqueParser::ExtendsDeclarationContext* context) = 0;
@@ -195,6 +207,9 @@ class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
virtual antlrcpp::Any visitTypeDeclaration(
TorqueParser::TypeDeclarationContext* context) = 0;
+ virtual antlrcpp::Any visitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* context) = 0;
+
virtual antlrcpp::Any visitExternalBuiltin(
TorqueParser::ExternalBuiltinContext* context) = 0;
@@ -213,9 +228,15 @@ class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
virtual antlrcpp::Any visitMacroDeclaration(
TorqueParser::MacroDeclarationContext* context) = 0;
+ virtual antlrcpp::Any visitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* context) = 0;
+
virtual antlrcpp::Any visitConstDeclaration(
TorqueParser::ConstDeclarationContext* context) = 0;
+ virtual antlrcpp::Any visitStructDeclaration(
+ TorqueParser::StructDeclarationContext* context) = 0;
+
virtual antlrcpp::Any visitDeclaration(
TorqueParser::DeclarationContext* context) = 0;
diff --git a/deps/v8/src/torque/ast-generator.cc b/deps/v8/src/torque/ast-generator.cc
index 617afd1485..5eb2f37ef6 100644
--- a/deps/v8/src/torque/ast-generator.cc
+++ b/deps/v8/src/torque/ast-generator.cc
@@ -76,14 +76,20 @@ LabelAndTypesVector AstGenerator::GetOptionalLabelAndTypeList(
TypeExpression* AstGenerator::GetType(TorqueParser::TypeContext* context) {
if (context->BUILTIN()) {
ParameterList parameters = context->typeList()->accept(this);
- TypeExpression* return_type = GetType(context->type());
+ TypeExpression* return_type = GetType(context->type(0));
return RegisterNode(
new FunctionTypeExpression(Pos(context), parameters, return_type));
- } else {
+ } else if (context->BIT_OR()) {
+ return RegisterNode(new UnionTypeExpression(
+ Pos(context), GetType(context->type(0)), GetType(context->type(1))));
+ } else if (context->IDENTIFIER()) {
bool is_constexpr = context->CONSTEXPR() != nullptr;
std::string name = context->IDENTIFIER()->getSymbol()->getText();
return RegisterNode(
new BasicTypeExpression(Pos(context), is_constexpr, std::move(name)));
+ } else {
+ DCHECK_EQ(1, context->type().size());
+ return GetType(context->type(0));
}
}
@@ -169,13 +175,19 @@ antlrcpp::Any AstGenerator::visitMacroDeclaration(
GetOptionalParameterList(context->parameterList()),
GetOptionalType(context->optionalType()),
GetOptionalLabelAndTypeList(context->optionalLabelList())});
- auto body = context->helperBody()->accept(this).as<Statement*>();
+ if (auto* op = context->STRING_LITERAL()) {
+ macro->op = StringLiteralUnquote(op->getSymbol()->getText());
+ }
+ base::Optional<Statement*> body;
+ if (context->helperBody())
+ body = context->helperBody()->accept(this).as<Statement*>();
Declaration* result = nullptr;
if (generic_parameters.size() != 0) {
result = RegisterNode(
new GenericDeclaration{Pos(context), macro, generic_parameters, body});
} else {
- result = RegisterNode(new StandardDeclaration{Pos(context), macro, body});
+ if (!body) ReportError("A non-generic declaration needs a body.");
+ result = RegisterNode(new StandardDeclaration{Pos(context), macro, *body});
}
return result;
}
@@ -184,7 +196,9 @@ antlrcpp::Any AstGenerator::visitBuiltinDeclaration(
TorqueParser::BuiltinDeclarationContext* context) {
auto generic_parameters =
GetIdentifierVector(context->optionalGenericTypeList()->IDENTIFIER());
- Statement* body = context->helperBody()->accept(this).as<Statement*>();
+ base::Optional<Statement*> body;
+ if (context->helperBody())
+ body = context->helperBody()->accept(this).as<Statement*>();
TorqueBuiltinDeclaration* builtin = RegisterNode(new TorqueBuiltinDeclaration{
Pos(context), context->JAVASCRIPT() != nullptr,
@@ -197,7 +211,9 @@ antlrcpp::Any AstGenerator::visitBuiltinDeclaration(
result = RegisterNode(new GenericDeclaration{Pos(context), builtin,
generic_parameters, body});
} else {
- result = RegisterNode(new StandardDeclaration{Pos(context), builtin, body});
+ if (!body) ReportError("A non-generic declaration needs a body.");
+ result =
+ RegisterNode(new StandardDeclaration{Pos(context), builtin, *body});
}
return result;
}
@@ -209,7 +225,6 @@ antlrcpp::Any AstGenerator::visitExternalMacro(
MacroDeclaration* macro = RegisterNode(new ExternalMacroDeclaration{
Pos(context),
context->IDENTIFIER()->getSymbol()->getText(),
- context->IMPLICIT() != nullptr,
{},
std::move(
context->typeListMaybeVarArgs()->accept(this).as<ParameterList>()),
@@ -263,26 +278,38 @@ antlrcpp::Any AstGenerator::visitExternalRuntime(
RegisterNode(new StandardDeclaration{Pos(context), runtime, nullptr}));
}
+antlrcpp::Any AstGenerator::visitConstDeclaration(
+ TorqueParser::ConstDeclarationContext* context) {
+ auto name = context->IDENTIFIER()->getSymbol()->getText();
+ auto type = GetType(context->type());
+ Expression* expression =
+ context->expression()->accept(this).as<Expression*>();
+ return implicit_cast<Declaration*>(
+ RegisterNode(new ConstDeclaration{Pos(context), name, type, expression}));
+}
+
antlrcpp::Any AstGenerator::visitGenericSpecialization(
TorqueParser::GenericSpecializationContext* context) {
auto name = context->IDENTIFIER()->getSymbol()->getText();
auto specialization_parameters =
GetTypeVector(context->genericSpecializationTypeList()->typeList());
- Statement* body = context->helperBody()->accept(this).as<Statement*>();
return implicit_cast<Declaration*>(RegisterNode(new SpecializationDeclaration{
- Pos(context), name, specialization_parameters,
+ Pos(context), name, false, specialization_parameters,
GetOptionalParameterList(context->parameterList()),
GetOptionalType(context->optionalType()),
- GetOptionalLabelAndTypeList(context->optionalLabelList()), body}));
+ GetOptionalLabelAndTypeList(context->optionalLabelList()),
+ context->helperBody()->accept(this).as<Statement*>()}));
}
-antlrcpp::Any AstGenerator::visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* context) {
- return implicit_cast<Declaration*>(RegisterNode(new ConstDeclaration{
+antlrcpp::Any AstGenerator::visitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* context) {
+ return implicit_cast<Declaration*>(RegisterNode(new ExternConstDeclaration{
Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
GetType(context->type()),
- StringLiteralUnquote(
- context->STRING_LITERAL()->getSymbol()->getText())}));
+ StringLiteralUnquote(context->generatesDeclaration()
+ ->STRING_LITERAL()
+ ->getSymbol()
+ ->getText())}));
}
antlrcpp::Any AstGenerator::visitTypeDeclaration(
@@ -308,10 +335,20 @@ antlrcpp::Any AstGenerator::visitTypeDeclaration(
return implicit_cast<Declaration*>(result);
}
+antlrcpp::Any AstGenerator::visitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* context) {
+ TypeAliasDeclaration* result = RegisterNode(new TypeAliasDeclaration{
+ Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
+ GetType(context->type())});
+ return implicit_cast<Declaration*>(result);
+}
+
antlrcpp::Any AstGenerator::visitVariableDeclaration(
TorqueParser::VariableDeclarationContext* context) {
+ bool is_const_qualified = context->CONST() != nullptr;
return RegisterNode(
new VarDeclarationStatement{Pos(context),
+ is_const_qualified,
context->IDENTIFIER()->getSymbol()->getText(),
GetType(context->type()),
{}});
@@ -485,40 +522,44 @@ antlrcpp::Any AstGenerator::visitForOfLoop(
return implicit_cast<Statement*>(result);
}
-antlrcpp::Any AstGenerator::visitTryCatch(
- TorqueParser::TryCatchContext* context) {
- TryCatchStatement* result = RegisterNode(new TryCatchStatement{
- Pos(context),
- context->statementBlock()->accept(this).as<Statement*>(),
- {}});
+antlrcpp::Any AstGenerator::visitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* context) {
+ TryLabelStatement* result = RegisterNode(new TryLabelStatement{
+ Pos(context), context->statementBlock()->accept(this).as<Statement*>()});
for (auto* handler : context->handlerWithStatement()) {
- if (handler->CATCH() != nullptr) {
- CatchBlock* catch_block = RegisterNode(new CatchBlock{
- Pos(handler->statementBlock()),
- {},
- handler->statementBlock()->accept(this).as<Statement*>()});
- catch_block->caught = handler->IDENTIFIER()->getSymbol()->getText();
- result->catch_blocks.push_back(catch_block);
- } else {
- handler->labelDeclaration()->accept(this);
- auto parameter_list = handler->labelDeclaration()->parameterList();
- ParameterList label_parameters = parameter_list == nullptr
- ? ParameterList()
- : handler->labelDeclaration()
- ->parameterList()
- ->accept(this)
- .as<ParameterList>();
- LabelBlock* label_block = RegisterNode(new LabelBlock{
- Pos(handler->statementBlock()),
- handler->labelDeclaration()->IDENTIFIER()->getSymbol()->getText(),
- label_parameters,
- handler->statementBlock()->accept(this).as<Statement*>()});
- result->label_blocks.push_back(label_block);
- }
+ handler->labelDeclaration()->accept(this);
+ auto parameter_list = handler->labelDeclaration()->parameterList();
+ ParameterList label_parameters = parameter_list == nullptr
+ ? ParameterList()
+ : handler->labelDeclaration()
+ ->parameterList()
+ ->accept(this)
+ .as<ParameterList>();
+ LabelBlock* label_block = RegisterNode(new LabelBlock{
+ Pos(handler->statementBlock()),
+ handler->labelDeclaration()->IDENTIFIER()->getSymbol()->getText(),
+ label_parameters,
+ handler->statementBlock()->accept(this).as<Statement*>()});
+ result->label_blocks.push_back(label_block);
}
return implicit_cast<Statement*>(result);
}
+antlrcpp::Any AstGenerator::visitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* context) {
+ if (context->IDENTIFIER()) {
+ std::vector<TypeExpression*> templateArguments;
+ if (context->genericSpecializationTypeList()) {
+ templateArguments =
+ GetTypeVector(context->genericSpecializationTypeList()->typeList());
+ }
+ return implicit_cast<Expression*>(RegisterNode(new IdentifierExpression{
+ Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
+ std::move(templateArguments)}));
+ }
+ return context->primaryExpression()->accept(this);
+}
+
antlrcpp::Any AstGenerator::visitPrimaryExpression(
TorqueParser::PrimaryExpressionContext* context) {
if (auto* e = context->helperCall()) return e->accept(this);
@@ -528,18 +569,23 @@ antlrcpp::Any AstGenerator::visitPrimaryExpression(
if (auto* e = context->STRING_LITERAL())
return implicit_cast<Expression*>(RegisterNode(
new StringLiteralExpression{Pos(context), e->getSymbol()->getText()}));
- if (context->CONVERT_KEYWORD())
- return implicit_cast<Expression*>(RegisterNode(new ConvertExpression{
- Pos(context), GetType(context->type()),
- context->expression()->accept(this).as<Expression*>()}));
- if (context->CAST_KEYWORD())
- return implicit_cast<Expression*>(RegisterNode(new CastExpression{
- Pos(context), GetType(context->type()),
- context->IDENTIFIER()->getSymbol()->getText(),
- context->expression()->accept(this).as<Expression*>()}));
+ if (context->structExpression()) {
+ return context->structExpression()->accept(this);
+ }
return context->expression()->accept(this);
}
+antlrcpp::Any AstGenerator::visitStructExpression(
+ TorqueParser::StructExpressionContext* context) {
+ std::vector<Expression*> expressions;
+ for (auto& e : context->expression()) {
+ expressions.push_back(e->accept(this).as<Expression*>());
+ }
+ return implicit_cast<Expression*>(RegisterNode(new StructExpression{
+ Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
+ expressions}));
+}
+
antlrcpp::Any AstGenerator::visitAssignment(
TorqueParser::AssignmentContext* context) {
if (auto* e = context->incrementDecrement()) return e->accept(this);
@@ -572,25 +618,23 @@ antlrcpp::Any AstGenerator::visitIncrementDecrement(
antlrcpp::Any AstGenerator::visitLocationExpression(
TorqueParser::LocationExpressionContext* context) {
- if (auto* l = context->locationExpression()) {
- Expression* location = l->accept(this).as<Expression*>();
- if (auto* e = context->expression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new ElementAccessExpression{
- Pos(context), location, e->accept(this).as<Expression*>()}));
- }
- return implicit_cast<Expression*>(RegisterNode(new FieldAccessExpression{
- Pos(context), location,
- context->IDENTIFIER()->getSymbol()->getText()}));
+ Expression* location = nullptr;
+ if (auto* p = context->primaryExpression()) {
+ location = p->accept(this).as<Expression*>();
+ } else if (auto* l = context->locationExpression()) {
+ location = l->accept(this).as<Expression*>();
+ } else {
+ return implicit_cast<Expression*>(RegisterNode(new IdentifierExpression{
+ Pos(context), context->IDENTIFIER()->getSymbol()->getText(), {}}));
}
- std::vector<TypeExpression*> templateArguments;
- if (context->genericSpecializationTypeList()) {
- templateArguments =
- GetTypeVector(context->genericSpecializationTypeList()->typeList());
+
+ if (auto* e = context->expression()) {
+ return implicit_cast<Expression*>(RegisterNode(new ElementAccessExpression{
+ Pos(context), location, e->accept(this).as<Expression*>()}));
}
- return implicit_cast<Expression*>(RegisterNode(new IdentifierExpression{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- std::move(templateArguments)}));
+
+ return implicit_cast<Expression*>(RegisterNode(new FieldAccessExpression{
+ Pos(context), location, context->IDENTIFIER()->getSymbol()->getText()}));
}
antlrcpp::Any AstGenerator::visitUnaryExpression(
@@ -733,14 +777,14 @@ antlrcpp::Any AstGenerator::visitConditionalExpression(
antlrcpp::Any AstGenerator::visitDiagnosticStatement(
TorqueParser::DiagnosticStatementContext* context) {
- if (context->ASSERT()) {
+ if (context->ASSERT_TOKEN() || context->CHECK_TOKEN()) {
size_t a = context->expression()->start->getStartIndex();
size_t b = context->expression()->stop->getStopIndex();
antlr4::misc::Interval interval(a, b);
std::string source = source_file_context_->stream->getText(interval);
return implicit_cast<Statement*>(RegisterNode(new AssertStatement{
- Pos(context), context->expression()->accept(this).as<Expression*>(),
- source}));
+ Pos(context), context->ASSERT_TOKEN() != nullptr,
+ context->expression()->accept(this).as<Expression*>(), source}));
} else if (context->UNREACHABLE_TOKEN()) {
return implicit_cast<Statement*>(
RegisterNode(new DebugStatement{Pos(context), "unreachable", true}));
@@ -751,6 +795,22 @@ antlrcpp::Any AstGenerator::visitDiagnosticStatement(
}
}
+antlrcpp::Any AstGenerator::visitStructDeclaration(
+ TorqueParser::StructDeclarationContext* context) {
+ StructDeclaration* struct_declaration = RegisterNode(new StructDeclaration{
+ Pos(context), context->IDENTIFIER()->getSymbol()->getText()});
+
+ for (auto* fieldDeclaration :
+ context->fieldListDeclaration()->fieldDeclaration()) {
+ FieldNameAndType field = {
+ fieldDeclaration->IDENTIFIER()->getSymbol()->getText(),
+ GetType(fieldDeclaration->type())};
+ struct_declaration->fields.push_back(field);
+ }
+
+ return implicit_cast<Declaration*>(struct_declaration);
+}
+
void AstGenerator::visitSourceFile(SourceFileContext* context) {
source_file_context_ = context;
current_source_file_ = SourceFileMap::Get().AddSource(context->name);
diff --git a/deps/v8/src/torque/ast-generator.h b/deps/v8/src/torque/ast-generator.h
index f0dbb09bbb..31eca57b91 100644
--- a/deps/v8/src/torque/ast-generator.h
+++ b/deps/v8/src/torque/ast-generator.h
@@ -41,15 +41,21 @@ class AstGenerator : public TorqueBaseVisitor {
antlrcpp::Any visitExternalRuntime(
TorqueParser::ExternalRuntimeContext* context) override;
+ antlrcpp::Any visitConstDeclaration(
+ TorqueParser::ConstDeclarationContext* context) override;
+
antlrcpp::Any visitGenericSpecialization(
TorqueParser::GenericSpecializationContext* context) override;
- antlrcpp::Any visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* context) override;
+ antlrcpp::Any visitExternConstDeclaration(
+ TorqueParser::ExternConstDeclarationContext* context) override;
antlrcpp::Any visitTypeDeclaration(
TorqueParser::TypeDeclarationContext* context) override;
+ antlrcpp::Any visitTypeAliasDeclaration(
+ TorqueParser::TypeAliasDeclarationContext* context) override;
+
antlrcpp::Any visitVariableDeclaration(
TorqueParser::VariableDeclarationContext* context) override;
@@ -63,6 +69,9 @@ class AstGenerator : public TorqueBaseVisitor {
antlrcpp::Any visitHelperCallStatement(
TorqueParser::HelperCallStatementContext* context) override;
+ antlrcpp::Any visitStructExpression(
+ TorqueParser::StructExpressionContext* context) override;
+
antlrcpp::Any visitConditionalExpression(
TorqueParser::ConditionalExpressionContext* context) override;
@@ -102,10 +111,14 @@ class AstGenerator : public TorqueBaseVisitor {
antlrcpp::Any visitAssignment(
TorqueParser::AssignmentContext* context) override;
+ antlrcpp::Any visitFunctionPointerExpression(
+ TorqueParser::FunctionPointerExpressionContext* context) override;
+
antlrcpp::Any visitPrimaryExpression(
TorqueParser::PrimaryExpressionContext* context) override;
- antlrcpp::Any visitTryCatch(TorqueParser::TryCatchContext* context) override;
+ antlrcpp::Any visitTryLabelStatement(
+ TorqueParser::TryLabelStatementContext* context) override;
antlrcpp::Any visitStatementScope(
TorqueParser::StatementScopeContext* context) override;
@@ -139,6 +152,9 @@ class AstGenerator : public TorqueBaseVisitor {
antlrcpp::Any visitDiagnosticStatement(
TorqueParser::DiagnosticStatementContext* context) override;
+ antlrcpp::Any visitStructDeclaration(
+ TorqueParser::StructDeclarationContext* context) override;
+
antlrcpp::Any aggregateResult(antlrcpp::Any aggregate,
const antlrcpp::Any& nextResult) override {
if (aggregate.isNull())
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 01b55add28..6af444a56b 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -29,6 +29,7 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
#define AST_EXPRESSION_NODE_KIND_LIST(V) \
V(CallExpression) \
+ V(StructExpression) \
V(LogicalOrExpression) \
V(LogicalAndExpression) \
V(ConditionalExpression) \
@@ -38,13 +39,12 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
V(FieldAccessExpression) \
V(ElementAccessExpression) \
V(AssignmentExpression) \
- V(IncrementDecrementExpression) \
- V(CastExpression) \
- V(ConvertExpression)
+ V(IncrementDecrementExpression)
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
- V(FunctionTypeExpression)
+ V(FunctionTypeExpression) \
+ V(UnionTypeExpression)
#define AST_STATEMENT_NODE_KIND_LIST(V) \
V(BlockStatement) \
@@ -61,16 +61,19 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
V(TailCallStatement) \
V(VarDeclarationStatement) \
V(GotoStatement) \
- V(TryCatchStatement)
+ V(TryLabelStatement)
#define AST_DECLARATION_NODE_KIND_LIST(V) \
V(TypeDeclaration) \
+ V(TypeAliasDeclaration) \
V(StandardDeclaration) \
V(GenericDeclaration) \
V(SpecializationDeclaration) \
- V(ConstDeclaration) \
+ V(ExternConstDeclaration) \
+ V(StructDeclaration) \
V(DefaultModuleDeclaration) \
- V(ExplicitModuleDeclaration)
+ V(ExplicitModuleDeclaration) \
+ V(ConstDeclaration)
#define AST_CALLABLE_NODE_KIND_LIST(V) \
V(TorqueMacroDeclaration) \
@@ -85,7 +88,6 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
AST_STATEMENT_NODE_KIND_LIST(V) \
AST_DECLARATION_NODE_KIND_LIST(V) \
AST_CALLABLE_NODE_KIND_LIST(V) \
- V(CatchBlock) \
V(LabelBlock)
struct AstNode {
@@ -260,6 +262,14 @@ struct CallExpression : Expression {
std::vector<std::string> labels;
};
+struct StructExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(StructExpression)
+ StructExpression(SourcePosition p, std::string n, std::vector<Expression*> e)
+ : Expression(kKind, p), name(n), expressions(std::move(e)) {}
+ std::string name;
+ std::vector<Expression*> expressions;
+};
+
struct LogicalOrExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalOrExpression)
LogicalOrExpression(SourcePosition p, Expression* l, Expression* r)
@@ -300,24 +310,6 @@ struct NumberLiteralExpression : Expression {
std::string number;
};
-struct CastExpression : Expression {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(CastExpression)
- CastExpression(SourcePosition p, TypeExpression* t, std::string o,
- Expression* v)
- : Expression(kKind, p), type(t), otherwise(o), value(v) {}
- TypeExpression* type;
- std::string otherwise;
- Expression* value;
-};
-
-struct ConvertExpression : Expression {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(ConvertExpression)
- ConvertExpression(SourcePosition p, TypeExpression* t, Expression* v)
- : Expression(kKind, p), type(t), value(v) {}
- TypeExpression* type;
- Expression* value;
-};
-
struct ElementAccessExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ElementAccessExpression)
ElementAccessExpression(SourcePosition p, Expression* a, Expression* i)
@@ -379,6 +371,14 @@ struct FunctionTypeExpression : TypeExpression {
TypeExpression* return_type;
};
+struct UnionTypeExpression : TypeExpression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(UnionTypeExpression)
+ UnionTypeExpression(SourcePosition pos, TypeExpression* a, TypeExpression* b)
+ : TypeExpression(kKind, pos), a(a), b(b) {}
+ TypeExpression* a;
+ TypeExpression* b;
+};
+
struct ExpressionStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExpressionStatement)
ExpressionStatement(SourcePosition p, Expression* e)
@@ -426,8 +426,13 @@ struct DebugStatement : Statement {
struct AssertStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AssertStatement)
- AssertStatement(SourcePosition p, Expression* e, const std::string& s)
- : Statement(kKind, p), expression(e), source(s) {}
+ AssertStatement(SourcePosition pos, bool debug_only, Expression* expression,
+ std::string source)
+ : Statement(kKind, pos),
+ debug_only(debug_only),
+ expression(expression),
+ source(std::move(source)) {}
+ bool debug_only;
Expression* expression;
std::string source;
};
@@ -441,9 +446,14 @@ struct TailCallStatement : Statement {
struct VarDeclarationStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(VarDeclarationStatement)
- VarDeclarationStatement(SourcePosition p, std::string n, TypeExpression* t,
- base::Optional<Expression*> i)
- : Statement(kKind, p), name(std::move(n)), type(t), initializer(i) {}
+ VarDeclarationStatement(SourcePosition p, bool c, std::string n,
+ TypeExpression* t, base::Optional<Expression*> i)
+ : Statement(kKind, p),
+ const_qualified(c),
+ name(std::move(n)),
+ type(t),
+ initializer(i) {}
+ bool const_qualified;
std::string name;
TypeExpression* type;
base::Optional<Expression*> initializer;
@@ -501,14 +511,6 @@ struct ForOfLoopStatement : Statement {
Statement* body;
};
-struct CatchBlock : AstNode {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(CatchBlock)
- CatchBlock(SourcePosition p, const std::string& c, Statement* b)
- : AstNode(kKind, p), caught(std::move(c)), body(std::move(b)) {}
- std::string caught;
- Statement* body;
-};
-
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
LabelBlock(SourcePosition p, const std::string& l,
@@ -522,12 +524,11 @@ struct LabelBlock : AstNode {
Statement* body;
};
-struct TryCatchStatement : Statement {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(TryCatchStatement)
- TryCatchStatement(SourcePosition p, Statement* t, std::vector<CatchBlock*> c)
- : Statement(kKind, p), try_block(std::move(t)), catch_blocks(c) {}
+struct TryLabelStatement : Statement {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelStatement)
+ TryLabelStatement(SourcePosition p, Statement* t)
+ : Statement(kKind, p), try_block(std::move(t)) {}
Statement* try_block;
- std::vector<CatchBlock*> catch_blocks;
std::vector<LabelBlock*> label_blocks;
};
@@ -553,6 +554,19 @@ struct TypeDeclaration : Declaration {
base::Optional<std::string> constexpr_generates;
};
+struct TypeAliasDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeAliasDeclaration)
+ TypeAliasDeclaration(SourcePosition p, std::string n, TypeExpression* t)
+ : Declaration(kKind, p), name(std::move(n)), type(t) {}
+ std::string name;
+ TypeExpression* type;
+};
+
+struct FieldNameAndType {
+ std::string name;
+ TypeExpression* type;
+};
+
struct LabelAndTypes {
std::string name;
std::vector<TypeExpression*> types;
@@ -580,27 +594,26 @@ struct CallableNode : AstNode {
struct MacroDeclaration : CallableNode {
DEFINE_AST_NODE_INNER_BOILERPLATE(MacroDeclaration)
- MacroDeclaration(AstNode::Kind kind, SourcePosition p, std::string n, bool i,
+ MacroDeclaration(AstNode::Kind kind, SourcePosition p, std::string n,
base::Optional<std::string> o, ParameterList pl,
TypeExpression* r, const LabelAndTypesVector& l)
- : CallableNode(kind, p, n, pl, r, l), implicit(i), op(std::move(o)) {}
- bool implicit;
+ : CallableNode(kind, p, n, pl, r, l), op(std::move(o)) {}
base::Optional<std::string> op;
};
struct ExternalMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalMacroDeclaration)
- ExternalMacroDeclaration(SourcePosition p, std::string n, bool i,
+ ExternalMacroDeclaration(SourcePosition p, std::string n,
base::Optional<std::string> o, ParameterList pl,
TypeExpression* r, const LabelAndTypesVector& l)
- : MacroDeclaration(kKind, p, n, i, o, pl, r, l) {}
+ : MacroDeclaration(kKind, p, n, o, pl, r, l) {}
};
struct TorqueMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueMacroDeclaration)
TorqueMacroDeclaration(SourcePosition p, std::string n, ParameterList pl,
TypeExpression* r, const LabelAndTypesVector& l)
- : MacroDeclaration(kKind, p, n, false, {}, pl, r, l) {}
+ : MacroDeclaration(kKind, p, n, {}, pl, r, l) {}
};
struct BuiltinDeclaration : CallableNode {
@@ -631,6 +644,16 @@ struct ExternalRuntimeDeclaration : CallableNode {
: CallableNode(kKind, p, n, pl, r, {}) {}
};
+struct ConstDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(ConstDeclaration)
+ ConstDeclaration(SourcePosition p, std::string n, TypeExpression* r,
+ Expression* e)
+ : Declaration(kKind, p), name(std::move(n)), type(r), expression(e) {}
+ std::string name;
+ TypeExpression* type;
+ Expression* expression;
+};
+
struct StandardDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StandardDeclaration)
StandardDeclaration(SourcePosition p, CallableNode* c, Statement* b)
@@ -642,37 +665,40 @@ struct StandardDeclaration : Declaration {
struct GenericDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(GenericDeclaration)
GenericDeclaration(SourcePosition p, CallableNode* c,
- std::vector<std::string> gp, Statement* b)
+ std::vector<std::string> gp,
+ base::Optional<Statement*> b = base::nullopt)
: Declaration(kKind, p),
callable(c),
generic_parameters(std::move(gp)),
body(b) {}
CallableNode* callable;
std::vector<std::string> generic_parameters;
- Statement* body;
+ base::Optional<Statement*> body;
};
struct SpecializationDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(SpecializationDeclaration)
- SpecializationDeclaration(SourcePosition p, std::string n,
+ SpecializationDeclaration(SourcePosition p, std::string n, bool e,
std::vector<TypeExpression*> gp, ParameterList pl,
TypeExpression* r, LabelAndTypesVector l,
Statement* b)
: Declaration(kKind, p),
name(std::move(n)),
+ external(e),
generic_parameters(gp),
signature(new CallableNodeSignature{pl, r, l}),
body(b) {}
std::string name;
+ bool external;
std::vector<TypeExpression*> generic_parameters;
std::unique_ptr<CallableNodeSignature> signature;
Statement* body;
};
-struct ConstDeclaration : Declaration {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(ConstDeclaration)
- ConstDeclaration(SourcePosition p, std::string n, TypeExpression* t,
- std::string l)
+struct ExternConstDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternConstDeclaration)
+ ExternConstDeclaration(SourcePosition p, std::string n, TypeExpression* t,
+ std::string l)
: Declaration(kKind, p),
name(std::move(n)),
type(t),
@@ -682,6 +708,14 @@ struct ConstDeclaration : Declaration {
std::string literal;
};
+struct StructDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
+ StructDeclaration(SourcePosition p, std::string n)
+ : Declaration(kKind, p), name(std::move(n)) {}
+ std::string name;
+ std::vector<FieldNameAndType> fields;
+};
+
#define ENUM_ITEM(name) \
case AstNode::Kind::k##name: \
return std::is_base_of<T, name>::value; \
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 33f0481f9a..9cd56a2ed9 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -7,25 +7,36 @@
#include <type_traits>
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
namespace v8 {
namespace internal {
namespace torque {
-// Contextual variables store a value in one or more stack-allocated scopes and
-// allow global access to the most recent active scope on the current
-// call-stack.
+// {ContextualVariable} provides a clean alternative to a global variable.
+// The contextual variable is mutable, and supports managing the value of
+// a variable in a well-nested fashion via the {Scope} class.
+// {ContextualVariable} only stores a pointer to the current value, which
+// is stored in a {Scope} object. The most recent value can be retrieved
+// via Get(). Because only {Scope} has actual storage, there must be at
+// least one active {Scope} (i.e. in a surrounding C++ scope), whenever Get()
+// is called.
+// Note that contextual variables must only be used from the same thread,
+// i.e. {Scope} and Get() have to be in the same thread.
template <class Derived, class VarType>
class ContextualVariable {
public:
- // A {Scope} contains a new object of type {T} and gives
+ // A {Scope} contains a new object of type {VarType} and gives
// ContextualVariable::Get() access to it. Upon destruction, the contextual
// variable is restored to the state before the {Scope} was created. Scopes
// have to follow a stack discipline: A {Scope} has to be destructed before
// any older scope is destructed.
class Scope {
public:
- explicit Scope(VarType x = VarType())
- : current_(std::move(x)), previous_(top_) {
+ template <class... Args>
+ explicit Scope(Args&&... args)
+ : current_(std::forward<Args>(args)...), previous_(top_) {
top_ = &current_;
}
~Scope() {
@@ -40,6 +51,9 @@ class ContextualVariable {
static_assert(std::is_base_of<ContextualVariable, Derived>::value,
"Curiously Recurring Template Pattern");
+
+ DISALLOW_NEW_AND_DELETE();
+ DISALLOW_COPY_AND_ASSIGN(Scope);
};
// Access the most recent active {Scope}. There has to be an active {Scope}
@@ -62,7 +76,7 @@ thread_local VarType* ContextualVariable<Derived, VarType>::top_ = nullptr;
: v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {};
// By inheriting from {ContextualClass} a class can become a contextual variable
-// of itself.
+// of itself, which is very similar to a singleton.
template <class T>
using ContextualClass = ContextualVariable<T, T>;
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 88ba228635..122f7de9b4 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -11,57 +11,74 @@ namespace v8 {
namespace internal {
namespace torque {
-bool Type::IsSubtypeOf(const Type* supertype) const {
- const Type* subtype = this;
- while (subtype != nullptr) {
- if (subtype == supertype) return true;
- subtype = subtype->parent();
- }
- return false;
+std::ostream& operator<<(std::ostream& os, const Callable& m) {
+ os << "callable " << m.name() << "(" << m.signature().parameter_types
+ << "): " << *m.signature().return_type;
+ return os;
}
-bool Type::IsAbstractName(const std::string& name) const {
- if (!IsAbstractType()) return false;
- return AbstractType::cast(this)->name() == name;
+std::ostream& operator<<(std::ostream& os, const Variable& v) {
+ os << "variable " << v.name() << ": " << *v.type();
+ return os;
}
-std::string AbstractType::GetGeneratedTNodeTypeName() const {
- std::string result = GetGeneratedTypeName();
- DCHECK_EQ(result.substr(0, 6), "TNode<");
- result = result.substr(6, result.length() - 7);
- return result;
+std::ostream& operator<<(std::ostream& os, const Builtin& b) {
+ os << "builtin " << *b.signature().return_type << " " << b.name()
+ << b.signature().parameter_types;
+ return os;
}
-std::string FunctionPointerType::ToString() const {
- std::stringstream result;
- result << "builtin (";
- bool first = true;
- for (const Type* t : parameter_types_) {
- if (!first) {
- result << ", ";
- first = false;
- }
- result << t;
+std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
+ os << "runtime function " << *b.signature().return_type << " " << b.name()
+ << b.signature().parameter_types;
+ return os;
+}
+
+std::string Variable::RValue() const {
+ if (!IsDefined()) {
+ ReportError("Reading uninitialized variable.");
+ }
+ if (type()->IsStructType()) {
+ return value();
}
- result << ") => " << return_type_;
- return result.str();
+ std::string result = "(*" + value() + ")";
+ if (!IsConst()) result += ".value()";
+ return result;
}
-std::string FunctionPointerType::MangledName() const {
- std::stringstream result;
- result << "FT";
- bool first = true;
- for (const Type* t : parameter_types_) {
- if (!first) {
- result << ", ";
- first = false;
+void PrintLabel(std::ostream& os, const Label& l, bool with_names) {
+ os << l.name();
+ if (l.GetParameterCount() != 0) {
+ os << "(";
+ if (with_names) {
+ PrintCommaSeparatedList(os, l.GetParameters(),
+ [](Variable* v) -> std::string {
+ std::stringstream stream;
+ stream << v->name();
+ stream << ": ";
+ stream << *(v->type());
+ return stream.str();
+ });
+ } else {
+ PrintCommaSeparatedList(
+ os, l.GetParameters(),
+ [](Variable* v) -> const Type& { return *(v->type()); });
}
- std::string arg_type_string = t->MangledName();
- result << arg_type_string.size() << arg_type_string;
+ os << ")";
}
- std::string return_type_string = return_type_->MangledName();
- result << return_type_string.size() << return_type_string;
- return result.str();
+}
+
+std::ostream& operator<<(std::ostream& os, const Label& l) {
+ PrintLabel(os, l, true);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const Generic& g) {
+ os << "generic " << g.name() << "<";
+ PrintCommaSeparatedList(os, g.declaration()->generic_parameters);
+ os << ">";
+
+ return os;
}
} // namespace torque
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index de5f713f50..b8abcca801 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -32,9 +32,11 @@ class Declarable {
kBuiltin,
kRuntimeFunction,
kGeneric,
+ kGenericList,
kTypeAlias,
kLabel,
- kConstant
+ kExternConstant,
+ kModuleConstant
};
Kind kind() const { return kind_; }
bool IsMacro() const { return kind() == kMacro; }
@@ -46,8 +48,13 @@ class Declarable {
bool IsLabel() const { return kind() == kLabel; }
bool IsVariable() const { return kind() == kVariable; }
bool IsMacroList() const { return kind() == kMacroList; }
- bool IsConstant() const { return kind() == kConstant; }
- bool IsValue() const { return IsVariable() || IsConstant() || IsParameter(); }
+ bool IsGenericList() const { return kind() == kGenericList; }
+ bool IsExternConstant() const { return kind() == kExternConstant; }
+ bool IsModuleConstant() const { return kind() == kModuleConstant; }
+ bool IsValue() const {
+ return IsVariable() || IsExternConstant() || IsParameter() ||
+ IsModuleConstant();
+ }
virtual const char* type_name() const { return "<<unknown>>"; }
protected:
@@ -82,11 +89,8 @@ class Value : public Declarable {
public:
const std::string& name() const { return name_; }
virtual bool IsConst() const { return true; }
- virtual std::string GetValueForDeclaration() const = 0;
- virtual std::string GetValueForRead() const {
- return GetValueForDeclaration();
- }
- virtual std::string GetValueForWrite() const { UNREACHABLE(); }
+ virtual std::string value() const = 0;
+ virtual std::string RValue() const { return value(); }
DECLARE_DECLARABLE_BOILERPLATE(Value, value);
const Type* type() const { return type_; }
@@ -102,7 +106,7 @@ class Value : public Declarable {
class Parameter : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(Parameter, parameter);
- std::string GetValueForDeclaration() const override { return var_name_; }
+ std::string value() const override { return var_name_; }
private:
friend class Declarations;
@@ -113,33 +117,46 @@ class Parameter : public Value {
std::string var_name_;
};
+class ModuleConstant : public Value {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(ModuleConstant, constant);
+ std::string value() const override { UNREACHABLE(); }
+ std::string RValue() const override { return name() + "()"; }
+
+ private:
+ friend class Declarations;
+ explicit ModuleConstant(const std::string& name, const Type* type)
+ : Value(Declarable::kModuleConstant, type, name) {}
+};
+
class Variable : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(Variable, variable);
- bool IsConst() const override { return false; }
- std::string GetValueForDeclaration() const override { return value_; }
- std::string GetValueForRead() const override {
- if (type()->IsConstexpr()) {
- return std::string("*") + value_;
- } else {
- return value_ + "->value()";
+ bool IsConst() const override { return const_; }
+ std::string value() const override { return value_; }
+ std::string RValue() const override;
+ void Define() {
+ if (defined_ && IsConst()) {
+ ReportError("Cannot re-define a const-bound variable.");
}
+ defined_ = true;
}
- std::string GetValueForWrite() const override {
- return std::string("*") + value_;
- }
- void Define() { defined_ = true; }
bool IsDefined() const { return defined_; }
private:
friend class Declarations;
- Variable(const std::string& name, const std::string& value, const Type* type)
+ Variable(const std::string& name, const std::string& value, const Type* type,
+ bool is_const)
: Value(Declarable::kVariable, type, name),
value_(value),
- defined_(false) {}
+ defined_(false),
+ const_(is_const) {
+ DCHECK_IMPLIES(type->IsConstexpr(), IsConst());
+ }
std::string value_;
bool defined_;
+ bool const_;
};
class Label : public Declarable {
@@ -170,16 +187,16 @@ class Label : public Declarable {
bool used_;
};
-class Constant : public Value {
+class ExternConstant : public Value {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Constant, constant);
- std::string GetValueForDeclaration() const override { return value_; }
+ DECLARE_DECLARABLE_BOILERPLATE(ExternConstant, constant);
+ std::string value() const override { return value_; }
private:
friend class Declarations;
- explicit Constant(const std::string& name, const Type* type,
- const std::string& value)
- : Value(Declarable::kConstant, type, name), value_(value) {}
+ explicit ExternConstant(const std::string& name, const Type* type,
+ const std::string& value)
+ : Value(Declarable::kExternConstant, type, name), value_(value) {}
std::string value_;
};
@@ -225,7 +242,11 @@ class Macro : public Callable {
protected:
Macro(Declarable::Kind type, const std::string& name,
const Signature& signature)
- : Callable(type, name, signature) {}
+ : Callable(type, name, signature) {
+ if (signature.parameter_types.var_args) {
+ ReportError("Varargs are not supported for macros.");
+ }
+ }
private:
friend class Declarations;
@@ -286,6 +307,7 @@ class Generic : public Declarable {
DECLARE_DECLARABLE_BOILERPLATE(Generic, generic);
GenericDeclaration* declaration() const { return declaration_; }
+ const std::string& name() const { return name_; }
Module* module() const { return module_; }
private:
@@ -293,13 +315,31 @@ class Generic : public Declarable {
Generic(const std::string& name, Module* module,
GenericDeclaration* declaration)
: Declarable(Declarable::kGeneric),
+ name_(name),
module_(module),
declaration_(declaration) {}
+ std::string name_;
Module* module_;
GenericDeclaration* declaration_;
};
+class GenericList : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(GenericList, generic_list);
+ const std::vector<Generic*>& list() { return list_; }
+ Generic* AddGeneric(Generic* generic) {
+ list_.push_back(generic);
+ return generic;
+ }
+
+ private:
+ friend class Declarations;
+ GenericList() : Declarable(Declarable::kGenericList) {}
+
+ std::vector<Generic*> list_;
+};
+
typedef std::pair<Generic*, TypeVector> SpecializationKey;
class TypeAlias : public Declarable {
@@ -316,28 +356,14 @@ class TypeAlias : public Declarable {
const Type* type_;
};
-inline std::ostream& operator<<(std::ostream& os, const Callable& m) {
- os << "callable " << m.name() << "(" << m.signature().parameter_types
- << "): " << m.signature().return_type;
- return os;
-}
-
-inline std::ostream& operator<<(std::ostream& os, const Variable& v) {
- os << "variable " << v.name() << ": " << v.type();
- return os;
-}
-
-inline std::ostream& operator<<(std::ostream& os, const Builtin& b) {
- os << "builtin " << b.signature().return_type << " " << b.name()
- << b.signature().parameter_types;
- return os;
-}
-
-inline std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
- os << "runtime function " << b.signature().return_type << " " << b.name()
- << b.signature().parameter_types;
- return os;
-}
+void PrintLabel(std::ostream& os, const Label& l, bool with_names);
+
+std::ostream& operator<<(std::ostream& os, const Callable& m);
+std::ostream& operator<<(std::ostream& os, const Variable& v);
+std::ostream& operator<<(std::ostream& os, const Builtin& b);
+std::ostream& operator<<(std::ostream& os, const Label& l);
+std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b);
+std::ostream& operator<<(std::ostream& os, const Generic& g);
#undef DECLARE_DECLARABLE_BOILERPLATE
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index a841a9d823..b83dbb3bc3 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -90,11 +90,20 @@ Builtin* DeclarationVisitor::BuiltinDeclarationCommon(
declarations()->LookupGlobalType(OBJECT_TYPE_STRING))) {
std::stringstream stream;
stream << "second parameter to javascript builtin " << decl->name
- << " is " << signature.types()[1] << " but should be Object";
+ << " is " << *signature.types()[1] << " but should be Object";
ReportError(stream.str());
}
}
+ if (const StructType* struct_type =
+ StructType::DynamicCast(signature.return_type)) {
+ std::stringstream stream;
+ stream << "builtins (in this case" << decl->name
+ << ") cannot return structs (in this case " << struct_type->name()
+ << ")";
+ ReportError(stream.str());
+ }
+
std::string generated_name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
return declarations()->DeclareBuiltin(generated_name, kind, external,
@@ -117,6 +126,15 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
ReportError(stream.str());
}
+ if (signature.return_type->IsStructType()) {
+ std::stringstream stream;
+ stream << "runtime functions (in this case" << decl->name
+ << ") cannot return structs (in this case "
+ << static_cast<const StructType*>(signature.return_type)->name()
+ << ")";
+ ReportError(stream.str());
+ }
+
declarations()->DeclareRuntimeFunction(decl->name, signature);
}
@@ -129,31 +147,7 @@ void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl,
std::string generated_name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- declarations()->DeclareMacro(generated_name, signature);
- if (decl->op) {
- OperationHandler handler(
- {decl->name, signature.parameter_types, signature.return_type});
- auto i = global_context_.op_handlers_.find(*decl->op);
- if (i == global_context_.op_handlers_.end()) {
- global_context_.op_handlers_[*decl->op] = std::vector<OperationHandler>();
- i = global_context_.op_handlers_.find(*decl->op);
- }
- i->second.push_back(handler);
- }
-
- if (decl->implicit) {
- if (!decl->op || *decl->op != "convert<>") {
- ReportError("implicit can only be used with cast<> operator");
- }
-
- const TypeVector& parameter_types = signature.parameter_types.types;
- if (parameter_types.size() != 1 || signature.parameter_types.var_args) {
- ReportError(
- "implicit cast operators doesn't only have a single parameter");
- }
- GetTypeOracle().RegisterImplicitConversion(signature.return_type,
- parameter_types[0]);
- }
+ declarations()->DeclareMacro(generated_name, signature, decl->op);
}
void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl,
@@ -162,9 +156,9 @@ void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl,
CurrentCallableActivator activator(global_context_, builtin, decl);
DeclareSignature(signature);
if (signature.parameter_types.var_args) {
- declarations()->DeclareConstant(
+ declarations()->DeclareExternConstant(
decl->signature->parameters.arguments_variable,
- GetTypeOracle().GetArgumentsType(), "arguments");
+ TypeOracle::GetArgumentsType(), "arguments");
}
torque_builtins_.push_back(builtin);
Visit(body);
@@ -174,27 +168,38 @@ void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl,
const Signature& signature, Statement* body) {
std::string generated_name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- Macro* macro = declarations()->DeclareMacro(generated_name, signature);
+ Macro* macro =
+ declarations()->DeclareMacro(generated_name, signature, decl->op);
CurrentCallableActivator activator(global_context_, macro, decl);
DeclareSignature(signature);
+ Variable* return_variable = nullptr;
if (!signature.return_type->IsVoidOrNever()) {
- declarations()->DeclareVariable(kReturnValueVariable,
- signature.return_type);
+ return_variable =
+ DeclareVariable(kReturnValueVariable, signature.return_type,
+ signature.return_type->IsConstexpr());
}
PushControlSplit();
- Visit(body);
+ if (body != nullptr) {
+ Visit(body);
+ }
auto changed_vars = PopControlSplit();
+ if (return_variable) changed_vars.insert(return_variable);
global_context_.AddControlSplitChangedVariables(
decl, declarations()->GetCurrentSpecializationTypeNamesVector(),
changed_vars);
}
+void DeclarationVisitor::Visit(ConstDeclaration* decl) {
+ declarations()->DeclareModuleConstant(decl->name,
+ declarations()->GetType(decl->type));
+ Visit(decl->expression);
+}
+
void DeclarationVisitor::Visit(StandardDeclaration* decl) {
- Signature signature =
- MakeSignature(decl->callable, decl->callable->signature.get());
+ Signature signature = MakeSignature(decl->callable->signature.get());
Visit(decl->callable, signature, decl->body);
}
@@ -203,45 +208,213 @@ void DeclarationVisitor::Visit(GenericDeclaration* decl) {
}
void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
- Generic* generic = declarations()->LookupGeneric(decl->name);
- SpecializationKey key = {generic, GetTypeVector(decl->generic_parameters)};
- CallableNode* callable = generic->declaration()->callable;
+ if ((decl->body != nullptr) == decl->external) {
+ std::stringstream stream;
+ stream << "specialization of " << decl->name
+ << " must either be marked 'extern' or have a body";
+ ReportError(stream.str());
+ }
- {
- Signature signature_with_types =
- MakeSignature(callable, decl->signature.get());
+ GenericList* generic_list = declarations()->LookupGeneric(decl->name);
+ // Find the matching generic specialization based on the concrete parameter
+ // list.
+ CallableNode* matching_callable = nullptr;
+ SpecializationKey matching_key;
+ Signature signature_with_types = MakeSignature(decl->signature.get());
+ for (Generic* generic : generic_list->list()) {
+ SpecializationKey key = {generic, GetTypeVector(decl->generic_parameters)};
+ CallableNode* callable_candidate = generic->declaration()->callable;
// Abuse the Specialization nodes' scope to temporarily declare the
// specialization aliases for the generic types to compare signatures. This
// scope is never used for anything else, so it's OK to pollute it.
- Declarations::NodeScopeActivator specialization_activator(declarations(),
- decl);
+ Declarations::CleanNodeScopeActivator specialization_activator(
+ declarations(), decl);
DeclareSpecializedTypes(key);
Signature generic_signature_with_types =
- MakeSignature(generic->declaration()->callable,
- generic->declaration()->callable->signature.get());
- if (!signature_with_types.HasSameTypesAs(generic_signature_with_types)) {
- std::stringstream stream;
- stream << "specialization of " << callable->name
- << " has incompatible parameter list or label list with generic "
- "definition";
- ReportError(stream.str());
+ MakeSignature(generic->declaration()->callable->signature.get());
+ if (signature_with_types.HasSameTypesAs(generic_signature_with_types)) {
+ if (matching_callable != nullptr) {
+ std::stringstream stream;
+ stream << "specialization of " << callable_candidate->name
+ << " is ambigous, it matches more than one generic declaration ("
+ << *matching_key.first << " and " << *key.first << ")";
+ ReportError(stream.str());
+ }
+ matching_callable = callable_candidate;
+ matching_key = key;
}
}
- SpecializeGeneric({key, callable, decl->signature.get(), decl->body});
+ if (matching_callable == nullptr) {
+ std::stringstream stream;
+ stream << "specialization of " << decl->name
+ << " doesn't match any generic declaration";
+ ReportError(stream.str());
+ }
+
+ // Make sure the declarations of the parameter types for the specialization
+ // are the ones from the matching generic.
+ {
+ Declarations::CleanNodeScopeActivator specialization_activator(
+ declarations(), decl);
+ DeclareSpecializedTypes(matching_key);
+ }
+
+ SpecializeGeneric({matching_key, matching_callable, decl->signature.get(),
+ decl->body, decl->pos});
}
void DeclarationVisitor::Visit(ReturnStatement* stmt) {
- const Callable* callable = global_context_.GetCurrentCallable();
- if (callable->IsMacro() && callable->HasReturnValue()) {
- MarkVariableModified(
- Variable::cast(declarations()->LookupValue(kReturnValueVariable)));
- }
if (stmt->value) {
Visit(*stmt->value);
}
}
+Variable* DeclarationVisitor::DeclareVariable(const std::string& name,
+ const Type* type, bool is_const) {
+ Variable* result = declarations()->DeclareVariable(name, type, is_const);
+ if (type->IsStructType()) {
+ const StructType* struct_type = StructType::cast(type);
+ for (auto& field : struct_type->fields()) {
+ std::string field_var_name = name + "." + field.name;
+ DeclareVariable(field_var_name, field.type, is_const);
+ }
+ }
+ return result;
+}
+
+Parameter* DeclarationVisitor::DeclareParameter(const std::string& name,
+ const Type* type) {
+ Parameter* result = declarations()->DeclareParameter(
+ name, GetParameterVariableFromName(name), type);
+ if (type->IsStructType()) {
+ const StructType* struct_type = StructType::cast(type);
+ for (auto& field : struct_type->fields()) {
+ std::string field_var_name = name + "." + field.name;
+ DeclareParameter(field_var_name, field.type);
+ }
+ }
+ return result;
+}
+
+void DeclarationVisitor::Visit(VarDeclarationStatement* stmt) {
+ std::string variable_name = stmt->name;
+ const Type* type = declarations()->GetType(stmt->type);
+ if (type->IsConstexpr() && !stmt->const_qualified) {
+ ReportError(
+ "cannot declare variable with constexpr type. Use 'const' instead.");
+ }
+ DeclareVariable(variable_name, type, stmt->const_qualified);
+ if (global_context_.verbose()) {
+ std::cout << "declared variable " << variable_name << " with type " << *type
+ << "\n";
+ }
+
+ // const qualified variables are required to be initialized properly.
+ if (stmt->const_qualified && !stmt->initializer) {
+ std::stringstream stream;
+ stream << "local constant \"" << variable_name << "\" is not initialized.";
+ ReportError(stream.str());
+ }
+
+ if (stmt->initializer) {
+ Visit(*stmt->initializer);
+ if (global_context_.verbose()) {
+ std::cout << "variable has initialization expression at "
+ << CurrentPositionAsString() << "\n";
+ }
+ }
+}
+
+void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
+ const Type* type = declarations()->GetType(decl->type);
+ if (!type->IsConstexpr()) {
+ std::stringstream stream;
+ stream << "extern constants must have constexpr type, but found: \""
+ << *type << "\"\n";
+ ReportError(stream.str());
+ }
+
+ declarations()->DeclareExternConstant(decl->name, type, decl->literal);
+}
+
+void DeclarationVisitor::Visit(StructDeclaration* decl) {
+ std::vector<NameAndType> fields;
+ for (auto& field : decl->fields) {
+ const Type* field_type = declarations()->GetType(field.type);
+ fields.push_back({field.name, field_type});
+ }
+ declarations()->DeclareStruct(CurrentModule(), decl->name, fields);
+}
+
+void DeclarationVisitor::Visit(LogicalOrExpression* expr) {
+ {
+ Declarations::NodeScopeActivator scope(declarations(), expr->left);
+ declarations()->DeclareLabel(kFalseLabelName);
+ Visit(expr->left);
+ }
+ Visit(expr->right);
+}
+
+void DeclarationVisitor::Visit(LogicalAndExpression* expr) {
+ {
+ Declarations::NodeScopeActivator scope(declarations(), expr->left);
+ declarations()->DeclareLabel(kTrueLabelName);
+ Visit(expr->left);
+ }
+ Visit(expr->right);
+}
+
+void DeclarationVisitor::DeclareExpressionForBranch(Expression* node) {
+ Declarations::NodeScopeActivator scope(declarations(), node);
+ // Conditional expressions can either explicitly return a bit
+ // type, or they can be backed by macros that don't return but
+ // take a true and false label. By declaring the labels before
+ // visiting the conditional expression, those label-based
+ // macro conditionals will be able to find them through normal
+ // label lookups.
+ declarations()->DeclareLabel(kTrueLabelName);
+ declarations()->DeclareLabel(kFalseLabelName);
+ Visit(node);
+}
+
+void DeclarationVisitor::Visit(ConditionalExpression* expr) {
+ DeclareExpressionForBranch(expr->condition);
+ PushControlSplit();
+ Visit(expr->if_true);
+ Visit(expr->if_false);
+ auto changed_vars = PopControlSplit();
+ global_context_.AddControlSplitChangedVariables(
+ expr, declarations()->GetCurrentSpecializationTypeNamesVector(),
+ changed_vars);
+}
+
+void DeclarationVisitor::Visit(IfStatement* stmt) {
+ if (!stmt->is_constexpr) {
+ PushControlSplit();
+ }
+ DeclareExpressionForBranch(stmt->condition);
+ Visit(stmt->if_true);
+ if (stmt->if_false) Visit(*stmt->if_false);
+ if (!stmt->is_constexpr) {
+ auto changed_vars = PopControlSplit();
+ global_context_.AddControlSplitChangedVariables(
+ stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
+ changed_vars);
+ }
+}
+
+void DeclarationVisitor::Visit(WhileStatement* stmt) {
+ Declarations::NodeScopeActivator scope(declarations(), stmt);
+ DeclareExpressionForBranch(stmt->condition);
+ PushControlSplit();
+ Visit(stmt->body);
+ auto changed_vars = PopControlSplit();
+ global_context_.AddControlSplitChangedVariables(
+ stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
+ changed_vars);
+}
+
void DeclarationVisitor::Visit(ForOfLoopStatement* stmt) {
// Scope for for iteration variable
Declarations::NodeScopeActivator scope(declarations(), stmt);
@@ -257,13 +430,26 @@ void DeclarationVisitor::Visit(ForOfLoopStatement* stmt) {
changed_vars);
}
-void DeclarationVisitor::Visit(TryCatchStatement* stmt) {
- // Activate a new scope to declare catch handler labels, they should not be
- // visible outside the catch.
+void DeclarationVisitor::Visit(ForLoopStatement* stmt) {
+ Declarations::NodeScopeActivator scope(declarations(), stmt);
+ if (stmt->var_declaration) Visit(*stmt->var_declaration);
+ PushControlSplit();
+ DeclareExpressionForBranch(stmt->test);
+ Visit(stmt->body);
+ Visit(stmt->action);
+ auto changed_vars = PopControlSplit();
+ global_context_.AddControlSplitChangedVariables(
+ stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
+ changed_vars);
+}
+
+void DeclarationVisitor::Visit(TryLabelStatement* stmt) {
+ // Activate a new scope to declare handler labels, they should not be
+ // visible outside the label block.
{
Declarations::NodeScopeActivator scope(declarations(), stmt);
- // Declare catch labels
+ // Declare labels
for (LabelBlock* block : stmt->label_blocks) {
CurrentSourcePosition::Scope scope(block->pos);
Label* shared_label = declarations()->DeclareLabel(block->label);
@@ -277,42 +463,96 @@ void DeclarationVisitor::Visit(TryCatchStatement* stmt) {
size_t i = 0;
for (auto p : block->parameters.names) {
- shared_label->AddVariable(declarations()->DeclareVariable(
- p, declarations()->GetType(block->parameters.types[i])));
+ const Type* type =
+ declarations()->GetType(block->parameters.types[i]);
+ if (type->IsConstexpr()) {
+ ReportError("no constexpr type allowed for label arguments");
+ }
+
+ shared_label->AddVariable(DeclareVariable(p, type, false));
++i;
}
}
if (global_context_.verbose()) {
- std::cout << " declaring catch for exception " << block->label << "\n";
+ std::cout << " declaring label " << block->label << "\n";
}
}
- // Try catch not supported yet
- DCHECK_EQ(stmt->catch_blocks.size(), 0);
-
Visit(stmt->try_block);
}
- for (CatchBlock* block : stmt->catch_blocks) {
+ for (LabelBlock* block : stmt->label_blocks) {
Visit(block->body);
}
+}
- for (LabelBlock* block : stmt->label_blocks) {
- Visit(block->body);
+void DeclarationVisitor::GenerateHeader(std::string& file_name) {
+ std::stringstream new_contents_stream;
+ new_contents_stream
+ << "#ifndef V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "#define V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "\n"
+ "#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) "
+ "\\\n";
+ for (auto builtin : torque_builtins_) {
+ int firstParameterIndex = 1;
+ bool declareParameters = true;
+ if (builtin->IsStub()) {
+ new_contents_stream << "TFS(" << builtin->name();
+ } else {
+ new_contents_stream << "TFJ(" << builtin->name();
+ if (builtin->IsVarArgsJavaScript()) {
+ new_contents_stream
+ << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
+ declareParameters = false;
+ } else {
+ assert(builtin->IsFixedArgsJavaScript());
+ // FixedArg javascript builtins need to offer the parameter
+ // count.
+ assert(builtin->parameter_names().size() >= 2);
+ new_contents_stream << ", " << (builtin->parameter_names().size() - 2);
+ // And the receiver is explicitly declared.
+ new_contents_stream << ", kReceiver";
+ firstParameterIndex = 2;
+ }
+ }
+ if (declareParameters) {
+ int index = 0;
+ for (auto parameter : builtin->parameter_names()) {
+ if (index >= firstParameterIndex) {
+ new_contents_stream << ", k" << CamelifyString(parameter);
+ }
+ index++;
+ }
+ }
+ new_contents_stream << ") \\\n";
}
+ new_contents_stream
+ << "\n"
+ "#endif // V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
+
+ std::string new_contents(new_contents_stream.str());
+ ReplaceFileContentsIfDifferent(file_name, new_contents);
}
void DeclarationVisitor::Visit(IdentifierExpression* expr) {
if (expr->generic_arguments.size() != 0) {
- Generic* generic = declarations()->LookupGeneric(expr->name);
TypeVector specialization_types;
for (auto t : expr->generic_arguments) {
specialization_types.push_back(declarations()->GetType(t));
}
- CallableNode* callable = generic->declaration()->callable;
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
+ // Specialize all versions of the generic, since the exact parameter type
+ // list cannot be resolved until the call's parameter expressions are
+ // evaluated. This is an overly conservative but simple way to make sure
+ // that the correct specialization exists.
+ for (auto generic : declarations()->LookupGeneric(expr->name)->list()) {
+ CallableNode* callable = generic->declaration()->callable;
+ if (generic->declaration()->body) {
+ QueueGenericSpecialization({generic, specialization_types}, callable,
+ callable->signature.get(),
+ generic->declaration()->body);
+ }
+ }
}
}
@@ -321,9 +561,85 @@ void DeclarationVisitor::Visit(CallExpression* expr) {
for (Expression* arg : expr->arguments) Visit(arg);
}
+void DeclarationVisitor::Visit(TypeDeclaration* decl) {
+ std::string generates = decl->generates ? *decl->generates : std::string("");
+ const AbstractType* type = declarations()->DeclareAbstractType(
+ decl->name, generates, {}, decl->extends);
+
+ if (decl->constexpr_generates) {
+ std::string constexpr_name = CONSTEXPR_TYPE_PREFIX + decl->name;
+ base::Optional<std::string> constexpr_extends;
+ if (decl->extends)
+ constexpr_extends = CONSTEXPR_TYPE_PREFIX + *decl->extends;
+ declarations()->DeclareAbstractType(
+ constexpr_name, *decl->constexpr_generates, type, constexpr_extends);
+ }
+}
+
+void DeclarationVisitor::MarkLocationModified(Expression* location) {
+ if (IdentifierExpression* id = IdentifierExpression::cast(location)) {
+ const Value* value = declarations()->LookupValue(id->name);
+ if (value->IsVariable()) {
+ const Variable* variable = Variable::cast(value);
+ bool was_live = MarkVariableModified(variable);
+ if (was_live && global_context_.verbose()) {
+ std::cout << *variable << " was modified in control split at "
+ << PositionAsString(id->pos) << "\n";
+ }
+ }
+ }
+}
+
+bool DeclarationVisitor::MarkVariableModified(const Variable* variable) {
+ auto e = live_and_changed_variables_.rend();
+ auto c = live_and_changed_variables_.rbegin();
+ bool was_live_in_preceeding_split = false;
+ while (c != e) {
+ if (c->live.find(variable) != c->live.end()) {
+ c->changed.insert(variable);
+ was_live_in_preceeding_split = true;
+ }
+ c++;
+ }
+ return was_live_in_preceeding_split;
+}
+
+void DeclarationVisitor::DeclareSignature(const Signature& signature) {
+ auto type_iterator = signature.parameter_types.types.begin();
+ for (auto name : signature.parameter_names) {
+ const Type* t(*type_iterator++);
+ if (name.size() != 0) {
+ DeclareParameter(name, t);
+ }
+ }
+ for (auto& label : signature.labels) {
+ auto label_params = label.types;
+ Label* new_label = declarations()->DeclareLabel(label.name);
+ size_t i = 0;
+ for (auto var_type : label_params) {
+ if (var_type->IsConstexpr()) {
+ ReportError("no constexpr type allowed for label arguments");
+ }
+
+ std::string var_name = label.name + std::to_string(i++);
+ new_label->AddVariable(DeclareVariable(var_name, var_type, false));
+ }
+ }
+}
+
void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
size_t i = 0;
Generic* generic = key.first;
+ const std::size_t generic_parameter_count =
+ generic->declaration()->generic_parameters.size();
+ if (generic_parameter_count != key.second.size()) {
+ std::stringstream stream;
+ stream << "Wrong generic argument count for specialization of \""
+ << generic->name() << "\", expected: " << generic_parameter_count
+ << ", actual: " << key.second.size();
+ ReportError(stream.str());
+ }
+
for (auto type : key.second) {
std::string generic_type_name =
generic->declaration()->generic_parameters[i++];
@@ -339,7 +655,7 @@ void DeclarationVisitor::Specialize(const SpecializationKey& key,
// TODO(tebbi): The error should point to the source position where the
// instantiation was requested.
- CurrentSourcePosition::Scope scope(generic->declaration()->pos);
+ CurrentSourcePosition::Scope pos_scope(generic->declaration()->pos);
size_t generic_parameter_count =
generic->declaration()->generic_parameters.size();
if (generic_parameter_count != key.second.size()) {
@@ -352,14 +668,16 @@ void DeclarationVisitor::Specialize(const SpecializationKey& key,
ReportError(stream.str());
}
+ Signature type_signature;
{
// Manually activate the specialized generic's scope when declaring the
// generic parameter specializations.
- Declarations::GenericScopeActivator scope(declarations(), key);
+ Declarations::GenericScopeActivator namespace_scope(declarations(), key);
DeclareSpecializedTypes(key);
+ type_signature = MakeSignature(signature);
}
- Visit(callable, MakeSignature(callable, signature), body);
+ Visit(callable, type_signature, body);
}
} // namespace torque
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index ef34a93fe4..b37ecb2860 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -24,8 +24,7 @@ class DeclarationVisitor : public FileVisitor {
public:
explicit DeclarationVisitor(GlobalContext& global_context)
: FileVisitor(global_context),
- scope_(declarations(), global_context.ast()->default_module()) {
- }
+ scope_(declarations(), global_context.GetDefaultModule()) {}
void Visit(Ast* ast) {
Visit(ast->default_module());
@@ -38,7 +37,7 @@ class DeclarationVisitor : public FileVisitor {
void Visit(ModuleDeclaration* decl) {
ScopedModuleActivator activator(this, decl->GetModule());
- Declarations::NodeScopeActivator scope(declarations(), decl);
+ Declarations::ModuleScopeActivator scope(declarations(), decl->GetModule());
for (Declaration* child : decl->declarations) Visit(child);
}
void Visit(DefaultModuleDeclaration* decl) {
@@ -59,29 +58,18 @@ class DeclarationVisitor : public FileVisitor {
Visit(expr->index);
}
void Visit(FieldAccessExpression* expr) { Visit(expr->object); }
- void Visit(CastExpression* expr) { Visit(expr->value); }
- void Visit(ConvertExpression* expr) { Visit(expr->value); }
void Visit(BlockStatement* expr) {
Declarations::NodeScopeActivator scope(declarations(), expr);
for (Statement* stmt : expr->statements) Visit(stmt);
}
void Visit(ExpressionStatement* stmt) { Visit(stmt->expression); }
void Visit(TailCallStatement* stmt) { Visit(stmt->call); }
+ void Visit(TypeDeclaration* decl);
- void Visit(TypeDeclaration* decl) {
- std::string extends = decl->extends ? *decl->extends : std::string("");
- std::string* extends_ptr = decl->extends ? &extends : nullptr;
-
- std::string generates =
- decl->generates ? *decl->generates : std::string("");
- declarations()->DeclareAbstractType(decl->name, generates, extends_ptr);
-
- if (decl->constexpr_generates) {
- std::string constexpr_name =
- std::string(CONSTEXPR_TYPE_PREFIX) + decl->name;
- declarations()->DeclareAbstractType(
- constexpr_name, *decl->constexpr_generates, &(decl->name));
- }
+ void Visit(TypeAliasDeclaration* decl) {
+ const Type* type = declarations()->GetType(decl->type);
+ type->AddAlias(decl->name);
+ declarations()->DeclareType(decl->name, type);
}
Builtin* BuiltinDeclarationCommon(BuiltinDeclaration* decl, bool external,
@@ -103,6 +91,7 @@ class DeclarationVisitor : public FileVisitor {
void Visit(CallableNode* decl, const Signature& signature, Statement* body);
+ void Visit(ConstDeclaration* decl);
void Visit(StandardDeclaration* decl);
void Visit(GenericDeclaration* decl);
void Visit(SpecializationDeclaration* decl);
@@ -110,104 +99,26 @@ class DeclarationVisitor : public FileVisitor {
void Visit(DebugStatement* stmt) {}
void Visit(AssertStatement* stmt) {
+ bool do_check = !stmt->debug_only;
#if defined(DEBUG)
- DeclareExpressionForBranch(stmt->expression);
+ do_check = true;
#endif
+ if (do_check) DeclareExpressionForBranch(stmt->expression);
}
- void Visit(VarDeclarationStatement* stmt) {
- std::string variable_name = stmt->name;
- const Type* type = declarations()->GetType(stmt->type);
- if (type->IsConstexpr()) {
- ReportError("cannot declare variable with constexpr type");
- }
- declarations()->DeclareVariable(variable_name, type);
- if (global_context_.verbose()) {
- std::cout << "declared variable " << variable_name << " with type "
- << type << "\n";
- }
- if (stmt->initializer) {
- Visit(*stmt->initializer);
- if (global_context_.verbose()) {
- std::cout << "variable has initialization expression at "
- << CurrentPositionAsString() << "\n";
- }
- }
- }
-
- void Visit(ConstDeclaration* decl) {
- declarations()->DeclareConstant(
- decl->name, declarations()->GetType(decl->type), decl->literal);
- }
-
- void Visit(LogicalOrExpression* expr) {
- {
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- declarations()->DeclareLabel(kFalseLabelName);
- Visit(expr->left);
- }
- Visit(expr->right);
- }
-
- void Visit(LogicalAndExpression* expr) {
- {
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- declarations()->DeclareLabel(kTrueLabelName);
- Visit(expr->left);
- }
- Visit(expr->right);
- }
-
- void DeclareExpressionForBranch(Expression* node) {
- Declarations::NodeScopeActivator scope(declarations(), node);
- // Conditional expressions can either explicitly return a bit
- // type, or they can be backed by macros that don't return but
- // take a true and false label. By declaring the labels before
- // visiting the conditional expression, those label-based
- // macro conditionals will be able to find them through normal
- // label lookups.
- declarations()->DeclareLabel(kTrueLabelName);
- declarations()->DeclareLabel(kFalseLabelName);
- Visit(node);
- }
-
- void Visit(ConditionalExpression* expr) {
- DeclareExpressionForBranch(expr->condition);
- PushControlSplit();
- Visit(expr->if_true);
- Visit(expr->if_false);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- expr, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
- }
+ void Visit(VarDeclarationStatement* stmt);
+ void Visit(ExternConstDeclaration* decl);
- void Visit(IfStatement* stmt) {
- if (!stmt->is_constexpr) {
- PushControlSplit();
- }
- DeclareExpressionForBranch(stmt->condition);
- Visit(stmt->if_true);
- if (stmt->if_false) Visit(*stmt->if_false);
- if (!stmt->is_constexpr) {
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
- }
- }
+ void Visit(StructDeclaration* decl);
+ void Visit(StructExpression* decl) {}
- void Visit(WhileStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- DeclareExpressionForBranch(stmt->condition);
- PushControlSplit();
- Visit(stmt->body);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
- }
+ void Visit(LogicalOrExpression* expr);
+ void Visit(LogicalAndExpression* expr);
+ void DeclareExpressionForBranch(Expression* node);
+ void Visit(ConditionalExpression* expr);
+ void Visit(IfStatement* stmt);
+ void Visit(WhileStatement* stmt);
void Visit(ForOfLoopStatement* stmt);
void Visit(AssignmentExpression* expr) {
@@ -219,75 +130,15 @@ class DeclarationVisitor : public FileVisitor {
void Visit(BreakStatement* stmt) {}
void Visit(ContinueStatement* stmt) {}
void Visit(GotoStatement* expr) {}
-
- void Visit(ForLoopStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- if (stmt->var_declaration) Visit(*stmt->var_declaration);
- PushControlSplit();
- DeclareExpressionForBranch(stmt->test);
- Visit(stmt->body);
- Visit(stmt->action);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
- }
+ void Visit(ForLoopStatement* stmt);
void Visit(IncrementDecrementExpression* expr) {
MarkLocationModified(expr->location);
Visit(expr->location);
}
- void Visit(TryCatchStatement* stmt);
-
- void GenerateHeader(std::string& file_name) {
- std::stringstream new_contents_stream;
- new_contents_stream
- << "#ifndef V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "#define V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "\n"
- "#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) "
- "\\\n";
- for (auto builtin : torque_builtins_) {
- int firstParameterIndex = 1;
- bool declareParameters = true;
- if (builtin->IsStub()) {
- new_contents_stream << "TFS(" << builtin->name();
- } else {
- new_contents_stream << "TFJ(" << builtin->name();
- if (builtin->IsVarArgsJavaScript()) {
- new_contents_stream
- << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
- declareParameters = false;
- } else {
- assert(builtin->IsFixedArgsJavaScript());
- // FixedArg javascript builtins need to offer the parameter
- // count.
- assert(builtin->parameter_names().size() >= 2);
- new_contents_stream << ", "
- << (builtin->parameter_names().size() - 2);
- // And the receiver is implicitly declared.
- firstParameterIndex = 2;
- }
- }
- if (declareParameters) {
- int index = 0;
- for (auto parameter : builtin->parameter_names()) {
- if (index >= firstParameterIndex) {
- new_contents_stream << ", k" << CamelifyString(parameter);
- }
- index++;
- }
- }
- new_contents_stream << ") \\\n";
- }
- new_contents_stream
- << "\n"
- "#endif // V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
-
- std::string new_contents(new_contents_stream.str());
- ReplaceFileContentsIfDifferent(file_name, new_contents);
- }
+ void Visit(TryLabelStatement* stmt);
+ void GenerateHeader(std::string& file_name);
private:
struct LiveAndChanged {
@@ -301,66 +152,26 @@ class DeclarationVisitor : public FileVisitor {
live_and_changed_variables_.push_back(live_and_changed);
}
+ Variable* DeclareVariable(const std::string& name, const Type* type,
+ bool is_const);
+ Parameter* DeclareParameter(const std::string& name, const Type* type);
+
std::set<const Variable*> PopControlSplit() {
auto result = live_and_changed_variables_.back().changed;
live_and_changed_variables_.pop_back();
return result;
}
- void MarkLocationModified(Expression* location) {
- if (IdentifierExpression* id = IdentifierExpression::cast(location)) {
- const Value* value = declarations()->LookupValue(id->name);
- if (value->IsVariable()) {
- const Variable* variable = Variable::cast(value);
- bool was_live = MarkVariableModified(variable);
- if (was_live && global_context_.verbose()) {
- std::cout << *variable << " was modified in control split at "
- << PositionAsString(id->pos) << "\n";
- }
- }
- }
- }
-
- bool MarkVariableModified(const Variable* variable) {
- auto e = live_and_changed_variables_.rend();
- auto c = live_and_changed_variables_.rbegin();
- bool was_live_in_preceeding_split = false;
- while (c != e) {
- if (c->live.find(variable) != c->live.end()) {
- c->changed.insert(variable);
- was_live_in_preceeding_split = true;
- }
- c++;
- }
- return was_live_in_preceeding_split;
- }
-
- void DeclareSignature(const Signature& signature) {
- auto name_iterator = signature.parameter_names.begin();
- for (auto t : signature.types()) {
- const std::string& name(*name_iterator++);
- declarations()->DeclareParameter(name, GetParameterVariableFromName(name),
- t);
- }
- for (auto& label : signature.labels) {
- auto label_params = label.types;
- Label* new_label = declarations()->DeclareLabel(label.name);
- size_t i = 0;
- for (auto var_type : label_params) {
- std::string var_name = label.name + std::to_string(i++);
- new_label->AddVariable(
- declarations()->DeclareVariable(var_name, var_type));
- }
- }
- }
-
+ void MarkLocationModified(Expression* location);
+ bool MarkVariableModified(const Variable* variable);
+ void DeclareSignature(const Signature& signature);
void DeclareSpecializedTypes(const SpecializationKey& key);
void Specialize(const SpecializationKey& key, CallableNode* callable,
const CallableNodeSignature* signature,
Statement* body) override;
- Declarations::NodeScopeActivator scope_;
+ Declarations::ModuleScopeActivator scope_;
std::vector<Builtin*> torque_builtins_;
std::vector<LiveAndChanged> live_and_changed_variables_;
};
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index f13cc3edd7..95764e3029 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -4,18 +4,29 @@
#include "src/torque/declarations.h"
#include "src/torque/declarable.h"
+#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
namespace torque {
-Scope* Declarations::GetNodeScope(const AstNode* node) {
+Scope* Declarations::GetModuleScope(const Module* module) {
+ auto i = module_scopes_.find(module);
+ if (i != module_scopes_.end()) return i->second;
+ Scope* result = chain_.NewScope();
+ module_scopes_[module] = result;
+ return result;
+}
+
+Scope* Declarations::GetNodeScope(const AstNode* node, bool reset_scope) {
std::pair<const AstNode*, TypeVector> key(
node, current_generic_specialization_ == nullptr
? TypeVector()
: current_generic_specialization_->second);
- auto i = scopes_.find(key);
- if (i != scopes_.end()) return i->second;
+ if (!reset_scope) {
+ auto i = scopes_.find(key);
+ if (i != scopes_.end()) return i->second;
+ }
Scope* result = chain_.NewScope();
scopes_[key] = result;
return result;
@@ -63,35 +74,22 @@ const Type* Declarations::LookupGlobalType(const std::string& name) {
return TypeAlias::cast(raw)->type();
}
-const AbstractType* Declarations::GetAbstractType(const Type* parent,
- std::string name,
- std::string generated) {
- AbstractType* result =
- new AbstractType(parent, std::move(name), std::move(generated));
- nominal_types_.push_back(std::unique_ptr<AbstractType>(result));
- return result;
-}
-
-const Type* Declarations::GetFunctionPointerType(TypeVector argument_types,
- const Type* return_type) {
- const Type* code_type = LookupGlobalType(CODE_TYPE_STRING);
- return function_pointer_types_.Add(
- FunctionPointerType(code_type, argument_types, return_type));
-}
-
const Type* Declarations::GetType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
std::string name =
(basic->is_constexpr ? CONSTEXPR_TYPE_PREFIX : "") + basic->name;
return LookupType(name);
+ } else if (auto* union_type = UnionTypeExpression::cast(type_expression)) {
+ return TypeOracle::GetUnionType(GetType(union_type->a),
+ GetType(union_type->b));
} else {
auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
TypeVector argument_types;
for (TypeExpression* type_exp : function_type_exp->parameters.types) {
argument_types.push_back(GetType(type_exp));
}
- return GetFunctionPointerType(argument_types,
- GetType(function_type_exp->return_type));
+ return TypeOracle::GetFunctionPointerType(
+ argument_types, GetType(function_type_exp->return_type));
}
}
@@ -130,9 +128,9 @@ Label* Declarations::LookupLabel(const std::string& name) {
return Label::cast(d);
}
-Macro* Declarations::LookupMacro(const std::string& name,
- const TypeVector& types) {
- Declarable* declarable = Lookup(name);
+Macro* Declarations::TryLookupMacro(const std::string& name,
+ const TypeVector& types) {
+ Declarable* declarable = TryLookup(name);
if (declarable != nullptr) {
if (declarable->IsMacroList()) {
for (auto& m : MacroList::cast(declarable)->list()) {
@@ -143,6 +141,13 @@ Macro* Declarations::LookupMacro(const std::string& name,
}
}
}
+ return nullptr;
+}
+
+Macro* Declarations::LookupMacro(const std::string& name,
+ const TypeVector& types) {
+ Macro* result = TryLookupMacro(name, types);
+ if (result != nullptr) return result;
std::stringstream stream;
stream << "macro " << name << " with parameter types " << types
<< " is not defined";
@@ -162,11 +167,11 @@ Builtin* Declarations::LookupBuiltin(const std::string& name) {
return nullptr;
}
-Generic* Declarations::LookupGeneric(const std::string& name) {
- Declarable* declarable = Lookup(name);
- if (declarable != nullptr) {
- if (declarable->IsGeneric()) {
- return Generic::cast(declarable);
+GenericList* Declarations::LookupGeneric(const std::string& name) {
+ Declarable* declarable_list = Lookup(name);
+ if (declarable_list != nullptr) {
+ if (declarable_list->IsGenericList()) {
+ return GenericList::cast(declarable_list);
}
ReportError(name + " is not a generic");
}
@@ -174,12 +179,25 @@ Generic* Declarations::LookupGeneric(const std::string& name) {
return nullptr;
}
+ModuleConstant* Declarations::LookupModuleConstant(const std::string& name) {
+ Declarable* declarable = Lookup(name);
+ if (declarable != nullptr) {
+ if (declarable->IsModuleConstant()) {
+ return ModuleConstant::cast(declarable);
+ }
+ ReportError(name + " is not a constant");
+ }
+ ReportError(std::string("constant \"") + name + "\" is not defined");
+ return nullptr;
+}
+
const AbstractType* Declarations::DeclareAbstractType(
const std::string& name, const std::string& generated,
- const std::string* parent) {
+ base::Optional<const AbstractType*> non_constexpr_version,
+ const base::Optional<std::string>& parent) {
CheckAlreadyDeclared(name, "type");
const Type* parent_type = nullptr;
- if (parent != nullptr) {
+ if (parent) {
Declarable* maybe_parent_type = Lookup(*parent);
if (maybe_parent_type == nullptr) {
std::stringstream s;
@@ -194,7 +212,8 @@ const AbstractType* Declarations::DeclareAbstractType(
}
parent_type = TypeAlias::cast(maybe_parent_type)->type();
}
- const AbstractType* type = GetAbstractType(parent_type, name, generated);
+ const AbstractType* type = TypeOracle::GetAbstractType(
+ parent_type, name, generated, non_constexpr_version);
DeclareType(name, type);
return type;
}
@@ -205,6 +224,12 @@ void Declarations::DeclareType(const std::string& name, const Type* type) {
Declare(name, std::unique_ptr<TypeAlias>(result));
}
+void Declarations::DeclareStruct(Module* module, const std::string& name,
+ const std::vector<NameAndType>& fields) {
+ const StructType* new_type = TypeOracle::GetStructType(module, name, fields);
+ DeclareType(name, new_type);
+}
+
Label* Declarations::DeclareLabel(const std::string& name) {
CheckAlreadyDeclared(name, "label");
Label* result = new Label(name);
@@ -212,8 +237,8 @@ Label* Declarations::DeclareLabel(const std::string& name) {
return result;
}
-Macro* Declarations::DeclareMacro(const std::string& name,
- const Signature& signature) {
+MacroList* Declarations::GetMacroListForName(const std::string& name,
+ const Signature& signature) {
auto previous = chain_.Lookup(name);
MacroList* macro_list = nullptr;
if (previous == nullptr) {
@@ -238,8 +263,17 @@ Macro* Declarations::DeclareMacro(const std::string& name,
ReportError(s.str());
}
}
- return macro_list->AddMacro(
- RegisterDeclarable(std::unique_ptr<Macro>(new Macro(name, signature))));
+ return macro_list;
+}
+
+Macro* Declarations::DeclareMacro(const std::string& name,
+ const Signature& signature,
+ base::Optional<std::string> op) {
+ Macro* macro =
+ RegisterDeclarable(std::unique_ptr<Macro>(new Macro(name, signature)));
+ GetMacroListForName(name, signature)->AddMacro(macro);
+ if (op) GetMacroListForName(*op, signature)->AddMacro(macro);
+ return macro;
}
Builtin* Declarations::DeclareBuiltin(const std::string& name,
@@ -260,10 +294,12 @@ RuntimeFunction* Declarations::DeclareRuntimeFunction(
}
Variable* Declarations::DeclareVariable(const std::string& var,
- const Type* type) {
- std::string name(var + std::to_string(GetNextUniqueDeclarationNumber()));
+ const Type* type, bool is_const) {
+ std::string name(var + "_" +
+ std::to_string(GetNextUniqueDeclarationNumber()));
+ std::replace(name.begin(), name.end(), '.', '_');
CheckAlreadyDeclared(var, "variable");
- Variable* result = new Variable(var, name, type);
+ Variable* result = new Variable(var, name, type, is_const);
Declare(var, std::unique_ptr<Declarable>(result));
return result;
}
@@ -286,18 +322,39 @@ Label* Declarations::DeclarePrivateLabel(const std::string& raw_name) {
return result;
}
-void Declarations::DeclareConstant(const std::string& name, const Type* type,
- const std::string& value) {
+void Declarations::DeclareExternConstant(const std::string& name,
+ const Type* type,
+ const std::string& value) {
CheckAlreadyDeclared(name, "constant, parameter or arguments");
- Constant* result = new Constant(name, type, value);
+ ExternConstant* result = new ExternConstant(name, type, value);
+ Declare(name, std::unique_ptr<Declarable>(result));
+}
+
+ModuleConstant* Declarations::DeclareModuleConstant(const std::string& name,
+ const Type* type) {
+ CheckAlreadyDeclared(name, "module constant");
+ ModuleConstant* result = new ModuleConstant(name, type);
Declare(name, std::unique_ptr<Declarable>(result));
+ return result;
}
Generic* Declarations::DeclareGeneric(const std::string& name, Module* module,
GenericDeclaration* generic) {
- CheckAlreadyDeclared(name, "generic");
- Generic* result = new Generic(name, module, generic);
- Declare(name, std::unique_ptr<Generic>(result));
+ auto previous = chain_.Lookup(name);
+ GenericList* generic_list = nullptr;
+ if (previous == nullptr) {
+ generic_list = new GenericList();
+ Declare(name, std::unique_ptr<Declarable>(generic_list));
+ } else if (!previous->IsGenericList()) {
+ std::stringstream s;
+ s << "cannot redeclare non-generic " << name << " as a generic";
+ ReportError(s.str());
+ } else {
+ generic_list = GenericList::cast(previous);
+ }
+ Generic* result = RegisterDeclarable(
+ std::unique_ptr<Generic>(new Generic(name, module, generic)));
+ generic_list->AddGeneric(result);
generic_declaration_scopes_[result] = GetScopeChainSnapshot();
return result;
}
@@ -310,6 +367,16 @@ TypeVector Declarations::GetCurrentSpecializationTypeNamesVector() {
return result;
}
+std::string GetGeneratedCallableName(const std::string& name,
+ const TypeVector& specialized_types) {
+ std::string result = name;
+ for (auto type : specialized_types) {
+ std::string type_string = type->MangledName();
+ result += std::to_string(type_string.size()) + type_string;
+ }
+ return result;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index fb5467d962..76a436e43e 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -15,6 +15,10 @@ namespace v8 {
namespace internal {
namespace torque {
+static constexpr const char* const kFromConstexprMacroName = "from_constexpr";
+static constexpr const char* kTrueLabelName = "_True";
+static constexpr const char* kFalseLabelName = "_False";
+
class Declarations {
public:
Declarations()
@@ -47,32 +51,38 @@ class Declarations {
const Type* LookupGlobalType(const std::string& name);
const Type* GetType(TypeExpression* type_expression);
- const AbstractType* GetAbstractType(const Type* parent, std::string name,
- std::string generated);
- const Type* GetFunctionPointerType(TypeVector argument_types,
- const Type* return_type);
-
Builtin* FindSomeInternalBuiltinWithType(const FunctionPointerType* type);
Value* LookupValue(const std::string& name);
+ Macro* TryLookupMacro(const std::string& name, const TypeVector& types);
Macro* LookupMacro(const std::string& name, const TypeVector& types);
Builtin* LookupBuiltin(const std::string& name);
+ Label* TryLookupLabel(const std::string& name) {
+ Declarable* d = TryLookup(name);
+ return d && d->IsLabel() ? Label::cast(d) : nullptr;
+ }
Label* LookupLabel(const std::string& name);
- Generic* LookupGeneric(const std::string& name);
+ GenericList* LookupGeneric(const std::string& name);
+ ModuleConstant* LookupModuleConstant(const std::string& name);
- const AbstractType* DeclareAbstractType(const std::string& name,
- const std::string& generated,
- const std::string* parent = nullptr);
+ const AbstractType* DeclareAbstractType(
+ const std::string& name, const std::string& generated,
+ base::Optional<const AbstractType*> non_constexpr_version,
+ const base::Optional<std::string>& parent = {});
void DeclareType(const std::string& name, const Type* type);
+ void DeclareStruct(Module* module, const std::string& name,
+ const std::vector<NameAndType>& fields);
+
Label* DeclareLabel(const std::string& name);
- Macro* DeclareMacro(const std::string& name, const Signature& signature);
+ Macro* DeclareMacro(const std::string& name, const Signature& signature,
+ base::Optional<std::string> op = {});
Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
bool external, const Signature& signature);
@@ -80,7 +90,8 @@ class Declarations {
RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
const Signature& signature);
- Variable* DeclareVariable(const std::string& var, const Type* type);
+ Variable* DeclareVariable(const std::string& var, const Type* type,
+ bool is_const);
Parameter* DeclareParameter(const std::string& name,
const std::string& mangled_name,
@@ -88,8 +99,10 @@ class Declarations {
Label* DeclarePrivateLabel(const std::string& name);
- void DeclareConstant(const std::string& name, const Type* type,
- const std::string& value);
+ void DeclareExternConstant(const std::string& name, const Type* type,
+ const std::string& value);
+ ModuleConstant* DeclareModuleConstant(const std::string& name,
+ const Type* type);
Generic* DeclareGeneric(const std::string& name, Module* module,
GenericDeclaration* generic);
@@ -106,13 +119,16 @@ class Declarations {
void PrintScopeChain() { chain_.Print(); }
+ class ModuleScopeActivator;
class NodeScopeActivator;
+ class CleanNodeScopeActivator;
class GenericScopeActivator;
class ScopedGenericSpecializationKey;
class ScopedGenericScopeChainSnapshot;
private:
- Scope* GetNodeScope(const AstNode* node);
+ Scope* GetModuleScope(const Module* module);
+ Scope* GetNodeScope(const AstNode* node, bool reset_scope = false);
Scope* GetGenericScope(Generic* generic, const TypeVector& types);
template <class T>
@@ -122,6 +138,9 @@ class Declarations {
return ptr;
}
+ MacroList* GetMacroListForName(const std::string& name,
+ const Signature& signature);
+
void Declare(const std::string& name, std::unique_ptr<Declarable> d) {
chain_.Declare(name, RegisterDeclarable(std::move(d)));
}
@@ -135,8 +154,7 @@ class Declarations {
const SpecializationKey* current_generic_specialization_;
Statement* next_body_;
std::vector<std::unique_ptr<Declarable>> declarables_;
- Deduplicator<FunctionPointerType> function_pointer_types_;
- std::vector<std::unique_ptr<Type>> nominal_types_;
+ std::map<const Module*, Scope*> module_scopes_;
std::map<std::pair<const AstNode*, TypeVector>, Scope*> scopes_;
std::map<Generic*, ScopeChain::Snapshot> generic_declaration_scopes_;
};
@@ -150,6 +168,24 @@ class Declarations::NodeScopeActivator {
Scope::Activator activator_;
};
+class Declarations::ModuleScopeActivator {
+ public:
+ ModuleScopeActivator(Declarations* declarations, const Module* module)
+ : activator_(declarations->GetModuleScope(module)) {}
+
+ private:
+ Scope::Activator activator_;
+};
+
+class Declarations::CleanNodeScopeActivator {
+ public:
+ CleanNodeScopeActivator(Declarations* declarations, AstNode* node)
+ : activator_(declarations->GetNodeScope(node, true)) {}
+
+ private:
+ Scope::Activator activator_;
+};
+
class Declarations::GenericScopeActivator {
public:
GenericScopeActivator(Declarations* declarations,
@@ -186,6 +222,9 @@ class Declarations::ScopedGenericScopeChainSnapshot {
ScopeChain::ScopedSnapshotRestorer restorer_;
};
+std::string GetGeneratedCallableName(const std::string& name,
+ const TypeVector& specialized_types);
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/file-visitor.cc b/deps/v8/src/torque/file-visitor.cc
index 47448b87a2..c2e5aa7924 100644
--- a/deps/v8/src/torque/file-visitor.cc
+++ b/deps/v8/src/torque/file-visitor.cc
@@ -5,14 +5,13 @@
#include "src/torque/file-visitor.h"
#include "src/torque/declarable.h"
+#include "src/torque/parameter-difference.h"
namespace v8 {
namespace internal {
namespace torque {
-Signature FileVisitor::MakeSignature(CallableNode* decl,
- const CallableNodeSignature* signature) {
- Declarations::NodeScopeActivator scope(declarations(), decl);
+Signature FileVisitor::MakeSignature(const CallableNodeSignature* signature) {
LabelDeclarationVector definition_vector;
for (auto label : signature->labels) {
LabelDeclaration def = {label.name, GetTypeVector(label.types)};
@@ -26,79 +25,33 @@ Signature FileVisitor::MakeSignature(CallableNode* decl,
return result;
}
-std::string FileVisitor::GetGeneratedCallableName(
- const std::string& name, const TypeVector& specialized_types) {
- std::string result = name;
- for (auto type : specialized_types) {
- std::string type_string = type->MangledName();
- result += std::to_string(type_string.size()) + type_string;
- }
- return result;
-}
-
-Callable* FileVisitor::LookupCall(const std::string& name,
- const TypeVector& parameter_types) {
- Callable* result = nullptr;
- Declarable* declarable = declarations()->Lookup(name);
- if (declarable->IsBuiltin()) {
- result = Builtin::cast(declarable);
- } else if (declarable->IsRuntimeFunction()) {
- result = RuntimeFunction::cast(declarable);
- } else if (declarable->IsMacroList()) {
- for (auto& m : MacroList::cast(declarable)->list()) {
- if (GetTypeOracle().IsCompatibleSignature(m->signature().parameter_types,
- parameter_types)) {
- if (result != nullptr) {
- std::stringstream stream;
- stream << "multiple matching matching parameter list for macro "
- << name << ": (" << parameter_types << ") and ("
- << result->signature().parameter_types << ")";
- ReportError(stream.str());
- }
- result = m;
- }
- }
- if (result == nullptr) {
- std::stringstream stream;
- stream << "no matching matching parameter list for macro " << name
- << ": call parameters were (" << parameter_types << ")";
- ReportError(stream.str());
- }
- } else {
- std::stringstream stream;
- stream << "can't call " << declarable->type_name() << " " << name
- << " because it's not callable"
- << ": call parameters were (" << parameter_types << ")";
- ReportError(stream.str());
- }
-
- size_t caller_size = parameter_types.size();
- size_t callee_size = result->signature().types().size();
- if (caller_size != callee_size &&
- !result->signature().parameter_types.var_args) {
- std::stringstream stream;
- stream << "parameter count mismatch calling " << *result << " - expected "
- << std::to_string(callee_size) << ", found "
- << std::to_string(caller_size);
- ReportError(stream.str());
- }
-
+Signature FileVisitor::MakeSignatureFromReturnType(
+ TypeExpression* return_type) {
+ Signature result{{}, {{}, false}, declarations()->GetType(return_type), {}};
return result;
}
void FileVisitor::QueueGenericSpecialization(
const SpecializationKey& key, CallableNode* callable,
- const CallableNodeSignature* signature, Statement* body) {
- pending_specializations_.push_back({key, callable, signature, body});
+ const CallableNodeSignature* signature, base::Optional<Statement*> body) {
+ pending_specializations_.push_back(
+ {key, callable, signature, body, CurrentSourcePosition::Get()});
}
void FileVisitor::SpecializeGeneric(
const PendingSpecialization& specialization) {
+ CurrentSourcePosition::Scope scope(specialization.request_position);
if (completed_specializations_.find(specialization.key) !=
completed_specializations_.end()) {
std::stringstream stream;
stream << "cannot redeclare specialization of "
- << specialization.key.first->declaration()->callable->name
+ << specialization.key.first->name() << " with types <"
+ << specialization.key.second << ">";
+ ReportError(stream.str());
+ }
+ if (!specialization.body) {
+ std::stringstream stream;
+ stream << "missing specialization of " << specialization.key.first->name()
<< " with types <" << specialization.key.second << ">";
ReportError(stream.str());
}
@@ -107,7 +60,7 @@ void FileVisitor::SpecializeGeneric(
FileVisitor::ScopedModuleActivator activator(
this, specialization.key.first->module());
Specialize(specialization.key, specialization.callable,
- specialization.signature, specialization.body);
+ specialization.signature, *specialization.body);
completed_specializations_.insert(specialization.key);
}
diff --git a/deps/v8/src/torque/file-visitor.h b/deps/v8/src/torque/file-visitor.h
index 5aac1bad46..f1b7c4bbef 100644
--- a/deps/v8/src/torque/file-visitor.h
+++ b/deps/v8/src/torque/file-visitor.h
@@ -53,8 +53,6 @@ class FileVisitor {
};
protected:
- static constexpr const char* kTrueLabelName = "_True";
- static constexpr const char* kFalseLabelName = "_False";
static constexpr const char* kReturnValueVariable = "_return";
static constexpr const char* kDoneLabelName = "_done";
static constexpr const char* kForIndexValueVariable = "_for_index";
@@ -62,32 +60,26 @@ class FileVisitor {
Module* CurrentModule() const { return module_; }
friend class ScopedModuleActivator;
- TypeOracle& GetTypeOracle() { return global_context_.GetTypeOracle(); }
std::string GetParameterVariableFromName(const std::string& name) {
return std::string("p_") + name;
}
- Callable* LookupCall(const std::string& name,
- const TypeVector& parameter_types);
-
- Signature MakeSignature(CallableNode* decl,
- const CallableNodeSignature* signature);
-
- std::string GetGeneratedCallableName(const std::string& name,
- const TypeVector& specialized_types);
+ Signature MakeSignature(const CallableNodeSignature* signature);
+ Signature MakeSignatureFromReturnType(TypeExpression* return_type);
struct PendingSpecialization {
SpecializationKey key;
CallableNode* callable;
const CallableNodeSignature* signature;
- Statement* body;
+ base::Optional<Statement*> body;
+ SourcePosition request_position;
};
void QueueGenericSpecialization(const SpecializationKey& key,
CallableNode* callable,
const CallableNodeSignature* signature,
- Statement* body);
+ base::Optional<Statement*> body);
void SpecializeGeneric(const PendingSpecialization& specialization);
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index eee1fe626e..4dc5534950 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -22,8 +22,10 @@ class TypeOracle;
class Module {
public:
- explicit Module(const std::string& name) : name_(name) {}
+ explicit Module(const std::string& name, bool is_default)
+ : name_(name), is_default_(is_default) {}
const std::string& name() const { return name_; }
+ bool IsDefault() const { return is_default_; }
std::ostream& source_stream() { return source_stream_; }
std::ostream& header_stream() { return header_stream_; }
std::string source() { return source_stream_.str(); }
@@ -31,17 +33,11 @@ class Module {
private:
std::string name_;
+ bool is_default_;
std::stringstream header_stream_;
std::stringstream source_stream_;
};
-class OperationHandler {
- public:
- std::string macro_name;
- ParameterTypes parameter_types;
- const Type* result_type;
-};
-
struct SourceFileContext {
std::string name;
std::unique_ptr<antlr4::ANTLRFileStream> stream;
@@ -56,16 +52,15 @@ class GlobalContext {
explicit GlobalContext(Ast ast)
: verbose_(false),
next_label_number_(0),
- type_oracle_(&declarations_),
- default_module_(GetModule("base")),
+ default_module_(GetModule("base", true)),
ast_(std::move(ast)) {}
Module* GetDefaultModule() { return default_module_; }
- Module* GetModule(const std::string& name) {
+ Module* GetModule(const std::string& name, bool is_default = false) {
auto i = modules_.find(name);
if (i != modules_.end()) {
return i->second.get();
}
- Module* module = new Module(name);
+ Module* module = new Module(name, is_default);
modules_[name] = std::unique_ptr<Module>(module);
return module;
}
@@ -104,16 +99,12 @@ class GlobalContext {
friend class CurrentCallableActivator;
friend class BreakContinueActivator;
- TypeOracle& GetTypeOracle() { return type_oracle_; }
-
Callable* GetCurrentCallable() const { return current_callable_; }
Label* GetCurrentBreak() const { return break_continue_stack_.back().first; }
Label* GetCurrentContinue() const {
return break_continue_stack_.back().second;
}
- std::map<std::string, std::vector<OperationHandler>> op_handlers_;
-
Declarations* declarations() { return &declarations_; }
Ast* ast() { return &ast_; }
@@ -123,7 +114,6 @@ class GlobalContext {
Declarations declarations_;
Callable* current_callable_;
std::vector<std::pair<Label*, Label*>> break_continue_stack_;
- TypeOracle type_oracle_;
std::map<std::string, std::unique_ptr<Module>> modules_;
Module* default_module_;
std::map<std::pair<const AstNode*, TypeVector>, std::set<const Variable*>>
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index a594c516e9..58fe638a13 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/torque/implementation-visitor.h"
+#include "src/torque/parameter-difference.h"
#include "include/v8.h"
@@ -26,6 +29,8 @@ VisitResult ImplementationVisitor::Visit(Expression* expr) {
const Type* ImplementationVisitor::Visit(Statement* stmt) {
CurrentSourcePosition::Scope scope(stmt->pos);
+ GenerateIndent();
+ source_out() << "// " << CurrentPositionAsString() << "\n";
switch (stmt->kind) {
#define ENUM_ITEM(name) \
case AstNode::Kind::k##name: \
@@ -65,13 +70,11 @@ void ImplementationVisitor::Visit(CallableNode* decl,
}
}
-void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
- Module* module = decl->GetModule();
-
+void ImplementationVisitor::BeginModuleFile(Module* module) {
std::ostream& source = module->source_stream();
std::ostream& header = module->header_stream();
- if (decl->IsDefault()) {
+ if (module->IsDefault()) {
source << "#include \"src/code-stub-assembler.h\"";
} else {
source << "#include \"src/builtins/builtins-" +
@@ -102,7 +105,7 @@ void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
header << "#ifndef " << headerDefine << std::endl;
header << "#define " << headerDefine << std::endl << std::endl;
- if (decl->IsDefault()) {
+ if (module->IsDefault()) {
header << "#include \"src/code-stub-assembler.h\"";
} else {
header << "#include \"src/builtins/builtins-" +
@@ -129,15 +132,20 @@ void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
header << " template <class T>" << std::endl;
header << " using SloppyTNode = compiler::SloppyTNode<T>;" << std::endl
<< std::endl;
+}
- Module* saved_module = module_;
- module_ = module;
- Declarations::NodeScopeActivator scope(declarations(), decl);
- for (auto& child : decl->declarations) Visit(child);
- module_ = saved_module;
+void ImplementationVisitor::EndModuleFile(Module* module) {
+ std::ostream& source = module->source_stream();
+ std::ostream& header = module->header_stream();
DrainSpecializationQueue();
+ std::string upper_name(module->name());
+ transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
+ ::toupper);
+ std::string headerDefine =
+ std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
+
source << "} // namepsace internal" << std::endl
<< "} // namespace v8" << std::endl
<< "" << std::endl;
@@ -149,9 +157,54 @@ void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
header << "#endif // " << headerDefine << std::endl;
}
+void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
+ Module* module = decl->GetModule();
+ Module* saved_module = module_;
+ module_ = module;
+ Declarations::ModuleScopeActivator scope(declarations(), decl->GetModule());
+ for (auto& child : decl->declarations) Visit(child);
+ module_ = saved_module;
+}
+
+void ImplementationVisitor::Visit(ConstDeclaration* decl) {
+ Signature signature = MakeSignatureFromReturnType(decl->type);
+ std::string name = decl->name;
+
+ header_out() << " ";
+ GenerateFunctionDeclaration(header_out(), "", name, signature, {});
+ header_out() << ";\n";
+
+ GenerateFunctionDeclaration(source_out(),
+ GetDSLAssemblerName(CurrentModule()) + "::", name,
+ signature, {});
+ source_out() << " {\n";
+
+ DCHECK(!signature.return_type->IsVoidOrNever());
+
+ VisitResult expression_result = Visit(decl->expression);
+ VisitResult return_result =
+ GenerateImplicitConvert(signature.return_type, expression_result);
+
+ GenerateIndent();
+ source_out() << "return " << return_result.RValue() << ";\n";
+ source_out() << "}\n\n";
+}
+
+void ImplementationVisitor::Visit(StructDeclaration* decl) {
+ header_out() << " struct " << decl->name << " {\n";
+ const StructType* struct_type =
+ static_cast<const StructType*>(declarations()->LookupType(decl->name));
+ for (auto& field : struct_type->fields()) {
+ header_out() << " " << field.type->GetGeneratedTypeName();
+ header_out() << " " << field.name << ";\n";
+ }
+ header_out() << " } "
+ << ";\n";
+}
+
void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
const Signature& sig, Statement* body) {
- Signature signature = MakeSignature(decl, decl->signature.get());
+ Signature signature = MakeSignature(decl->signature.get());
std::string name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
const TypeVector& list = signature.types();
@@ -159,67 +212,60 @@ void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
CurrentCallableActivator activator(global_context_, macro, decl);
- header_out() << " ";
- GenerateMacroFunctionDeclaration(header_out(), "", macro);
- header_out() << ";" << std::endl;
+ if (body != nullptr) {
+ header_out() << " ";
+ GenerateMacroFunctionDeclaration(header_out(), "", macro);
+ header_out() << ";" << std::endl;
- GenerateMacroFunctionDeclaration(
- source_out(), GetDSLAssemblerName(CurrentModule()) + "::", macro);
- source_out() << " {" << std::endl;
+ GenerateMacroFunctionDeclaration(
+ source_out(), GetDSLAssemblerName(CurrentModule()) + "::", macro);
+ source_out() << " {" << std::endl;
- const Variable* result_var = nullptr;
- if (macro->HasReturnValue()) {
- const Type* return_type = macro->signature().return_type;
- if (!return_type->IsConstexpr()) {
- GenerateIndent();
- source_out() << "Node* return_default = &*SmiConstant(0);" << std::endl;
+ const Variable* result_var = nullptr;
+ if (macro->HasReturnValue()) {
+ result_var =
+ GenerateVariableDeclaration(decl, kReturnValueVariable, {}, {});
}
- VisitResult init = {
- return_type,
- return_type->IsConstexpr()
- ? (return_type->GetGeneratedTypeName() + "()")
- : (std::string("UncheckedCast<") +
- return_type->GetGeneratedTNodeTypeName() + ">(return_default)")};
- result_var =
- GenerateVariableDeclaration(decl, kReturnValueVariable, {}, init);
- }
- Label* macro_end = declarations()->DeclareLabel("macro_end");
- GenerateLabelDefinition(macro_end, decl);
-
- const Type* result = Visit(body);
- if (result->IsNever()) {
- if (!macro->signature().return_type->IsNever() && !macro->HasReturns()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " that never returns must have return type never";
- ReportError(s.str());
+ Label* macro_end = declarations()->DeclareLabel("macro_end");
+ GenerateLabelDefinition(macro_end, decl);
+
+ const Type* result = Visit(body);
+ if (result->IsNever()) {
+ if (!macro->signature().return_type->IsNever() && !macro->HasReturns()) {
+ std::stringstream s;
+ s << "macro " << decl->name
+ << " that never returns must have return type never";
+ ReportError(s.str());
+ }
+ } else {
+ if (macro->signature().return_type->IsNever()) {
+ std::stringstream s;
+ s << "macro " << decl->name
+ << " has implicit return at end of its declartion but return type "
+ "never";
+ ReportError(s.str());
+ } else if (!macro->signature().return_type->IsVoid()) {
+ std::stringstream s;
+ s << "macro " << decl->name
+ << " expects to return a value but doesn't on all paths";
+ ReportError(s.str());
+ }
}
- } else {
- if (macro->signature().return_type->IsNever()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " has implicit return at end of its declartion but return type "
- "never";
- ReportError(s.str());
- } else if (!macro->signature().return_type->IsVoid()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " expects to return a value but doesn't on all paths";
- ReportError(s.str());
+ if (macro->HasReturns()) {
+ if (!result->IsNever()) {
+ GenerateLabelGoto(macro_end);
+ }
+ GenerateLabelBind(macro_end);
}
- }
- if (macro->HasReturns()) {
- if (!result->IsNever()) {
- GenerateLabelGoto(macro_end);
+ if (result_var != nullptr) {
+ GenerateIndent();
+ source_out() << "return "
+ << RValueFlattenStructs(
+ VisitResult(result_var->type(), result_var))
+ << ";" << std::endl;
}
- GenerateLabelBind(macro_end);
- }
- if (result_var != nullptr) {
- GenerateIndent();
- source_out() << "return " << result_var->GetValueForRead() << ";"
- << std::endl;
+ source_out() << "}" << std::endl << std::endl;
}
- source_out() << "}" << std::endl << std::endl;
}
void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
@@ -235,22 +281,22 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
const Value* val =
declarations()->LookupValue(decl->signature->parameters.names[0]);
GenerateIndent();
- source_out() << "TNode<Context> " << val->GetValueForDeclaration()
+ source_out() << "TNode<Context> " << val->value()
<< " = UncheckedCast<Context>(Parameter("
- << (builtin->IsVarArgsJavaScript() ? "Builtin" : "")
<< "Descriptor::kContext));" << std::endl;
GenerateIndent();
- source_out() << "USE(" << val->GetValueForDeclaration() << ");" << std::endl;
+ source_out() << "USE(" << val->value() << ");" << std::endl;
size_t first = 1;
if (builtin->IsVarArgsJavaScript()) {
assert(decl->signature->parameters.has_varargs);
- Constant* arguments = Constant::cast(declarations()->LookupValue(
- decl->signature->parameters.arguments_variable));
- std::string arguments_name = arguments->GetValueForDeclaration();
+ ExternConstant* arguments =
+ ExternConstant::cast(declarations()->LookupValue(
+ decl->signature->parameters.arguments_variable));
+ std::string arguments_name = arguments->value();
GenerateIndent();
source_out()
- << "Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);"
+ << "Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);"
<< std::endl;
GenerateIndent();
source_out() << "CodeStubArguments arguments_impl(this, "
@@ -259,15 +305,14 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
const Value* receiver =
declarations()->LookupValue(decl->signature->parameters.names[1]);
GenerateIndent();
- source_out() << "TNode<Object> " << receiver->GetValueForDeclaration()
+ source_out() << "TNode<Object> " << receiver->value()
<< " = arguments_impl.GetReceiver();" << std::endl;
GenerateIndent();
source_out() << "auto arguments = &arguments_impl;" << std::endl;
GenerateIndent();
source_out() << "USE(arguments);" << std::endl;
GenerateIndent();
- source_out() << "USE(" << receiver->GetValueForDeclaration() << ");"
- << std::endl;
+ source_out() << "USE(" << receiver->value() << ");" << std::endl;
first = 2;
}
@@ -282,7 +327,7 @@ const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
init_result = Visit(*stmt->initializer);
}
GenerateVariableDeclaration(stmt, stmt->name, {}, init_result);
- return GetTypeOracle().GetVoidType();
+ return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
@@ -305,7 +350,7 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
source_out() << "" << std::endl;
left = Visit(expr->if_true);
GenerateIndent();
- source_out() << "return " << left.variable() << ";" << std::endl;
+ source_out() << "return " << RValueFlattenStructs(left) << ";" << std::endl;
}
source_out() << ";" << std::endl;
GenerateIndent();
@@ -315,14 +360,14 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
source_out() << "" << std::endl;
right = Visit(expr->if_false);
GenerateIndent();
- source_out() << "return " << right.variable() << ";" << std::endl;
+ source_out() << "return " << RValueFlattenStructs(right) << ";"
+ << std::endl;
}
source_out() << ";" << std::endl;
const Type* common_type = GetCommonType(left.type(), right.type());
std::string result_var = NewTempVariable();
- const Variable* result =
- GenerateVariableDeclaration(expr, result_var, common_type);
+ Variable* result = GenerateVariableDeclaration(expr, result_var, common_type);
{
ScopedIndent indent(this);
@@ -337,24 +382,25 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
VisitResult condition_result = Visit(expr->condition);
if (!condition_result.type()->IsNever()) {
+ condition_result =
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), condition_result);
GenerateBranch(condition_result, true_label, false_label);
}
-
GenerateLabelBind(true_label);
GenerateIndent();
- source_out() << result->GetValueForWrite() << " = " << f1 << "();"
- << std::endl;
+ VisitResult left_result = {right.type(), f1 + "()"};
+ GenerateAssignToVariable(result, left_result);
GenerateLabelGoto(done_label);
GenerateLabelBind(false_label);
GenerateIndent();
- source_out() << result->GetValueForWrite() << " = " << f2 << "();"
- << std::endl;
+ VisitResult right_result = {right.type(), f2 + "()"};
+ GenerateAssignToVariable(result, right_result);
GenerateLabelGoto(done_label);
GenerateLabelBind(done_label);
}
- return VisitResult(common_type, result->GetValueForRead());
+ return VisitResult(common_type, result);
}
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
@@ -367,7 +413,7 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
if (left_result.type()->IsBool()) {
Label* true_label = declarations()->LookupLabel(kTrueLabelName);
GenerateIndent();
- source_out() << "GotoIf(" << left_result.variable() << ", "
+ source_out() << "GotoIf(" << RValueFlattenStructs(left_result) << ", "
<< true_label->generated() << ");" << std::endl;
} else if (!left_result.type()->IsConstexprBool()) {
GenerateLabelBind(false_label);
@@ -377,13 +423,14 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
if (right_result.type() != left_result.type()) {
std::stringstream stream;
stream << "types of left and right expression of logical OR don't match (\""
- << left_result.type() << "\" vs. \"" << right_result.type() << "\")";
+ << *left_result.type() << "\" vs. \"" << *right_result.type()
+ << "\")";
ReportError(stream.str());
}
if (left_result.type()->IsConstexprBool()) {
- return VisitResult(left_result.type(), std::string("(") +
- left_result.variable() + " || " +
- right_result.variable() + ")");
+ return VisitResult(left_result.type(),
+ std::string("(") + RValueFlattenStructs(left_result) +
+ " || " + RValueFlattenStructs(right_result) + ")");
} else {
return right_result;
}
@@ -399,7 +446,7 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
if (left_result.type()->IsBool()) {
Label* false_label = declarations()->LookupLabel(kFalseLabelName);
GenerateIndent();
- source_out() << "GotoIfNot(" << left_result.variable() << ", "
+ source_out() << "GotoIfNot(" << RValueFlattenStructs(left_result) << ", "
<< false_label->generated() << ");" << std::endl;
} else if (!left_result.type()->IsConstexprBool()) {
GenerateLabelBind(true_label);
@@ -410,13 +457,13 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
std::stringstream stream;
stream
<< "types of left and right expression of logical AND don't match (\""
- << left_result.type() << "\" vs. \"" << right_result.type() << "\")";
+ << *left_result.type() << "\" vs. \"" << *right_result.type() << "\")";
ReportError(stream.str());
}
if (left_result.type()->IsConstexprBool()) {
- return VisitResult(left_result.type(), std::string("(") +
- left_result.variable() + " && " +
- right_result.variable() + ")");
+ return VisitResult(left_result.type(),
+ std::string("(") + RValueFlattenStructs(left_result) +
+ " && " + RValueFlattenStructs(right_result) + ")");
} else {
return right_result;
}
@@ -430,10 +477,10 @@ VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
if (expr->postfix) {
value_copy = GenerateCopy(current_value);
}
- VisitResult one = {GetTypeOracle().GetConstInt31Type(), "1"};
+ VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
Arguments args;
args.parameters = {current_value, one};
- VisitResult assignment_value = GenerateOperation(
+ VisitResult assignment_value = GenerateCall(
expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
GenerateAssignToLocation(expr->location, location_ref, assignment_value);
return expr->postfix ? value_copy : assignment_value;
@@ -448,7 +495,7 @@ VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
assignment_value = Visit(expr->value);
Arguments args;
args.parameters = {assignment_value, assignment_value};
- assignment_value = GenerateOperation(*expr->op, args);
+ assignment_value = GenerateCall(*expr->op, args);
GenerateAssignToLocation(expr->location, location_ref, assignment_value);
} else {
assignment_value = Visit(expr->value);
@@ -464,12 +511,10 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
const Type* result_type =
declarations()->LookupType(CONST_FLOAT64_TYPE_STRING);
if (i == d) {
- if (Internals::IsValidSmi(i)) {
- if (sizeof(void*) == sizeof(double) && ((i >> 30) != (i >> 31))) {
- result_type = declarations()->LookupType(CONST_INT32_TYPE_STRING);
- } else {
- result_type = declarations()->LookupType(CONST_INT31_TYPE_STRING);
- }
+ if ((i >> 30) == (i >> 31)) {
+ result_type = declarations()->LookupType(CONST_INT31_TYPE_STRING);
+ } else {
+ result_type = declarations()->LookupType(CONST_INT32_TYPE_STRING);
}
}
std::string temp = GenerateNewTempVariable(result_type);
@@ -478,11 +523,10 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
}
VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
- std::string temp = GenerateNewTempVariable(GetTypeOracle().GetStringType());
- source_out() << "StringConstant(\""
- << expr->literal.substr(1, expr->literal.size() - 2) << "\");"
- << std::endl;
- return VisitResult{GetTypeOracle().GetStringType(), temp};
+ std::string temp = GenerateNewTempVariable(TypeOracle::GetConstStringType());
+ source_out() << "\"" << expr->literal.substr(1, expr->literal.size() - 2)
+ << "\";" << std::endl;
+ return VisitResult{TypeOracle::GetConstStringType(), temp};
}
VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
@@ -491,7 +535,7 @@ VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
"creating function pointers is only allowed for internal builtins with "
"stub linkage");
}
- const Type* type = declarations()->GetFunctionPointerType(
+ const Type* type = TypeOracle::GetFunctionPointerType(
builtin->signature().parameter_types.types,
builtin->signature().return_type);
std::string code =
@@ -503,13 +547,15 @@ VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
VisitResult ImplementationVisitor::Visit(IdentifierExpression* expr) {
std::string name = expr->name;
if (expr->generic_arguments.size() != 0) {
- Generic* generic = declarations()->LookupGeneric(expr->name);
- TypeVector specialization_types = GetTypeVector(expr->generic_arguments);
- name = GetGeneratedCallableName(name, specialization_types);
- CallableNode* callable = generic->declaration()->callable;
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
+ GenericList* generic_list = declarations()->LookupGeneric(expr->name);
+ for (Generic* generic : generic_list->list()) {
+ TypeVector specialization_types = GetTypeVector(expr->generic_arguments);
+ name = GetGeneratedCallableName(name, specialization_types);
+ CallableNode* callable = generic->declaration()->callable;
+ QueueGenericSpecialization({generic, specialization_types}, callable,
+ callable->signature.get(),
+ generic->declaration()->body);
+ }
}
if (Builtin* builtin = Builtin::DynamicCast(declarations()->Lookup(name))) {
@@ -519,20 +565,6 @@ VisitResult ImplementationVisitor::Visit(IdentifierExpression* expr) {
return GenerateFetchFromLocation(expr, GetLocationReference(expr));
}
-VisitResult ImplementationVisitor::Visit(CastExpression* expr) {
- Arguments args;
- args.parameters = {Visit(expr->value)};
- args.labels = LabelsFromIdentifiers({expr->otherwise});
- return GenerateOperation("cast<>", args, declarations()->GetType(expr->type));
-}
-
-VisitResult ImplementationVisitor::Visit(ConvertExpression* expr) {
- Arguments args;
- args.parameters = {Visit(expr->value)};
- return GenerateOperation("convert<>", args,
- declarations()->GetType(expr->type));
-}
-
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
Label* label = declarations()->LookupLabel(stmt->label);
@@ -553,7 +585,7 @@ const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
GenerateLabelGoto(label);
label->MarkUsed();
- return GetTypeOracle().GetNeverType();
+ return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
@@ -564,17 +596,19 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
if (stmt->is_constexpr) {
VisitResult expression_result = Visit(stmt->condition);
- if (!(expression_result.type() == GetTypeOracle().GetConstexprBoolType())) {
+ if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
std::stringstream stream;
- stream << "expression should return type \"constexpr bool\" but doesn't";
+ stream << "expression should return type constexpr bool "
+ << "but returns type " << *expression_result.type();
ReportError(stream.str());
}
const Type* left_result;
- const Type* right_result = GetTypeOracle().GetVoidType();
+ const Type* right_result = TypeOracle::GetVoidType();
{
GenerateIndent();
- source_out() << "if ((" << expression_result.variable() << ")) ";
+ source_out() << "if ((" << RValueFlattenStructs(expression_result)
+ << ")) ";
ScopedIndent indent(this, false);
source_out() << std::endl;
left_result = Visit(stmt->if_true);
@@ -626,8 +660,7 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
if (live) {
GenerateLabelBind(done_label);
}
- return live ? GetTypeOracle().GetVoidType()
- : GetTypeOracle().GetNeverType();
+ return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
}
}
@@ -656,13 +689,13 @@ const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
{stmt->body}, header_label);
GenerateLabelBind(exit_label);
- return GetTypeOracle().GetVoidType();
+ return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(BlockStatement* block) {
Declarations::NodeScopeActivator scope(declarations(), block);
ScopedIndent indent(this);
- const Type* type = GetTypeOracle().GetVoidType();
+ const Type* type = TypeOracle::GetVoidType();
for (Statement* s : block->statements) {
if (type->IsNever()) {
std::stringstream stream;
@@ -684,60 +717,83 @@ const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
GenerateIndent();
if (stmt->never_continues) {
source_out() << "Unreachable();" << std::endl;
- return GetTypeOracle().GetNeverType();
+ return TypeOracle::GetNeverType();
} else {
source_out() << "DebugBreak();" << std::endl;
- return GetTypeOracle().GetVoidType();
+ return TypeOracle::GetVoidType();
}
}
+namespace {
+
+std::string FormatAssertSource(const std::string& str) {
+ // Replace all whitespace characters with a space character.
+ std::string str_no_newlines = str;
+ std::replace_if(str_no_newlines.begin(), str_no_newlines.end(),
+ [](unsigned char c) { return isspace(c); }, ' ');
+
+ // str might include indentation, squash multiple space characters into one.
+ std::string result;
+ std::unique_copy(str_no_newlines.begin(), str_no_newlines.end(),
+ std::back_inserter(result),
+ [](char a, char b) { return a == ' ' && b == ' '; });
+ return result;
+}
+
+} // namespace
+
const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
+ bool do_check = !stmt->debug_only;
#if defined(DEBUG)
- // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
- // Torque allows and handles two types of expressions in the if protocol
- // automagically, ones that return TNode<BoolT> and those that use the
- // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
- // handle this is embedded in the expression handling and to it's not possible
- // to make the decision to use CSA_ASSERT or CSA_ASSERT_BRANCH isn't trivial
- // up-front. Secondly, on failure, the assert text should be the corresponding
- // Torque code, not the -gen.cc code, which would be the case when using
- // CSA_ASSERT_XXX.
- Label* true_label = nullptr;
- Label* false_label = nullptr;
- Declarations::NodeScopeActivator scope(declarations(), stmt->expression);
- true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(true_label);
- false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(false_label);
-
- VisitResult expression_result = Visit(stmt->expression);
- if (expression_result.type() == GetTypeOracle().GetBoolType()) {
- GenerateBranch(expression_result, true_label, false_label);
- } else {
- if (expression_result.type() != GetTypeOracle().GetNeverType()) {
- std::stringstream s;
- s << "unexpected return type " << expression_result.type()
- << " for branch expression";
- ReportError(s.str());
+ do_check = true;
+#endif
+ if (do_check) {
+ // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
+ // Torque allows and handles two types of expressions in the if protocol
+ // automagically, ones that return TNode<BoolT> and those that use the
+ // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
+ // handle this is embedded in the expression handling, the decision of
+ // whether to use CSA_ASSERT or CSA_ASSERT_BRANCH isn't trivial to make
+ // up-front. Secondly, on failure, the assert text should be
+ // the corresponding Torque code, not the -gen.cc code, which would be the
+ // case when using CSA_ASSERT_XXX.
+ Label* true_label = nullptr;
+ Label* false_label = nullptr;
+ Declarations::NodeScopeActivator scope(declarations(), stmt->expression);
+ true_label = declarations()->LookupLabel(kTrueLabelName);
+ GenerateLabelDefinition(true_label);
+ false_label = declarations()->LookupLabel(kFalseLabelName);
+ GenerateLabelDefinition(false_label);
+
+ VisitResult expression_result = Visit(stmt->expression);
+ if (expression_result.type() == TypeOracle::GetBoolType()) {
+ GenerateBranch(expression_result, true_label, false_label);
+ } else {
+ if (expression_result.type() != TypeOracle::GetNeverType()) {
+ std::stringstream s;
+ s << "unexpected return type " << *expression_result.type()
+ << " for branch expression";
+ ReportError(s.str());
+ }
}
- }
- GenerateLabelBind(false_label);
- GenerateIndent();
- source_out() << "Print(\""
- << "assert '" << stmt->source << "' failed at "
- << PositionAsString(stmt->pos) << "\");" << std::endl;
- GenerateIndent();
- source_out() << "Unreachable();" << std::endl;
+ GenerateLabelBind(false_label);
+ GenerateIndent();
+ source_out() << "Print(\""
+ << "assert '" << FormatAssertSource(stmt->source)
+ << "' failed at " << PositionAsString(stmt->pos) << "\");"
+ << std::endl;
+ GenerateIndent();
+ source_out() << "Unreachable();" << std::endl;
- GenerateLabelBind(true_label);
-#endif
- return GetTypeOracle().GetVoidType();
+ GenerateLabelBind(true_label);
+ }
+ return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
const Type* type = Visit(stmt->expression).type();
- return type->IsNever() ? type : GetTypeOracle().GetVoidType();
+ return type->IsNever() ? type : TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
@@ -754,7 +810,7 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
if (!stmt->value) {
std::stringstream s;
s << "return expression needs to be specified for a return type of "
- << current_callable->signature().return_type;
+ << *current_callable->signature().return_type;
ReportError(s.str());
}
VisitResult expression_result = Visit(*stmt->value);
@@ -768,11 +824,12 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
} else if (current_callable->IsBuiltin()) {
if (Builtin::cast(current_callable)->IsVarArgsJavaScript()) {
GenerateIndent();
- source_out() << "arguments->PopAndReturn(" << return_result.variable()
- << ");" << std::endl;
+ source_out() << "arguments->PopAndReturn("
+ << RValueFlattenStructs(return_result) << ");"
+ << std::endl;
} else {
GenerateIndent();
- source_out() << "Return(" << return_result.variable() << ");"
+ source_out() << "Return(" << RValueFlattenStructs(return_result) << ");"
<< std::endl;
}
} else {
@@ -788,20 +845,20 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
GenerateLabelGoto(end);
}
current_callable->IncrementReturns();
- return GetTypeOracle().GetNeverType();
+ return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
VisitResult expression_result = Visit(stmt->iterable);
- VisitResult begin =
- stmt->begin ? Visit(*stmt->begin)
- : VisitResult(GetTypeOracle().GetConstInt31Type(), "0");
+ VisitResult begin = stmt->begin
+ ? Visit(*stmt->begin)
+ : VisitResult(TypeOracle::GetConstInt31Type(), "0");
- VisitResult end =
- stmt->end ? Visit(*stmt->end)
- : GenerateOperation(".length", {{expression_result}, {}});
+ VisitResult end = stmt->end
+ ? Visit(*stmt->end)
+ : GenerateCall(".length", {{expression_result}, {}});
Label* body_label = declarations()->DeclarePrivateLabel("body");
GenerateLabelDefinition(body_label);
@@ -815,8 +872,7 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
stmt, std::string(kForIndexValueVariable) + "_" + NewTempVariable(),
common_type, begin);
- VisitResult index_for_read = {index_var->type(),
- index_var->GetValueForRead()};
+ VisitResult index_for_read = {index_var->type(), index_var};
Label* header_label = declarations()->DeclarePrivateLabel("header");
GenerateLabelDefinition(header_label, stmt);
@@ -828,12 +884,12 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
BreakContinueActivator activator(global_context_, exit_label,
increment_label);
- VisitResult result = GenerateOperation("<", {{index_for_read, end}, {}});
+ VisitResult result = GenerateCall("<", {{index_for_read, end}, {}});
GenerateBranch(result, body_label, exit_label);
GenerateLabelBind(body_label);
VisitResult element_result =
- GenerateOperation("[]", {{expression_result, index_for_read}, {}});
+ GenerateCall("[]", {{expression_result, index_for_read}, {}});
GenerateVariableDeclaration(stmt->var_declaration,
stmt->var_declaration->name, {}, element_result);
Visit(stmt->body);
@@ -842,22 +898,22 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
GenerateLabelBind(increment_label);
Arguments increment_args;
increment_args.parameters = {index_for_read,
- {GetTypeOracle().GetConstInt31Type(), "1"}};
- VisitResult increment_result = GenerateOperation("+", increment_args);
+ {TypeOracle::GetConstInt31Type(), "1"}};
+ VisitResult increment_result = GenerateCall("+", increment_args);
GenerateAssignToVariable(index_var, increment_result);
GenerateLabelGoto(header_label);
GenerateLabelBind(exit_label);
- return GetTypeOracle().GetVoidType();
+ return TypeOracle::GetVoidType();
}
-const Type* ImplementationVisitor::Visit(TryCatchStatement* stmt) {
+const Type* ImplementationVisitor::Visit(TryLabelStatement* stmt) {
ScopedIndent indent(this);
Label* try_done = declarations()->DeclarePrivateLabel("try_done");
GenerateLabelDefinition(try_done);
- const Type* try_result = GetTypeOracle().GetNeverType();
+ const Type* try_result = TypeOracle::GetNeverType();
std::vector<Label*> labels;
// Output labels for the goto handlers and for the merge after the try.
@@ -890,7 +946,7 @@ const Type* ImplementationVisitor::Visit(TryCatchStatement* stmt) {
if (GenerateLabeledStatementBlocks({stmt->try_block},
std::vector<Label*>({try_begin_label}),
try_done)) {
- try_result = GetTypeOracle().GetVoidType();
+ try_result = TypeOracle::GetVoidType();
}
}
@@ -914,7 +970,7 @@ const Type* ImplementationVisitor::Visit(TryCatchStatement* stmt) {
std::vector<Statement*> bodies;
for (LabelBlock* block : stmt->label_blocks) bodies.push_back(block->body);
if (GenerateLabeledStatementBlocks(bodies, labels, try_done)) {
- try_result = GetTypeOracle().GetVoidType();
+ try_result = TypeOracle::GetVoidType();
}
if (!try_result->IsNever()) {
@@ -929,7 +985,7 @@ const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
ReportError("break used outside of loop");
}
GenerateLabelGoto(break_label);
- return GetTypeOracle().GetNeverType();
+ return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
@@ -938,7 +994,7 @@ const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
ReportError("continue used outside of loop");
}
GenerateLabelGoto(continue_label);
- return GetTypeOracle().GetNeverType();
+ return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
@@ -977,7 +1033,7 @@ const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
}
GenerateLabelBind(exit_label);
- return GetTypeOracle().GetVoidType();
+ return TypeOracle::GetVoidType();
}
void ImplementationVisitor::GenerateImplementation(const std::string& dir,
@@ -1017,26 +1073,35 @@ void ImplementationVisitor::GenerateIndent() {
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, Macro* macro) {
+ GenerateFunctionDeclaration(o, macro_prefix, macro->name(),
+ macro->signature(), macro->parameter_names());
+}
+
+void ImplementationVisitor::GenerateFunctionDeclaration(
+ std::ostream& o, const std::string& macro_prefix, const std::string& name,
+ const Signature& signature, const NameVector& parameter_names) {
if (global_context_.verbose()) {
- std::cout << "generating source for declaration " << *macro << ""
+ std::cout << "generating source for declaration " << name << ""
<< std::endl;
}
// Quite a hack here. Make sure that TNode is namespace qualified if the
- // macro name is also qualified.
- std::string return_type_name(
- macro->signature().return_type->GetGeneratedTypeName());
- if (macro_prefix != "" && (return_type_name.length() > 5) &&
- (return_type_name.substr(0, 5) == "TNode")) {
+ // macro/constant name is also qualified.
+ std::string return_type_name(signature.return_type->GetGeneratedTypeName());
+ if (const StructType* struct_type =
+ StructType::DynamicCast(signature.return_type)) {
+ o << GetDSLAssemblerName(struct_type->module()) << "::";
+ } else if (macro_prefix != "" && (return_type_name.length() > 5) &&
+ (return_type_name.substr(0, 5) == "TNode")) {
o << "compiler::";
}
o << return_type_name;
- o << " " << macro_prefix << macro->name() << "(";
+ o << " " << macro_prefix << name << "(";
- DCHECK_EQ(macro->signature().types().size(), macro->parameter_names().size());
- auto type_iterator = macro->signature().types().begin();
+ DCHECK_EQ(signature.types().size(), parameter_names.size());
+ auto type_iterator = signature.types().begin();
bool first = true;
- for (const std::string& name : macro->parameter_names()) {
+ for (const std::string& name : parameter_names) {
if (!first) {
o << ", ";
}
@@ -1044,12 +1109,12 @@ void ImplementationVisitor::GenerateMacroFunctionDeclaration(
const Type* parameter_type = *type_iterator;
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
- o << generated_type_name << " " << parameter->GetValueForDeclaration();
+ o << generated_type_name << " " << parameter->value();
type_iterator++;
first = false;
}
- for (const LabelDeclaration& label_info : macro->signature().labels) {
+ for (const LabelDeclaration& label_info : signature.labels) {
Label* label = declarations()->LookupLabel(label_info.name);
if (!first) {
o << ", ";
@@ -1060,89 +1125,204 @@ void ImplementationVisitor::GenerateMacroFunctionDeclaration(
generated_type_name += var->type()->GetGeneratedTNodeTypeName();
generated_type_name += ">*";
o << ", ";
- o << generated_type_name << " " << var->GetValueForDeclaration();
+ o << generated_type_name << " " << var->value();
}
}
o << ")";
}
-VisitResult ImplementationVisitor::GenerateOperation(
- const std::string& operation, Arguments arguments,
- base::Optional<const Type*> return_type) {
+namespace {
+
+void PrintMacroSignatures(std::stringstream& s, const std::string& name,
+ const std::vector<Macro*>& macros) {
+ for (Macro* m : macros) {
+ s << "\n " << name;
+ PrintSignature(s, m->signature(), false);
+ }
+}
+
+void FailMacroLookup(const std::string& reason, const std::string& name,
+ const Arguments& arguments,
+ const std::vector<Macro*>& candidates) {
+ std::stringstream stream;
+ stream << "\n"
+ << reason << ": \n " << name << "("
+ << arguments.parameters.GetTypeVector() << ")";
+ if (arguments.labels.size() != 0) {
+ stream << " labels ";
+ for (auto l : arguments.labels) {
+ PrintLabel(stream, *l, false);
+ }
+ }
+ stream << "\ncandidates are:";
+ PrintMacroSignatures(stream, name, candidates);
+ ReportError(stream.str());
+}
+
+} // namespace
+
+Callable* ImplementationVisitor::LookupCall(const std::string& name,
+ const Arguments& arguments) {
+ Callable* result = nullptr;
TypeVector parameter_types(arguments.parameters.GetTypeVector());
+ Declarable* declarable = declarations()->Lookup(name);
+ if (declarable->IsBuiltin()) {
+ result = Builtin::cast(declarable);
+ } else if (declarable->IsRuntimeFunction()) {
+ result = RuntimeFunction::cast(declarable);
+ } else if (declarable->IsMacroList()) {
+ std::vector<Macro*> candidates;
+ std::vector<Macro*> macros_with_same_name;
+ for (Macro* m : MacroList::cast(declarable)->list()) {
+ bool try_bool_context =
+ arguments.labels.size() == 0 &&
+ m->signature().return_type == TypeOracle::GetNeverType();
+ Label* true_label = nullptr;
+ Label* false_label = nullptr;
+ if (try_bool_context) {
+ true_label = declarations()->TryLookupLabel(kTrueLabelName);
+ false_label = declarations()->TryLookupLabel(kFalseLabelName);
+ }
+ if (IsCompatibleSignature(m->signature(), parameter_types,
+ arguments.labels) ||
+ (true_label && false_label &&
+ IsCompatibleSignature(m->signature(), parameter_types,
+ {true_label, false_label}))) {
+ candidates.push_back(m);
+ } else {
+ macros_with_same_name.push_back(m);
+ }
+ }
+
+ if (candidates.empty() && macros_with_same_name.empty()) {
+ std::stringstream stream;
+ stream << "no matching declaration found for " << name;
+ ReportError(stream.str());
+ } else if (candidates.empty()) {
+ FailMacroLookup("cannot find macro with name", name, arguments,
+ macros_with_same_name);
+ }
- auto i = global_context_.op_handlers_.find(operation);
- if (i != global_context_.op_handlers_.end()) {
- for (auto handler : i->second) {
- if (GetTypeOracle().IsCompatibleSignature(handler.parameter_types,
- parameter_types)) {
- // Operators used in a bit context can also be function calls that never
- // return but have a True and False label
- if (!return_type && handler.result_type->IsNever()) {
- if (arguments.labels.size() == 0) {
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- arguments.labels.push_back(true_label);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- arguments.labels.push_back(false_label);
- }
- }
-
- if (!return_type || (GetTypeOracle().IsAssignableFrom(
- *return_type, handler.result_type))) {
- return GenerateCall(handler.macro_name, arguments, false);
- }
+ auto is_better_candidate = [&](Macro* a, Macro* b) {
+ return ParameterDifference(a->signature().parameter_types.types,
+ parameter_types)
+ .StrictlyBetterThan(ParameterDifference(
+ b->signature().parameter_types.types, parameter_types));
+ };
+
+ Macro* best = *std::min_element(candidates.begin(), candidates.end(),
+ is_better_candidate);
+ for (Macro* candidate : candidates) {
+ if (candidate != best && !is_better_candidate(best, candidate)) {
+ FailMacroLookup("ambiguous macro", name, arguments, candidates);
}
}
+ result = best;
+ } else {
+ std::stringstream stream;
+ stream << "can't call " << declarable->type_name() << " " << name
+ << " because it's not callable"
+ << ": call parameters were (" << parameter_types << ")";
+ ReportError(stream.str());
+ }
+
+ size_t caller_size = parameter_types.size();
+ size_t callee_size = result->signature().types().size();
+ if (caller_size != callee_size &&
+ !result->signature().parameter_types.var_args) {
+ std::stringstream stream;
+ stream << "parameter count mismatch calling " << *result << " - expected "
+ << std::to_string(callee_size) << ", found "
+ << std::to_string(caller_size);
+ ReportError(stream.str());
+ }
+
+ return result;
+}
+
+void ImplementationVisitor::GetFlattenedStructsVars(
+ const Variable* base, std::set<const Variable*>& vars) {
+ const Type* type = base->type();
+ if (base->IsConst()) return;
+ if (type->IsStructType()) {
+ const StructType* struct_type = StructType::cast(type);
+ for (auto& field : struct_type->fields()) {
+ std::string field_var_name = base->name() + "." + field.name;
+ GetFlattenedStructsVars(
+ Variable::cast(declarations()->LookupValue(field_var_name)), vars);
+ }
+ } else {
+ vars.insert(base);
}
- std::stringstream s;
- s << "cannot find implementation of operation \"" << operation
- << "\" with types " << parameter_types;
- ReportError(s.str());
- return VisitResult(GetTypeOracle().GetVoidType(), "");
}
void ImplementationVisitor::GenerateChangedVarsFromControlSplit(AstNode* node) {
const std::set<const Variable*>& changed_vars =
global_context_.GetControlSplitChangedVariables(
node, declarations()->GetCurrentSpecializationTypeNamesVector());
- source_out() << "{";
- bool first = true;
+ std::set<const Variable*> flattened_vars;
for (auto v : changed_vars) {
- if (v->type()->IsConstexpr()) continue;
- if (first) {
- first = false;
- } else {
- source_out() << ", ";
- }
- source_out() << v->GetValueForDeclaration();
+ GetFlattenedStructsVars(v, flattened_vars);
}
+ source_out() << "{";
+ PrintCommaSeparatedList(source_out(), flattened_vars,
+ [&](const Variable* v) { return v->value(); });
source_out() << "}";
}
const Type* ImplementationVisitor::GetCommonType(const Type* left,
const Type* right) {
- const Type* common_type = GetTypeOracle().GetVoidType();
- if (GetTypeOracle().IsAssignableFrom(left, right)) {
+ const Type* common_type;
+ if (IsAssignableFrom(left, right)) {
common_type = left;
- } else if (GetTypeOracle().IsAssignableFrom(right, left)) {
+ } else if (IsAssignableFrom(right, left)) {
common_type = right;
} else {
- std::stringstream s;
- s << "illegal combination of types " << left << " and " << right;
- ReportError(s.str());
+ common_type = TypeOracle::GetUnionType(left, right);
}
+ common_type = common_type->NonConstexprVersion();
return common_type;
}
VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
std::string temp = GenerateNewTempVariable(to_copy.type());
- source_out() << to_copy.variable() << ";" << std::endl;
+ source_out() << RValueFlattenStructs(to_copy) << ";" << std::endl;
GenerateIndent();
source_out() << "USE(" << temp << ");" << std::endl;
return VisitResult(to_copy.type(), temp);
}
+VisitResult ImplementationVisitor::Visit(StructExpression* decl) {
+ const Type* raw_type = declarations()->LookupType(decl->name);
+ if (!raw_type->IsStructType()) {
+ std::stringstream s;
+ s << decl->name << " is not a struct but used like one ";
+ ReportError(s.str());
+ }
+ const StructType* struct_type = StructType::cast(raw_type);
+ if (struct_type->fields().size() != decl->expressions.size()) {
+ std::stringstream s;
+ s << "initializer count mismatch for struct " << decl->name << " (expected "
+ << struct_type->fields().size() << ", found " << decl->expressions.size()
+ << ")";
+ ReportError(s.str());
+ }
+ std::vector<VisitResult> expression_results;
+ for (auto& field : struct_type->fields()) {
+ VisitResult value = Visit(decl->expressions[expression_results.size()]);
+ value = GenerateImplicitConvert(field.type, value);
+ expression_results.push_back(value);
+ }
+ std::string result_var_name = GenerateNewTempVariable(struct_type);
+ source_out() << "{";
+ PrintCommaSeparatedList(
+ source_out(), expression_results,
+ [&](const VisitResult& result) { return RValueFlattenStructs(result); });
+ source_out() << "};\n";
+ return VisitResult(struct_type, result_var_name);
+}
+
LocationReference ImplementationVisitor::GetLocationReference(
LocationExpression* location) {
switch (location->kind) {
@@ -1159,6 +1339,47 @@ LocationReference ImplementationVisitor::GetLocationReference(
}
}
+LocationReference ImplementationVisitor::GetLocationReference(
+ FieldAccessExpression* expr) {
+ VisitResult result = Visit(expr->object);
+ if (result.type()->IsStructType()) {
+ if (result.declarable()) {
+ return LocationReference(
+ declarations()->LookupValue((*result.declarable())->name() + "." +
+ expr->field),
+ {}, {});
+
+ } else {
+ return LocationReference(
+ nullptr,
+ VisitResult(result.type(), result.RValue() + "." + expr->field), {});
+ }
+ }
+ return LocationReference(nullptr, result, {});
+}
+
+std::string ImplementationVisitor::RValueFlattenStructs(VisitResult result) {
+ if (result.declarable()) {
+ const Value* value = *result.declarable();
+ const Type* type = value->type();
+ if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ std::stringstream s;
+ s << struct_type->name() << "{";
+ PrintCommaSeparatedList(
+ s, struct_type->fields(), [&](const NameAndType& field) {
+ std::string field_declaration = value->name() + "." + field.name;
+ Variable* field_variable =
+ Variable::cast(declarations()->LookupValue(field_declaration));
+ return RValueFlattenStructs(
+ VisitResult(field_variable->type(), field_variable));
+ });
+ s << "}";
+ return s.str();
+ }
+ }
+ return result.RValue();
+}
+
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
LocationExpression* location, LocationReference reference) {
switch (location->kind) {
@@ -1176,35 +1397,115 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
}
}
+VisitResult ImplementationVisitor::GenerateFetchFromLocation(
+ FieldAccessExpression* expr, LocationReference reference) {
+ const Type* type = reference.base.type();
+ if (reference.value != nullptr) {
+ return GenerateFetchFromLocation(reference);
+ } else if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ auto& fields = struct_type->fields();
+ auto i = std::find_if(
+ fields.begin(), fields.end(),
+ [&](const NameAndType& f) { return f.name == expr->field; });
+ if (i == fields.end()) {
+ std::stringstream s;
+ s << "\"" << expr->field << "\" is not a field of struct type \""
+ << struct_type->name() << "\"";
+ ReportError(s.str());
+ }
+ return VisitResult(i->type, reference.base.RValue());
+ } else {
+ Arguments arguments;
+ arguments.parameters = {reference.base};
+ return GenerateCall(std::string(".") + expr->field, arguments);
+ }
+}
+
void ImplementationVisitor::GenerateAssignToVariable(Variable* var,
VisitResult value) {
- VisitResult casted_value = GenerateImplicitConvert(var->type(), value);
- GenerateIndent();
- source_out() << var->GetValueForWrite() << " = " << casted_value.variable()
- << ";" << std::endl;
+ if (var->type()->IsStructType()) {
+ if (value.type() != var->type()) {
+ std::stringstream s;
+ s << "incompatible assignment from type " << *value.type() << " to "
+ << *var->type();
+ ReportError(s.str());
+ }
+ const StructType* struct_type = StructType::cast(var->type());
+ for (auto& field : struct_type->fields()) {
+ std::string field_declaration = var->name() + "." + field.name;
+ Variable* field_variable =
+ Variable::cast(declarations()->LookupValue(field_declaration));
+ if (value.declarable() && (*value.declarable())->IsVariable()) {
+ Variable* source_field = Variable::cast(declarations()->LookupValue(
+ Variable::cast((*value.declarable()))->name() + "." + field.name));
+ GenerateAssignToVariable(
+ field_variable, VisitResult{source_field->type(), source_field});
+ } else {
+ GenerateAssignToVariable(
+ field_variable, VisitResult{field_variable->type(),
+ value.RValue() + "." + field.name});
+ }
+ }
+ } else {
+ VisitResult casted_value = GenerateImplicitConvert(var->type(), value);
+ GenerateIndent();
+ VisitResult var_value = {var->type(), var};
+ source_out() << var_value.LValue() << " = "
+ << RValueFlattenStructs(casted_value) << ";" << std::endl;
+ }
var->Define();
}
void ImplementationVisitor::GenerateAssignToLocation(
LocationExpression* location, const LocationReference& reference,
VisitResult assignment_value) {
- if (IdentifierExpression::cast(location)) {
+ if (reference.value != nullptr) {
Value* value = reference.value;
- if (value->IsConst()) {
+ Variable* var = Variable::cast(value);
+ if (var->IsConst()) {
std::stringstream s;
- s << "\"" << value->name()
+ s << "\"" << var->name()
<< "\" is declared const (maybe implicitly) and cannot be assigned to";
ReportError(s.str());
}
- Variable* var = Variable::cast(value);
GenerateAssignToVariable(var, assignment_value);
} else if (auto access = FieldAccessExpression::cast(location)) {
- GenerateOperation(std::string(".") + access->field + "=",
- {{reference.base, assignment_value}, {}});
+ GenerateCall(std::string(".") + access->field + "=",
+ {{reference.base, assignment_value}, {}});
} else {
DCHECK_NOT_NULL(ElementAccessExpression::cast(location));
- GenerateOperation(
- "[]=", {{reference.base, reference.index, assignment_value}, {}});
+ GenerateCall("[]=",
+ {{reference.base, reference.index, assignment_value}, {}});
+ }
+}
+
+void ImplementationVisitor::GenerateVariableDeclaration(const Variable* var) {
+ const Type* var_type = var->type();
+ if (var_type->IsStructType()) {
+ const StructType* struct_type = StructType::cast(var_type);
+ for (auto& field : struct_type->fields()) {
+ GenerateVariableDeclaration(Variable::cast(
+ declarations()->LookupValue(var->name() + "." + field.name)));
+ }
+ } else {
+ std::string value = var->value();
+ GenerateIndent();
+ if (var_type->IsConstexpr()) {
+ source_out() << var_type->GetGeneratedTypeName();
+ source_out() << " " << value << "_impl;" << std::endl;
+ } else if (var->IsConst()) {
+ source_out() << "TNode<" << var->type()->GetGeneratedTNodeTypeName();
+ source_out() << "> " << var->value() << "_impl;\n";
+ } else {
+ source_out() << "TVARIABLE(";
+ source_out() << var_type->GetGeneratedTNodeTypeName();
+ source_out() << ", " << value << "_impl);" << std::endl;
+ }
+ GenerateIndent();
+ source_out() << "auto " << value << " = &" << value << "_impl;"
+ << std::endl;
+ GenerateIndent();
+ source_out() << "USE(" << value << ");" << std::endl;
}
}
@@ -1217,7 +1518,7 @@ Variable* ImplementationVisitor::GenerateVariableDeclaration(
if (declarations()->TryLookup(name)) {
variable = Variable::cast(declarations()->LookupValue(name));
} else {
- variable = declarations()->DeclareVariable(name, *type);
+ variable = declarations()->DeclareVariable(name, *type, false);
// Because the variable is being defined during code generation, it must be
// assumed that it changes along all control split paths because it's no
// longer possible to run the control-flow anlaysis in the declaration pass
@@ -1226,24 +1527,7 @@ Variable* ImplementationVisitor::GenerateVariableDeclaration(
node, declarations()->GetCurrentSpecializationTypeNamesVector(),
variable);
}
-
- GenerateIndent();
- if (variable->type()->IsConstexpr()) {
- source_out() << variable->type()->GetGeneratedTypeName();
- source_out() << " " << variable->GetValueForDeclaration() << "_impl;"
- << std::endl;
- } else {
- source_out() << "TVARIABLE(";
- source_out() << variable->type()->GetGeneratedTNodeTypeName();
- source_out() << ", " << variable->GetValueForDeclaration() << "_impl);"
- << std::endl;
- }
- GenerateIndent();
- source_out() << "auto " << variable->GetValueForDeclaration() << " = &"
- << variable->GetValueForDeclaration() << "_impl;" << std::endl;
- GenerateIndent();
- source_out() << "USE(" << variable->GetValueForDeclaration() << ");"
- << std::endl;
+ GenerateVariableDeclaration(variable);
if (initialization) {
GenerateAssignToVariable(variable, *initialization);
}
@@ -1253,7 +1537,7 @@ Variable* ImplementationVisitor::GenerateVariableDeclaration(
void ImplementationVisitor::GenerateParameter(
const std::string& parameter_name) {
const Value* val = declarations()->LookupValue(parameter_name);
- std::string var = val->GetValueForDeclaration();
+ std::string var = val->value();
GenerateIndent();
source_out() << val->type()->GetGeneratedTypeName() << " " << var << " = ";
@@ -1282,18 +1566,38 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
if (!callee_result.type()->IsFunctionPointerType()) {
std::stringstream stream;
stream << "Expected a function pointer type but found "
- << callee_result.type();
+ << *callee_result.type();
ReportError(stream.str());
}
const FunctionPointerType* type =
FunctionPointerType::cast(callee_result.type());
+ if (type->parameter_types().size() != parameter_types.size()) {
+ std::stringstream stream;
+ stream << "parameter count mismatch calling function pointer with Type: "
+ << *type << " - expected "
+ << std::to_string(type->parameter_types().size()) << ", found "
+ << std::to_string(parameter_types.size());
+ ReportError(stream.str());
+ }
+
+ ParameterTypes types{type->parameter_types(), false};
+ Signature sig;
+ sig.parameter_types = types;
+ if (!IsCompatibleSignature(sig, parameter_types, {})) {
+ std::stringstream stream;
+ stream << "parameters do not match function pointer signature. Expected: ("
+ << type->parameter_types() << ") but got: (" << parameter_types
+ << ")";
+ ReportError(stream.str());
+ }
+
std::vector<std::string> variables;
for (size_t current = 0; current < arguments.parameters.size(); ++current) {
const Type* to_type = type->parameter_types()[current];
VisitResult result =
GenerateImplicitConvert(to_type, arguments.parameters[current]);
- variables.push_back(result.variable());
+ variables.push_back(RValueFlattenStructs(result));
}
std::string result_variable_name;
@@ -1301,14 +1605,24 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
if (no_result) {
GenerateIndent();
} else {
- result_variable_name = GenerateNewTempVariable(type->return_type());
- source_out() << "UncheckedCast<";
- source_out() << type->return_type()->GetGeneratedTNodeTypeName();
- source_out() << ">(";
+ const Type* return_type = type->return_type();
+ result_variable_name = GenerateNewTempVariable(return_type);
+ if (return_type->IsStructType()) {
+ source_out() << "(";
+ } else {
+ source_out() << "UncheckedCast<";
+ source_out() << type->return_type()->GetGeneratedTNodeTypeName();
+ source_out() << ">(";
+ }
}
Builtin* example_builtin =
declarations()->FindSomeInternalBuiltinWithType(type);
+ if (!example_builtin) {
+ std::stringstream stream;
+ stream << "unable to find any builtin with type \"" << *type << "\"";
+ ReportError(stream.str());
+ }
if (is_tailcall) {
source_out() << "TailCallStub(";
@@ -1317,7 +1631,7 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
}
source_out() << "Builtins::CallableFor(isolate(), Builtins::k"
<< example_builtin->name() << ").descriptor(), "
- << callee_result.variable() << ", ";
+ << RValueFlattenStructs(callee_result) << ", ";
size_t total_parameters = 0;
for (size_t i = 0; i < arguments.parameters.size(); ++i) {
@@ -1334,20 +1648,29 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
}
VisitResult ImplementationVisitor::GenerateCall(
- const std::string& callable_name, const Arguments& arguments,
- bool is_tailcall) {
- TypeVector parameter_types(arguments.parameters.GetTypeVector());
- Callable* callable = LookupCall(callable_name, parameter_types);
+ const std::string& callable_name, Arguments arguments, bool is_tailcall) {
+ Callable* callable = LookupCall(callable_name, arguments);
+
+ // Operators used in a branching context can also be function calls that never
+ // return but have a True and False label
+ if (arguments.labels.size() == 0 &&
+ callable->signature().labels.size() == 2) {
+ Label* true_label = declarations()->LookupLabel(kTrueLabelName);
+ arguments.labels.push_back(true_label);
+ Label* false_label = declarations()->LookupLabel(kFalseLabelName);
+ arguments.labels.push_back(false_label);
+ }
+
const Type* result_type = callable->signature().return_type;
std::vector<std::string> variables;
for (size_t current = 0; current < arguments.parameters.size(); ++current) {
const Type* to_type = (current >= callable->signature().types().size())
- ? GetTypeOracle().GetObjectType()
+ ? TypeOracle::GetObjectType()
: callable->signature().types()[current];
VisitResult result =
GenerateImplicitConvert(to_type, arguments.parameters[current]);
- variables.push_back(result.variable());
+ variables.push_back(RValueFlattenStructs(result));
}
std::string result_variable_name;
@@ -1356,9 +1679,13 @@ VisitResult ImplementationVisitor::GenerateCall(
} else {
result_variable_name = GenerateNewTempVariable(result_type);
if (!result_type->IsConstexpr()) {
- source_out() << "UncheckedCast<";
- source_out() << result_type->GetGeneratedTNodeTypeName();
- source_out() << ">(";
+ if (result_type->IsStructType()) {
+ source_out() << "(";
+ } else {
+ source_out() << "UncheckedCast<";
+ source_out() << result_type->GetGeneratedTNodeTypeName();
+ source_out() << ">(";
+ }
}
}
if (callable->IsBuiltin()) {
@@ -1425,13 +1752,13 @@ VisitResult ImplementationVisitor::GenerateCall(
Variable* variable = label->GetParameter(j);
if (!(variable->type() == t)) {
std::stringstream s;
- s << "mismatch of label parameters (expected " << t << " got "
- << label->GetParameter(j)->type() << " for parameter "
+ s << "mismatch of label parameters (expected " << *t << " got "
+ << *label->GetParameter(j)->type() << " for parameter "
<< std::to_string(i + 1) << ")";
ReportError(s.str());
}
j++;
- source_out() << variable->GetValueForDeclaration();
+ source_out() << variable->value();
}
label->MarkUsed();
}
@@ -1449,17 +1776,35 @@ VisitResult ImplementationVisitor::GenerateCall(
}
void ImplementationVisitor::Visit(StandardDeclaration* decl) {
- Visit(decl->callable, {}, decl->body);
+ Signature signature = MakeSignature(decl->callable->signature.get());
+ Visit(decl->callable, signature, decl->body);
}
void ImplementationVisitor::Visit(SpecializationDeclaration* decl) {
- Generic* generic = declarations()->LookupGeneric(decl->name);
- TypeVector specialization_types = GetTypeVector(decl->generic_parameters);
- CallableNode* callable = generic->declaration()->callable;
- SpecializeGeneric({{generic, specialization_types},
- callable,
- decl->signature.get(),
- decl->body});
+ Signature signature_with_types = MakeSignature(decl->signature.get());
+ Declarations::NodeScopeActivator specialization_activator(declarations(),
+ decl);
+ GenericList* generic_list = declarations()->LookupGeneric(decl->name);
+ for (Generic* generic : generic_list->list()) {
+ CallableNode* callable = generic->declaration()->callable;
+ Signature generic_signature_with_types =
+ MakeSignature(callable->signature.get());
+ if (signature_with_types.HasSameTypesAs(generic_signature_with_types)) {
+ TypeVector specialization_types = GetTypeVector(decl->generic_parameters);
+ SpecializeGeneric({{generic, specialization_types},
+ callable,
+ decl->signature.get(),
+ decl->body,
+ decl->pos});
+ return;
+ }
+ }
+ // Because the DeclarationVisitor already performed the same lookup
+ // as above to find a specialization match and already threw if it didn't
+ // find one, failure to find a match here should never happen.
+ // TODO(danno): Remember the specialization found in the declaration visitor
+ // so that the lookup doesn't have to be repeated here.
+ UNREACHABLE();
}
VisitResult ImplementationVisitor::Visit(CallExpression* expr,
@@ -1468,26 +1813,22 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
std::string name = expr->callee.name;
bool has_template_arguments = expr->callee.generic_arguments.size() != 0;
if (has_template_arguments) {
- Generic* generic = declarations()->LookupGeneric(expr->callee.name);
TypeVector specialization_types =
GetTypeVector(expr->callee.generic_arguments);
name = GetGeneratedCallableName(name, specialization_types);
- CallableNode* callable = generic->declaration()->callable;
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
+ for (auto generic :
+ declarations()->LookupGeneric(expr->callee.name)->list()) {
+ CallableNode* callable = generic->declaration()->callable;
+ if (generic->declaration()->body) {
+ QueueGenericSpecialization({generic, specialization_types}, callable,
+ callable->signature.get(),
+ generic->declaration()->body);
+ }
+ }
}
for (Expression* arg : expr->arguments)
arguments.parameters.push_back(Visit(arg));
arguments.labels = LabelsFromIdentifiers(expr->labels);
- if (expr->is_operator) {
- if (is_tailcall) {
- std::stringstream s;
- s << "can't tail call an operator";
- ReportError(s.str());
- }
- return GenerateOperation(name, arguments);
- }
VisitResult result;
if (!has_template_arguments &&
declarations()->Lookup(expr->callee.name)->IsValue()) {
@@ -1497,10 +1838,10 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
}
if (!result.type()->IsVoidOrNever()) {
GenerateIndent();
- source_out() << "USE(" << result.variable() << ");" << std::endl;
+ source_out() << "USE(" << RValueFlattenStructs(result) << ");" << std::endl;
}
if (is_tailcall) {
- result = {GetTypeOracle().GetNeverType(), ""};
+ result = {TypeOracle::GetNeverType(), ""};
}
return result;
}
@@ -1529,7 +1870,7 @@ void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
Label* true_label,
Label* false_label) {
GenerateIndent();
- source_out() << "Branch(" << condition.variable() << ", "
+ source_out() << "Branch(" << RValueFlattenStructs(condition) << ", "
<< true_label->generated() << ", " << false_label->generated()
<< ");" << std::endl;
}
@@ -1541,12 +1882,12 @@ bool ImplementationVisitor::GenerateExpressionBranch(
Declarations::NodeScopeActivator scope(declarations(), expression);
VisitResult expression_result = Visit(expression);
- if (expression_result.type() == GetTypeOracle().GetBoolType()) {
+ if (expression_result.type() == TypeOracle::GetBoolType()) {
GenerateBranch(expression_result, statement_labels[0], statement_labels[1]);
} else {
- if (expression_result.type() != GetTypeOracle().GetNeverType()) {
+ if (expression_result.type() != TypeOracle::GetNeverType()) {
std::stringstream s;
- s << "unexpected return type " << expression_result.type()
+ s << "unexpected return type " << *expression_result.type()
<< " for branch expression";
ReportError(s.str());
}
@@ -1561,22 +1902,22 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
if (destination_type == source.type()) {
return source;
}
- if (GetTypeOracle().IsImplicitlyConverableFrom(destination_type,
- source.type())) {
- VisitResult result(source.type(), source.variable());
- Arguments args;
- args.parameters = {result};
- return GenerateOperation("convert<>", args, destination_type);
- } else if (GetTypeOracle().IsAssignableFrom(destination_type,
+
+ if (TypeOracle::IsImplicitlyConvertableFrom(destination_type,
source.type())) {
- return VisitResult(destination_type, source.variable());
+ std::string name =
+ GetGeneratedCallableName(kFromConstexprMacroName, {destination_type});
+ return GenerateCall(name, {{source}, {}}, false);
+ } else if (IsAssignableFrom(destination_type, source.type())) {
+ source.SetType(destination_type);
+ return source;
} else {
std::stringstream s;
- s << "cannot use expression of type " << source.type()
- << " as a value of type " << destination_type;
+ s << "cannot use expression of type " << *source.type()
+ << " as a value of type " << *destination_type;
ReportError(s.str());
}
- return VisitResult(GetTypeOracle().GetVoidType(), "");
+ return VisitResult(TypeOracle::GetVoidType(), "");
}
std::string ImplementationVisitor::NewTempVariable() {
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 3c609b3dc0..82cbb48ce8 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -37,40 +37,36 @@ class ImplementationVisitor : public FileVisitor {
const Type* Visit(Statement* stmt);
void Visit(Declaration* decl);
+ VisitResult Visit(StructExpression* decl);
+
LocationReference GetLocationReference(LocationExpression* location);
LocationReference GetLocationReference(IdentifierExpression* expr) {
return LocationReference(declarations()->LookupValue(expr->name), {}, {});
}
- LocationReference GetLocationReference(FieldAccessExpression* expr) {
- return LocationReference({}, Visit(expr->object), {});
- }
+ LocationReference GetLocationReference(FieldAccessExpression* expr);
LocationReference GetLocationReference(ElementAccessExpression* expr) {
return LocationReference({}, Visit(expr->array), Visit(expr->index));
}
+ std::string RValueFlattenStructs(VisitResult result);
+
+ VisitResult GenerateFetchFromLocation(LocationReference reference) {
+ const Value* value = reference.value;
+ return VisitResult(value->type(), value);
+ }
VisitResult GenerateFetchFromLocation(LocationExpression* location,
LocationReference reference);
VisitResult GenerateFetchFromLocation(IdentifierExpression* expr,
LocationReference reference) {
- Value* value = reference.value;
- if (value->IsVariable() && !Variable::cast(value)->IsDefined()) {
- std::stringstream s;
- s << "\"" << value->name() << "\" is used before it is defined";
- ReportError(s.str());
- }
- return VisitResult({value->type(), value->GetValueForRead()});
+ return GenerateFetchFromLocation(reference);
}
VisitResult GenerateFetchFromLocation(FieldAccessExpression* expr,
- LocationReference reference) {
- Arguments arguments;
- arguments.parameters = {reference.base};
- return GenerateOperation(std::string(".") + expr->field, arguments);
- }
+ LocationReference reference);
VisitResult GenerateFetchFromLocation(ElementAccessExpression* expr,
LocationReference reference) {
Arguments arguments;
arguments.parameters = {reference.base, reference.index};
- return GenerateOperation("[]", arguments);
+ return GenerateCall("[]", arguments);
}
VisitResult GetBuiltinCode(Builtin* builtin);
@@ -83,9 +79,6 @@ class ImplementationVisitor : public FileVisitor {
return GenerateFetchFromLocation(expr, GetLocationReference(expr));
}
- VisitResult Visit(CastExpression* expr);
- VisitResult Visit(ConvertExpression* expr);
-
void Visit(ModuleDeclaration* decl);
void Visit(DefaultModuleDeclaration* decl) {
Visit(implicit_cast<ModuleDeclaration*>(decl));
@@ -94,7 +87,9 @@ class ImplementationVisitor : public FileVisitor {
Visit(implicit_cast<ModuleDeclaration*>(decl));
}
void Visit(TypeDeclaration* decl) {}
- void Visit(ConstDeclaration* decl) {}
+ void Visit(TypeAliasDeclaration* decl) {}
+ void Visit(ExternConstDeclaration* decl) {}
+ void Visit(StructDeclaration* decl);
void Visit(StandardDeclaration* decl);
void Visit(GenericDeclaration* decl) {}
void Visit(SpecializationDeclaration* decl);
@@ -110,6 +105,7 @@ class ImplementationVisitor : public FileVisitor {
void Visit(ExternalRuntimeDeclaration* decl, const Signature& signature,
Statement* body) {}
void Visit(CallableNode* decl, const Signature& signature, Statement* body);
+ void Visit(ConstDeclaration* decl);
VisitResult Visit(CallExpression* expr, bool is_tail = false);
const Type* Visit(TailCallStatement* stmt);
@@ -127,7 +123,7 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(StringLiteralExpression* expr);
VisitResult Visit(NumberLiteralExpression* expr);
- const Type* Visit(TryCatchStatement* stmt);
+ const Type* Visit(TryLabelStatement* stmt);
const Type* Visit(ReturnStatement* stmt);
const Type* Visit(GotoStatement* stmt);
const Type* Visit(IfStatement* stmt);
@@ -142,6 +138,9 @@ class ImplementationVisitor : public FileVisitor {
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
+ void BeginModuleFile(Module* module);
+ void EndModuleFile(Module* module);
+
void GenerateImplementation(const std::string& dir, Module* module);
private:
@@ -172,6 +171,13 @@ class ImplementationVisitor : public FileVisitor {
ImplementationVisitor* visitor_;
};
+ Callable* LookupCall(const std::string& name, const Arguments& arguments);
+
+ bool GenerateChangedVarFromControlSplit(const Variable* v, bool first = true);
+
+ void GetFlattenedStructsVars(const Variable* base,
+ std::set<const Variable*>& vars);
+
void GenerateChangedVarsFromControlSplit(AstNode* node);
const Type* GetCommonType(const Type* left, const Type* right);
@@ -184,6 +190,8 @@ class ImplementationVisitor : public FileVisitor {
const LocationReference& reference,
VisitResult assignment_value);
+ void GenerateVariableDeclaration(const Variable* var);
+
Variable* GenerateVariableDeclaration(
AstNode* node, const std::string& name,
const base::Optional<const Type*>& type,
@@ -194,7 +202,7 @@ class ImplementationVisitor : public FileVisitor {
void GenerateParameterList(const NameVector& list, size_t first = 0);
VisitResult GenerateCall(const std::string& callable_name,
- const Arguments& parameters, bool tail_call);
+ Arguments parameters, bool tail_call = false);
VisitResult GeneratePointerCall(Expression* callee,
const Arguments& parameters, bool tail_call);
@@ -213,10 +221,11 @@ class ImplementationVisitor : public FileVisitor {
void GenerateMacroFunctionDeclaration(std::ostream& o,
const std::string& macro_prefix,
Macro* macro);
-
- VisitResult GenerateOperation(const std::string& operation,
- Arguments arguments,
- base::Optional<const Type*> return_type = {});
+ void GenerateFunctionDeclaration(std::ostream& o,
+ const std::string& macro_prefix,
+ const std::string& name,
+ const Signature& signature,
+ const NameVector& parameter_names);
VisitResult GenerateImplicitConvert(const Type* destination_type,
VisitResult source);
@@ -225,7 +234,7 @@ class ImplementationVisitor : public FileVisitor {
const CallableNodeSignature* signature,
Statement* body) override {
Declarations::GenericScopeActivator scope(declarations(), key);
- Visit(callable, MakeSignature(callable, signature), body);
+ Visit(callable, MakeSignature(signature), body);
}
std::string NewTempVariable();
diff --git a/deps/v8/src/torque/parameter-difference.h b/deps/v8/src/torque/parameter-difference.h
new file mode 100644
index 0000000000..aaa1745af2
--- /dev/null
+++ b/deps/v8/src/torque/parameter-difference.h
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_PARAMETER_DIFFERENCE_H_
+#define V8_TORQUE_PARAMETER_DIFFERENCE_H_
+
+#include <vector>
+
+#include "src/torque/types.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class ParameterDifference {
+ public:
+ ParameterDifference(const TypeVector& to, const TypeVector& from) {
+ DCHECK_EQ(to.size(), from.size());
+ for (size_t i = 0; i < to.size(); ++i) {
+ AddParameter(to[i], from[i]);
+ }
+ }
+
+ // An overload is selected if it is strictly better than all alternatives.
+ // This means that it has to be strictly better in at least one parameter,
+ // and better or equally good in all others.
+ //
+ // When comparing a pair of corresponding parameters of two overloads...
+ // ... they are considered equally good if:
+ // - They are equal.
+ // - Both require some implicit conversion.
+ // ... one is considered better if:
+ // - It is a strict subtype of the other.
+ // - It doesn't require an implicit conversion, while the other does.
+ bool StrictlyBetterThan(const ParameterDifference& other) const {
+ DCHECK_EQ(difference_.size(), other.difference_.size());
+ bool better_parameter_found = false;
+ for (size_t i = 0; i < difference_.size(); ++i) {
+ base::Optional<const Type*> a = difference_[i];
+ base::Optional<const Type*> b = other.difference_[i];
+ if (a == b) {
+ continue;
+ } else if (a && b && a != b && (*a)->IsSubtypeOf(*b)) {
+ DCHECK(!(*b)->IsSubtypeOf(*a));
+ better_parameter_found = true;
+ } else if (a && !b) {
+ better_parameter_found = true;
+ } else {
+ return false;
+ }
+ }
+ return better_parameter_found;
+ }
+
+ private:
+ // Pointwise difference between call arguments and a signature.
+ // {base::nullopt} means that an implicit conversion was necessary,
+ // otherwise we store the supertype found in the signature.
+ std::vector<base::Optional<const Type*>> difference_;
+
+ void AddParameter(const Type* to, const Type* from) {
+ if (from->IsSubtypeOf(to)) {
+ difference_.push_back(to);
+ } else if (IsAssignableFrom(to, from)) {
+ difference_.push_back(base::nullopt);
+ } else {
+ UNREACHABLE();
+ }
+ }
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_PARAMETER_DIFFERENCE_H_
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index f1027df0c7..c771623738 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -38,6 +38,19 @@ class FailedParseErrorStrategy : public antlr4::DefaultErrorStrategy {
bool failed_;
};
+class TorqueErrorListener : public antlr4::BaseErrorListener {
+ public:
+ TorqueErrorListener() : BaseErrorListener() {}
+
+ void syntaxError(antlr4::Recognizer* recognizer,
+ antlr4::Token* /*offendingSymbol*/, size_t line,
+ size_t charPositionInLine, const std::string& msg,
+ std::exception_ptr /*e*/) {
+ std::cerr << recognizer->getInputStream()->getSourceName() << ": " << line
+ << ":" << charPositionInLine << " " << msg << "\n";
+ }
+};
+
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
std::vector<SourceFileContext> file_contexts;
@@ -45,6 +58,7 @@ int WrappedMain(int argc, const char** argv) {
SourceFileContext context;
size_t lexer_errors = 0;
auto error_strategy = std::make_shared<FailedParseErrorStrategy>();
+ TorqueErrorListener error_listener;
bool verbose = false;
SourceFileMap::Scope scope;
for (int i = 1; i < argc; ++i) {
@@ -66,6 +80,8 @@ int WrappedMain(int argc, const char** argv) {
new antlr4::ANTLRFileStream(context.name.c_str()));
context.lexer =
std::unique_ptr<TorqueLexer>(new TorqueLexer(context.stream.get()));
+ context.lexer->removeErrorListeners();
+ context.lexer->addErrorListener(&error_listener);
context.tokens = std::unique_ptr<antlr4::CommonTokenStream>(
new antlr4::CommonTokenStream(context.lexer.get()));
context.tokens->fill();
@@ -73,6 +89,8 @@ int WrappedMain(int argc, const char** argv) {
context.parser =
std::unique_ptr<TorqueParser>(new TorqueParser(context.tokens.get()));
context.parser->setErrorHandler(error_strategy);
+ context.parser->removeErrorListeners();
+ context.parser->addErrorListener(&error_listener);
context.file = context.parser->file();
ast_generator.visitSourceFile(&context);
}
@@ -83,6 +101,7 @@ int WrappedMain(int argc, const char** argv) {
GlobalContext global_context(std::move(ast_generator).GetAst());
if (verbose) global_context.SetVerbose();
+ TypeOracle::Scope type_oracle(global_context.declarations());
if (output_directory.length() != 0) {
{
@@ -96,9 +115,14 @@ int WrappedMain(int argc, const char** argv) {
}
ImplementationVisitor visitor(global_context);
+ for (auto& module : global_context.GetModules()) {
+ visitor.BeginModuleFile(module.second.get());
+ }
+
visitor.Visit(global_context.ast());
for (auto& module : global_context.GetModules()) {
+ visitor.EndModuleFile(module.second.get());
visitor.GenerateImplementation(output_directory, module.second.get());
}
}
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 6c7462b02e..1e498c19f4 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -5,6 +5,7 @@
#ifndef V8_TORQUE_TYPE_ORACLE_H_
#define V8_TORQUE_TYPE_ORACLE_H_
+#include "src/torque/contextual.h"
#include "src/torque/declarable.h"
#include "src/torque/declarations.h"
#include "src/torque/types.h"
@@ -14,65 +15,89 @@ namespace v8 {
namespace internal {
namespace torque {
-class TypeOracle {
+class TypeOracle : public ContextualClass<TypeOracle> {
public:
explicit TypeOracle(Declarations* declarations)
: declarations_(declarations) {}
- void RegisterImplicitConversion(const Type* to, const Type* from) {
- implicit_conversions_.push_back(std::make_pair(to, from));
+ static const AbstractType* GetAbstractType(
+ const Type* parent, std::string name, std::string generated,
+ base::Optional<const AbstractType*> non_constexpr_version) {
+ AbstractType* result = new AbstractType(
+ parent, std::move(name), std::move(generated), non_constexpr_version);
+ Get().nominal_types_.push_back(std::unique_ptr<AbstractType>(result));
+ return result;
}
- const Type* GetArgumentsType() {
- return GetBuiltinType(ARGUMENTS_TYPE_STRING);
+ static const StructType* GetStructType(
+ Module* module, const std::string& name,
+ const std::vector<NameAndType>& fields) {
+ StructType* result = new StructType(module, name, fields);
+ Get().struct_types_.push_back(std::unique_ptr<StructType>(result));
+ return result;
}
- const Type* GetBoolType() { return GetBuiltinType(BOOL_TYPE_STRING); }
+ static const FunctionPointerType* GetFunctionPointerType(
+ TypeVector argument_types, const Type* return_type) {
+ const Type* code_type = Get().GetBuiltinType(CODE_TYPE_STRING);
+ return Get().function_pointer_types_.Add(
+ FunctionPointerType(code_type, argument_types, return_type));
+ }
- const Type* GetConstexprBoolType() {
- return GetBuiltinType(CONSTEXPR_BOOL_TYPE_STRING);
+ static const Type* GetUnionType(UnionType type) {
+ if (base::Optional<const Type*> single = type.GetSingleMember()) {
+ return *single;
+ }
+ return Get().union_types_.Add(std::move(type));
}
- const Type* GetVoidType() { return GetBuiltinType(VOID_TYPE_STRING); }
+ static const Type* GetUnionType(const Type* a, const Type* b) {
+ if (a->IsSubtypeOf(b)) return b;
+ if (b->IsSubtypeOf(a)) return a;
+ UnionType result = UnionType::FromType(a);
+ result.Extend(b);
+ return GetUnionType(std::move(result));
+ }
- const Type* GetObjectType() { return GetBuiltinType(OBJECT_TYPE_STRING); }
+ static const Type* GetArgumentsType() {
+ return Get().GetBuiltinType(ARGUMENTS_TYPE_STRING);
+ }
- const Type* GetStringType() { return GetBuiltinType(STRING_TYPE_STRING); }
+ static const Type* GetBoolType() {
+ return Get().GetBuiltinType(BOOL_TYPE_STRING);
+ }
- const Type* GetIntPtrType() { return GetBuiltinType(INTPTR_TYPE_STRING); }
+ static const Type* GetConstexprBoolType() {
+ return Get().GetBuiltinType(CONSTEXPR_BOOL_TYPE_STRING);
+ }
- const Type* GetNeverType() { return GetBuiltinType(NEVER_TYPE_STRING); }
+ static const Type* GetVoidType() {
+ return Get().GetBuiltinType(VOID_TYPE_STRING);
+ }
- const Type* GetConstInt31Type() {
- return GetBuiltinType(CONST_INT31_TYPE_STRING);
+ static const Type* GetObjectType() {
+ return Get().GetBuiltinType(OBJECT_TYPE_STRING);
}
- bool IsAssignableFrom(const Type* to, const Type* from) {
- if (to == from) return true;
- if (from->IsSubtypeOf(to) && !from->IsConstexpr()) return true;
- return IsImplicitlyConverableFrom(to, from);
+ static const Type* GetConstStringType() {
+ return Get().GetBuiltinType(CONST_STRING_TYPE_STRING);
}
- bool IsImplicitlyConverableFrom(const Type* to, const Type* from) {
- for (auto& conversion : implicit_conversions_) {
- if (conversion.first == to && conversion.second == from) {
- return true;
- }
- }
- return false;
- }
-
- bool IsCompatibleSignature(const ParameterTypes& to, const TypeVector& from) {
- auto i = to.types.begin();
- for (auto current : from) {
- if (i == to.types.end()) {
- if (!to.var_args) return false;
- if (!IsAssignableFrom(GetObjectType(), current)) return false;
- } else {
- if (!IsAssignableFrom(*i++, current)) return false;
- }
- }
- return true;
+ static const Type* GetIntPtrType() {
+ return Get().GetBuiltinType(INTPTR_TYPE_STRING);
+ }
+
+ static const Type* GetNeverType() {
+ return Get().GetBuiltinType(NEVER_TYPE_STRING);
+ }
+
+ static const Type* GetConstInt31Type() {
+ return Get().GetBuiltinType(CONST_INT31_TYPE_STRING);
+ }
+
+ static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
+ std::string name = GetGeneratedCallableName(kFromConstexprMacroName, {to});
+ return Get().declarations_->TryLookupMacro(name, {from}) != nullptr;
}
private:
@@ -81,7 +106,10 @@ class TypeOracle {
}
Declarations* declarations_;
- std::vector<std::pair<const Type*, const Type*>> implicit_conversions_;
+ Deduplicator<FunctionPointerType> function_pointer_types_;
+ Deduplicator<UnionType> union_types_;
+ std::vector<std::unique_ptr<Type>> nominal_types_;
+ std::vector<std::unique_ptr<Type>> struct_types_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index f319b5d94d..3ba7846678 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -6,43 +6,208 @@
#include <iostream>
#include "src/torque/declarable.h"
+#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
namespace v8 {
namespace internal {
namespace torque {
-std::ostream& operator<<(std::ostream& os, const Signature& sig) {
+std::string Type::ToString() const {
+ if (aliases_.size() == 0) return ToExplicitString();
+ if (aliases_.size() == 1) return *aliases_.begin();
+ std::stringstream result;
+ int i = 0;
+ for (const std::string& alias : aliases_) {
+ if (i == 0) {
+ result << alias << " (aka. ";
+ } else if (i == 1) {
+ result << alias;
+ } else {
+ result << ", " << alias;
+ }
+ ++i;
+ }
+ result << ")";
+ return result.str();
+}
+
+bool Type::IsSubtypeOf(const Type* supertype) const {
+ if (const UnionType* union_type = UnionType::DynamicCast(supertype)) {
+ return union_type->IsSupertypeOf(this);
+ }
+ const Type* subtype = this;
+ while (subtype != nullptr) {
+ if (subtype == supertype) return true;
+ subtype = subtype->parent();
+ }
+ return false;
+}
+
+// static
+const Type* Type::CommonSupertype(const Type* a, const Type* b) {
+ int diff = a->Depth() - b->Depth();
+ const Type* a_supertype = a;
+ const Type* b_supertype = b;
+ for (; diff > 0; --diff) a_supertype = a_supertype->parent();
+ for (; diff < 0; ++diff) b_supertype = b_supertype->parent();
+ while (a_supertype && b_supertype) {
+ if (a_supertype == b_supertype) return a_supertype;
+ a_supertype = a_supertype->parent();
+ b_supertype = b_supertype->parent();
+ }
+ ReportError("types " + a->ToString() + " and " + b->ToString() +
+ " have no common supertype");
+}
+
+int Type::Depth() const {
+ int result = 0;
+ for (const Type* current = parent_; current; current = current->parent_) {
+ ++result;
+ }
+ return result;
+}
+
+bool Type::IsAbstractName(const std::string& name) const {
+ if (!IsAbstractType()) return false;
+ return AbstractType::cast(this)->name() == name;
+}
+
+std::string AbstractType::GetGeneratedTNodeTypeName() const {
+ std::string result = GetGeneratedTypeName();
+ DCHECK_EQ(result.substr(0, 6), "TNode<");
+ result = result.substr(6, result.length() - 7);
+ return result;
+}
+
+std::string FunctionPointerType::ToExplicitString() const {
+ std::stringstream result;
+ result << "builtin (";
+ PrintCommaSeparatedList(result, parameter_types_);
+ result << ") => " << *return_type_;
+ return result.str();
+}
+
+std::string FunctionPointerType::MangledName() const {
+ std::stringstream result;
+ result << "FT";
+ for (const Type* t : parameter_types_) {
+ std::string arg_type_string = t->MangledName();
+ result << arg_type_string.size() << arg_type_string;
+ }
+ std::string return_type_string = return_type_->MangledName();
+ result << return_type_string.size() << return_type_string;
+ return result.str();
+}
+
+std::string UnionType::ToExplicitString() const {
+ std::stringstream result;
+ result << "(";
+ bool first = true;
+ for (const Type* t : types_) {
+ if (!first) {
+ result << " | ";
+ }
+ first = false;
+ result << *t;
+ }
+ result << ")";
+ return result.str();
+}
+
+std::string UnionType::MangledName() const {
+ std::stringstream result;
+ result << "UT";
+ for (const Type* t : types_) {
+ std::string arg_type_string = t->MangledName();
+ result << arg_type_string.size() << arg_type_string;
+ }
+ return result.str();
+}
+
+std::string UnionType::GetGeneratedTNodeTypeName() const {
+ if (types_.size() <= 3) {
+ std::set<std::string> members;
+ for (const Type* t : types_) {
+ members.insert(t->GetGeneratedTNodeTypeName());
+ }
+ if (members == std::set<std::string>{"Smi", "HeapNumber"}) {
+ return "Number";
+ }
+ if (members == std::set<std::string>{"Smi", "HeapNumber", "BigInt"}) {
+ return "Numeric";
+ }
+ }
+ return parent()->GetGeneratedTNodeTypeName();
+}
+
+const Type* UnionType::NonConstexprVersion() const {
+ if (IsConstexpr()) {
+ auto it = types_.begin();
+ UnionType result((*it)->NonConstexprVersion());
+ ++it;
+ for (; it != types_.end(); ++it) {
+ result.Extend((*it)->NonConstexprVersion());
+ }
+ return TypeOracle::GetUnionType(std::move(result));
+ }
+ return this;
+}
+
+std::string StructType::ToExplicitString() const {
+ std::stringstream result;
+ result << "{";
+ PrintCommaSeparatedList(result, fields_);
+ result << "}";
+ return result.str();
+}
+
+void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << "(";
- for (size_t i = 0; i < sig.parameter_names.size(); ++i) {
+ for (size_t i = 0; i < sig.parameter_types.types.size(); ++i) {
if (i > 0) os << ", ";
- if (!sig.parameter_names.empty()) os << sig.parameter_names[i] << ": ";
- os << sig.parameter_types.types[i];
+ if (with_names && !sig.parameter_names.empty()) {
+ os << sig.parameter_names[i] << ": ";
+ }
+ os << *sig.parameter_types.types[i];
}
if (sig.parameter_types.var_args) {
if (sig.parameter_names.size()) os << ", ";
os << "...";
}
os << ")";
- if (!sig.return_type->IsVoid()) {
- os << ": " << sig.return_type;
+ os << ": " << *sig.return_type;
+
+ if (sig.labels.empty()) return;
+
+ os << " labels ";
+ for (size_t i = 0; i < sig.labels.size(); ++i) {
+ if (i > 0) os << ", ";
+ if (with_names) os << sig.labels[i].name;
+
+ if (sig.labels[i].types.size() > 0) os << "(" << sig.labels[i].types << ")";
}
+}
+
+std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type) {
+ os << name_and_type.name;
+ os << ": ";
+ os << *name_and_type.type;
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const Signature& sig) {
+ PrintSignature(os, sig, true);
return os;
}
std::ostream& operator<<(std::ostream& os, const TypeVector& types) {
- for (size_t i = 0; i < types.size(); ++i) {
- if (i > 0) os << ", ";
- os << types[i];
- }
+ PrintCommaSeparatedList(os, types);
return os;
}
std::ostream& operator<<(std::ostream& os, const ParameterTypes& p) {
- for (size_t i = 0; i < p.types.size(); ++i) {
- if (i > 0) os << ", ";
- os << p.types[i];
- }
+ PrintCommaSeparatedList(os, p.types);
if (p.var_args) {
if (p.types.size() > 0) os << ", ";
os << "...";
@@ -68,6 +233,57 @@ bool Signature::HasSameTypesAs(const Signature& other) const {
return true;
}
+bool IsAssignableFrom(const Type* to, const Type* from) {
+ if (to == from) return true;
+ if (from->IsSubtypeOf(to)) return true;
+ return TypeOracle::IsImplicitlyConvertableFrom(to, from);
+}
+
+bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
+ const std::vector<Label*>& labels) {
+ auto i = sig.parameter_types.types.begin();
+ if (sig.parameter_types.types.size() > types.size()) return false;
+ // TODO(danno): The test below is actually insufficient. The labels'
+ // parameters must be checked too. ideally, the named part of
+ // LabelDeclarationVector would be factored out so that the label count and
+ // parameter types could be passed separately.
+ if (sig.labels.size() != labels.size()) return false;
+ for (auto current : types) {
+ if (i == sig.parameter_types.types.end()) {
+ if (!sig.parameter_types.var_args) return false;
+ if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
+ } else {
+ if (!IsAssignableFrom(*i++, current)) return false;
+ }
+ }
+ return true;
+}
+
+bool operator<(const Type& a, const Type& b) {
+ return a.MangledName() < b.MangledName();
+}
+
+VisitResult::VisitResult(const Type* type, const Value* declarable)
+ : type_(type), value_(), declarable_(declarable) {}
+
+std::string VisitResult::LValue() const {
+ return std::string("*") + (declarable_ ? (*declarable_)->value() : value_);
+}
+
+std::string VisitResult::RValue() const {
+ if (declarable()) {
+ auto value = *declarable();
+ if (value->IsVariable() && !Variable::cast(value)->IsDefined()) {
+ std::stringstream s;
+ s << "\"" << value->name() << "\" is used before it is defined";
+ ReportError(s.str());
+ }
+ return value->RValue();
+ } else {
+ return value_;
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index a5362150f1..f1b6cd9c7e 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -5,9 +5,12 @@
#ifndef V8_TORQUE_TYPES_H_
#define V8_TORQUE_TYPES_H_
+#include <algorithm>
+#include <set>
#include <string>
#include <vector>
+#include "src/base/optional.h"
#include "src/torque/utils.h"
namespace v8 {
@@ -19,10 +22,10 @@ static const char* const NEVER_TYPE_STRING = "never";
static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
static const char* const BOOL_TYPE_STRING = "bool";
static const char* const VOID_TYPE_STRING = "void";
-static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
+static const char* const ARGUMENTS_TYPE_STRING = "constexpr Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
static const char* const OBJECT_TYPE_STRING = "Object";
-static const char* const STRING_TYPE_STRING = "String";
+static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
static const char* const CODE_TYPE_STRING = "Code";
static const char* const INTPTR_TYPE_STRING = "intptr";
static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
@@ -30,15 +33,23 @@ static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
class Label;
+class Value;
class TypeBase {
public:
- enum class Kind { kAbstractType, kFunctionPointerType };
+ enum class Kind {
+ kAbstractType,
+ kFunctionPointerType,
+ kUnionType,
+ kStructType
+ };
virtual ~TypeBase() {}
bool IsAbstractType() const { return kind() == Kind::kAbstractType; }
bool IsFunctionPointerType() const {
return kind() == Kind::kFunctionPointerType;
}
+ bool IsUnionType() const { return kind() == Kind::kUnionType; }
+ bool IsStructType() const { return kind() == Kind::kStructType; }
protected:
explicit TypeBase(Kind kind) : kind_(kind) {}
@@ -70,8 +81,9 @@ class TypeBase {
class Type : public TypeBase {
public:
- bool IsSubtypeOf(const Type* supertype) const;
- virtual std::string ToString() const = 0;
+ virtual bool IsSubtypeOf(const Type* supertype) const;
+
+ std::string ToString() const;
virtual std::string MangledName() const = 0;
bool IsVoid() const { return IsAbstractName(VOID_TYPE_STRING); }
bool IsNever() const { return IsAbstractName(NEVER_TYPE_STRING); }
@@ -80,49 +92,71 @@ class Type : public TypeBase {
return IsAbstractName(CONSTEXPR_BOOL_TYPE_STRING);
}
bool IsVoidOrNever() const { return IsVoid() || IsNever(); }
- virtual const std::string& GetGeneratedTypeName() const = 0;
+ virtual std::string GetGeneratedTypeName() const = 0;
virtual std::string GetGeneratedTNodeTypeName() const = 0;
virtual bool IsConstexpr() const = 0;
+ virtual const Type* NonConstexprVersion() const = 0;
+ static const Type* CommonSupertype(const Type* a, const Type* b);
+ void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
protected:
Type(TypeBase::Kind kind, const Type* parent)
: TypeBase(kind), parent_(parent) {}
const Type* parent() const { return parent_; }
+ void set_parent(const Type* t) { parent_ = t; }
+ int Depth() const;
+ virtual std::string ToExplicitString() const = 0;
private:
bool IsAbstractName(const std::string& name) const;
// If {parent_} is not nullptr, then this type is a subtype of {parent_}.
- const Type* const parent_;
+ const Type* parent_;
+ mutable std::set<std::string> aliases_;
};
using TypeVector = std::vector<const Type*>;
+struct NameAndType {
+ std::string name;
+ const Type* type;
+};
+
+std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type);
+
class AbstractType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(AbstractType);
const std::string& name() const { return name_; }
- std::string ToString() const override { return name(); }
+ std::string ToExplicitString() const override { return name(); }
std::string MangledName() const override { return "AT" + name(); }
- const std::string& GetGeneratedTypeName() const override {
- return generated_type_;
- }
+ std::string GetGeneratedTypeName() const override { return generated_type_; }
std::string GetGeneratedTNodeTypeName() const override;
bool IsConstexpr() const override {
return name().substr(0, strlen(CONSTEXPR_TYPE_PREFIX)) ==
CONSTEXPR_TYPE_PREFIX;
}
+ const Type* NonConstexprVersion() const override {
+ if (IsConstexpr()) return *non_constexpr_version_;
+ return this;
+ }
private:
- friend class Declarations;
+ friend class TypeOracle;
AbstractType(const Type* parent, const std::string& name,
- const std::string& generated_type)
+ const std::string& generated_type,
+ base::Optional<const AbstractType*> non_constexpr_version)
: Type(Kind::kAbstractType, parent),
name_(name),
- generated_type_(generated_type) {}
+ generated_type_(generated_type),
+ non_constexpr_version_(non_constexpr_version) {
+ DCHECK_EQ(non_constexpr_version_.has_value(), IsConstexpr());
+ if (parent) DCHECK(parent->IsConstexpr() == IsConstexpr());
+ }
const std::string name_;
const std::string generated_type_;
+ base::Optional<const AbstractType*> non_constexpr_version_;
};
// For now, function pointers are restricted to Code objects of Torque-defined
@@ -130,15 +164,19 @@ class AbstractType final : public Type {
class FunctionPointerType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(FunctionPointerType);
- std::string ToString() const override;
+ std::string ToExplicitString() const override;
std::string MangledName() const override;
- const std::string& GetGeneratedTypeName() const override {
+ std::string GetGeneratedTypeName() const override {
return parent()->GetGeneratedTypeName();
}
std::string GetGeneratedTNodeTypeName() const override {
return parent()->GetGeneratedTNodeTypeName();
}
- bool IsConstexpr() const override { return parent()->IsConstexpr(); }
+ bool IsConstexpr() const override {
+ DCHECK(!parent()->IsConstexpr());
+ return false;
+ }
+ const Type* NonConstexprVersion() const override { return this; }
const TypeVector& parameter_types() const { return parameter_types_; }
const Type* return_type() const { return return_type_; }
@@ -156,7 +194,7 @@ class FunctionPointerType final : public Type {
}
private:
- friend class Declarations;
+ friend class TypeOracle;
FunctionPointerType(const Type* parent, TypeVector parameter_types,
const Type* return_type)
: Type(Kind::kFunctionPointerType, parent),
@@ -167,22 +205,152 @@ class FunctionPointerType final : public Type {
const Type* const return_type_;
};
-inline std::ostream& operator<<(std::ostream& os, const Type* t) {
- os << t->ToString();
+bool operator<(const Type& a, const Type& b);
+struct TypeLess {
+ bool operator()(const Type* const a, const Type* const b) const {
+ return *a < *b;
+ }
+};
+
+class UnionType final : public Type {
+ public:
+ DECLARE_TYPE_BOILERPLATE(UnionType);
+ std::string ToExplicitString() const override;
+ std::string MangledName() const override;
+ std::string GetGeneratedTypeName() const override {
+ return "TNode<" + GetGeneratedTNodeTypeName() + ">";
+ }
+ std::string GetGeneratedTNodeTypeName() const override;
+
+ bool IsConstexpr() const override {
+ DCHECK_EQ(false, parent()->IsConstexpr());
+ return false;
+ }
+ const Type* NonConstexprVersion() const override;
+
+ friend size_t hash_value(const UnionType& p) {
+ size_t result = 0;
+ for (const Type* t : p.types_) {
+ result = base::hash_combine(result, t);
+ }
+ return result;
+ }
+ bool operator==(const UnionType& other) const {
+ return types_ == other.types_;
+ }
+
+ base::Optional<const Type*> GetSingleMember() const {
+ if (types_.size() == 1) {
+ DCHECK_EQ(*types_.begin(), parent());
+ return *types_.begin();
+ }
+ return base::nullopt;
+ }
+
+ const Type* Normalize() const {
+ if (types_.size() == 1) {
+ return parent();
+ }
+ return this;
+ }
+
+ bool IsSubtypeOf(const Type* other) const override {
+ for (const Type* member : types_) {
+ if (!member->IsSubtypeOf(other)) return false;
+ }
+ return true;
+ }
+
+ bool IsSupertypeOf(const Type* other) const {
+ for (const Type* member : types_) {
+ if (other->IsSubtypeOf(member)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void Extend(const Type* t) {
+ if (const UnionType* union_type = UnionType::DynamicCast(t)) {
+ for (const Type* member : union_type->types_) {
+ Extend(member);
+ }
+ } else {
+ if (t->IsSubtypeOf(this)) return;
+ set_parent(CommonSupertype(parent(), t));
+ for (const Type* member : types_) {
+ if (member->IsSubtypeOf(t)) {
+ types_.erase(member);
+ }
+ }
+ types_.insert(t);
+ }
+ }
+
+ static UnionType FromType(const Type* t) {
+ const UnionType* union_type = UnionType::DynamicCast(t);
+ return union_type ? UnionType(*union_type) : UnionType(t);
+ }
+
+ private:
+ explicit UnionType(const Type* t) : Type(Kind::kUnionType, t), types_({t}) {}
+
+ std::set<const Type*, TypeLess> types_;
+};
+
+class StructType final : public Type {
+ public:
+ DECLARE_TYPE_BOILERPLATE(StructType);
+ std::string ToExplicitString() const override;
+ std::string MangledName() const override { return name_; }
+ std::string GetGeneratedTypeName() const override { return GetStructName(); }
+ std::string GetGeneratedTNodeTypeName() const override { UNREACHABLE(); }
+ const Type* NonConstexprVersion() const override { return this; }
+
+ bool IsConstexpr() const override { return false; }
+
+ const std::vector<NameAndType>& fields() const { return fields_; }
+ const std::string& name() const { return name_; }
+ Module* module() const { return module_; }
+
+ private:
+ friend class TypeOracle;
+ StructType(Module* module, const std::string& name,
+ const std::vector<NameAndType>& fields)
+ : Type(Kind::kStructType, nullptr),
+ module_(module),
+ name_(name),
+ fields_(fields) {}
+
+ const std::string& GetStructName() const { return name_; }
+
+ Module* module_;
+ std::string name_;
+ std::vector<NameAndType> fields_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Type& t) {
+ os << t.ToString();
return os;
}
class VisitResult {
public:
VisitResult() {}
- VisitResult(const Type* type, const std::string& variable)
- : type_(type), variable_(variable) {}
+ VisitResult(const Type* type, const std::string& value)
+ : type_(type), value_(value), declarable_{} {}
+ VisitResult(const Type* type, const Value* declarable);
const Type* type() const { return type_; }
- const std::string& variable() const { return variable_; }
+ // const std::string& variable() const { return variable_; }
+ base::Optional<const Value*> declarable() const { return declarable_; }
+ std::string LValue() const;
+ std::string RValue() const;
+ void SetType(const Type* new_type) { type_ = new_type; }
private:
const Type* type_;
- std::string variable_;
+ std::string value_;
+ base::Optional<const Value*> declarable_;
};
class VisitResultVector : public std::vector<VisitResult> {
@@ -201,11 +369,6 @@ class VisitResultVector : public std::vector<VisitResult> {
std::ostream& operator<<(std::ostream& os, const TypeVector& types);
-struct NameAndType {
- std::string name;
- const Type* type;
-};
-
typedef std::vector<NameAndType> NameAndTypeVector;
struct LabelDefinition {
@@ -243,8 +406,13 @@ struct Arguments {
std::vector<Label*> labels;
};
+void PrintSignature(std::ostream& os, const Signature& sig, bool with_names);
std::ostream& operator<<(std::ostream& os, const Signature& sig);
+bool IsAssignableFrom(const Type* to, const Type* from);
+bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
+ const std::vector<Label*>& labels);
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 4dbf36972e..24aeb6fc11 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -18,7 +18,7 @@ std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
-void ReportError(const std::string& error) {
+[[noreturn]] void ReportError(const std::string& error) {
std::cerr << CurrentPositionAsString() << ": Torque error: " << error << "\n";
throw(-1);
}
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 7fe6ffdbcc..59379fa526 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -17,7 +17,7 @@ namespace torque {
typedef std::vector<std::string> NameVector;
-void ReportError(const std::string& error);
+[[noreturn]] void ReportError(const std::string& error);
std::string CamelifyString(const std::string& underscore_string);
std::string DashifyString(const std::string& underscore_string);
@@ -36,6 +36,49 @@ class Deduplicator {
std::unordered_set<T, base::hash<T>> storage_;
};
+template <class C, class T>
+void PrintCommaSeparatedList(std::ostream& os, const T& list, C transform) {
+ bool first = true;
+ for (auto& e : list) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << transform(e);
+ }
+}
+
+template <class T,
+ typename std::enable_if<
+ std::is_pointer<typename T::value_type>::value, int>::type = 0>
+void PrintCommaSeparatedList(std::ostream& os, const T& list) {
+ bool first = true;
+ for (auto& e : list) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << *e;
+ }
+}
+
+template <class T,
+ typename std::enable_if<
+ !std::is_pointer<typename T::value_type>::value, int>::type = 0>
+void PrintCommaSeparatedList(std::ostream& os, const T& list) {
+ bool first = true;
+ for (auto& e : list) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << e;
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 9a40c13569..86bcd66128 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -90,7 +90,7 @@ HeapObjectReference** TransitionArray::GetTargetSlot(int transition_number) {
// static
PropertyDetails TransitionsAccessor::GetTargetDetails(Name* name, Map* target) {
- DCHECK(!IsSpecialTransition(name));
+ DCHECK(!IsSpecialTransition(name->GetReadOnlyRoots(), name));
int descriptor = target->LastAdded();
DescriptorArray* descriptors = target->instance_descriptors();
// Transitions are allowed only for the last added property.
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 0324941d9e..b22e48ef34 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -58,7 +58,6 @@ bool TransitionsAccessor::HasSimpleTransitionTo(Map* map) {
void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
SimpleTransitionFlag flag) {
DCHECK(!map_handle_.is_null());
- Isolate* isolate = map_->GetIsolate();
target->SetBackPointer(map_);
// If the map doesn't have any transitions at all yet, install the new one.
@@ -69,7 +68,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
// If the flag requires a full TransitionArray, allocate one.
Handle<TransitionArray> result =
- isolate->factory()->NewTransitionArray(0, 1);
+ isolate_->factory()->NewTransitionArray(0, 1);
ReplaceTransitions(MaybeObject::FromObject(*result));
Reload();
}
@@ -90,9 +89,9 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
return;
}
// Otherwise allocate a full TransitionArray with slack for a new entry.
- Handle<Map> map(simple_transition);
+ Handle<Map> map(simple_transition, isolate_);
Handle<TransitionArray> result =
- isolate->factory()->NewTransitionArray(1, 1);
+ isolate_->factory()->NewTransitionArray(1, 1);
// Reload state; allocations might have caused it to be cleared.
Reload();
simple_transition = GetSimpleTransition();
@@ -117,7 +116,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int number_of_transitions = 0;
int new_nof = 0;
int insertion_index = kNotFound;
- DCHECK_EQ(is_special_transition, IsSpecialTransition(*name));
+ DCHECK_EQ(is_special_transition,
+ IsSpecialTransition(ReadOnlyRoots(isolate_), *name));
PropertyDetails details = is_special_transition
? PropertyDetails::Empty()
: GetTargetDetails(*name, *target);
@@ -131,8 +131,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int index =
is_special_transition
? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ : array->Search(isolate_, details.kind(), *name,
+ details.attributes(), &insertion_index);
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
array->SetRawTarget(index, HeapObjectReference::Weak(*target));
@@ -158,7 +158,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
// We're gonna need a bigger TransitionArray.
- Handle<TransitionArray> result = isolate->factory()->NewTransitionArray(
+ Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(
new_nof,
Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
@@ -178,8 +178,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int index =
is_special_transition
? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ : array->Search(isolate_, details.kind(), *name,
+ details.attributes(), &insertion_index);
if (index == kNotFound) {
++new_nof;
} else {
@@ -187,7 +187,6 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
- result->Shrink(TransitionArray::ToKeyIndex(new_nof));
result->SetNumberOfTransitions(new_nof);
}
@@ -221,7 +220,7 @@ Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
return map;
}
case kFullTransitionArray: {
- int transition = transitions()->Search(kind, name, attributes);
+ int transition = transitions()->Search(isolate_, kind, name, attributes);
if (transition == kNotFound) return nullptr;
return transitions()->GetTarget(transition);
}
@@ -237,13 +236,12 @@ Map* TransitionsAccessor::SearchSpecial(Symbol* name) {
}
// static
-bool TransitionsAccessor::IsSpecialTransition(Name* name) {
+bool TransitionsAccessor::IsSpecialTransition(ReadOnlyRoots roots, Name* name) {
if (!name->IsSymbol()) return false;
- Heap* heap = name->GetHeap();
- return name == heap->nonextensible_symbol() ||
- name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
- name == heap->elements_transition_symbol() ||
- name == heap->strict_function_transition_symbol();
+ return name == roots.nonextensible_symbol() ||
+ name == roots.sealed_symbol() || name == roots.frozen_symbol() ||
+ name == roots.elements_transition_symbol() ||
+ name == roots.strict_function_transition_symbol();
}
MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
@@ -259,7 +257,7 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
if (requested_location == kFieldOnly && details.location() != kField) {
return MaybeHandle<Map>();
}
- return Handle<Map>(target);
+ return Handle<Map>(target, isolate_);
}
Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
@@ -277,7 +275,7 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
if (details.attributes() != NONE) return Handle<String>::null();
Name* name = GetSimpleTransitionKey(target);
if (!name->IsString()) return Handle<String>::null();
- return handle(String::cast(name));
+ return handle(String::cast(name), isolate_);
}
}
UNREACHABLE();
@@ -285,7 +283,7 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
Handle<Map> TransitionsAccessor::ExpectedTransitionTarget() {
DCHECK(!ExpectedTransitionKey().is_null());
- return handle(GetTarget(0));
+ return handle(GetTarget(0), isolate_);
}
bool TransitionsAccessor::CanHaveMoreTransitions() {
@@ -309,7 +307,8 @@ bool TransitionsAccessor::IsMatchingMap(Map* target, Name* name,
}
// static
-bool TransitionArray::CompactPrototypeTransitionArray(WeakFixedArray* array) {
+bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
+ WeakFixedArray* array) {
const int header = kProtoTransitionHeaderSize;
int number_of_transitions = NumberOfPrototypeTransitions(array);
if (number_of_transitions == 0) {
@@ -329,8 +328,8 @@ bool TransitionArray::CompactPrototypeTransitionArray(WeakFixedArray* array) {
}
}
// Fill slots that became free with undefined value.
- MaybeObject* undefined = MaybeObject::FromObject(
- *array->GetIsolate()->factory()->undefined_value());
+ MaybeObject* undefined =
+ MaybeObject::FromObject(*isolate->factory()->undefined_value());
for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
array->Set(header + i, undefined);
}
@@ -369,16 +368,16 @@ void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
const int header = TransitionArray::kProtoTransitionHeaderSize;
- Handle<WeakFixedArray> cache(GetPrototypeTransitions());
+ Handle<WeakFixedArray> cache(GetPrototypeTransitions(), isolate_);
int capacity = cache->length() - header;
int transitions = TransitionArray::NumberOfPrototypeTransitions(*cache) + 1;
if (transitions > capacity) {
// Grow the array if compacting it doesn't free space.
- if (!TransitionArray::CompactPrototypeTransitionArray(*cache)) {
+ if (!TransitionArray::CompactPrototypeTransitionArray(isolate_, *cache)) {
if (capacity == TransitionArray::kMaxCachedPrototypeTransitions) return;
cache = TransitionArray::GrowPrototypeTransitionArray(
- cache, 2 * transitions, target_map->GetIsolate());
+ cache, 2 * transitions, isolate_);
Reload();
SetPrototypeTransitions(cache);
}
@@ -404,7 +403,7 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
if (!target->IsClearedWeakHeapObject()) {
Map* map = Map::cast(target->ToWeakHeapObject());
if (map->prototype() == *prototype) {
- return handle(map);
+ return handle(map, isolate_);
}
}
}
@@ -414,7 +413,7 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
WeakFixedArray* TransitionsAccessor::GetPrototypeTransitions() {
if (encoding() != kFullTransitionArray ||
!transitions()->HasPrototypeTransitions()) {
- return map_->GetHeap()->empty_weak_fixed_array();
+ return ReadOnlyRoots(isolate_).empty_weak_fixed_array();
}
return transitions()->GetPrototypeTransitions();
}
@@ -441,10 +440,11 @@ int TransitionsAccessor::NumberOfTransitions() {
return 0; // Make GCC happy.
}
-void TransitionArray::Zap() {
- MemsetPointer(data_start() + kPrototypeTransitionsIndex,
- MaybeObject::FromObject(GetHeap()->the_hole_value()),
- length() - kPrototypeTransitionsIndex);
+void TransitionArray::Zap(Isolate* isolate) {
+ MemsetPointer(
+ data_start() + kPrototypeTransitionsIndex,
+ MaybeObject::FromObject(ReadOnlyRoots(isolate).the_hole_value()),
+ length() - kPrototypeTransitionsIndex);
SetNumberOfTransitions(0);
}
@@ -460,7 +460,7 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject* new_transitions) {
// keep referenced objects alive, so we zap it.
// When there is another reference to the array somewhere (e.g. a handle),
// not zapping turns from a waste of memory into a source of crashes.
- old_transitions->Zap();
+ old_transitions->Zap(isolate_);
}
map_->set_raw_transitions(new_transitions);
MarkNeedsReload();
@@ -475,17 +475,15 @@ void TransitionsAccessor::SetPrototypeTransitions(
void TransitionsAccessor::EnsureHasFullTransitionArray() {
if (encoding() == kFullTransitionArray) return;
int nof = encoding() == kUninitialized ? 0 : 1;
- Handle<TransitionArray> result =
- map_->GetIsolate()->factory()->NewTransitionArray(nof);
+ Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(nof);
Reload(); // Reload after possible GC.
if (nof == 1) {
if (encoding() == kUninitialized) {
// If allocation caused GC and cleared the target, trim the new array.
- result->Shrink(TransitionArray::ToKeyIndex(0));
result->SetNumberOfTransitions(0);
} else {
// Otherwise populate the new array.
- Handle<Map> target(GetSimpleTransition());
+ Handle<Map> target(GetSimpleTransition(), isolate_);
Name* key = GetSimpleTransitionKey(*target);
result->Set(0, key, HeapObjectReference::Weak(*target));
}
@@ -502,7 +500,7 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
break;
case kWeakRef: {
Map* simple_target = Map::cast(raw_transitions_->ToWeakHeapObject());
- TransitionsAccessor(simple_target, no_gc)
+ TransitionsAccessor(isolate_, simple_target, no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
break;
}
@@ -516,12 +514,13 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
DCHECK(target->IsClearedWeakHeapObject() ||
target->IsWeakHeapObject());
if (target->IsClearedWeakHeapObject()) continue;
- TransitionsAccessor(Map::cast(target->ToWeakHeapObject()), no_gc)
+ TransitionsAccessor(isolate_, Map::cast(target->ToWeakHeapObject()),
+ no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
}
}
for (int i = 0; i < transitions()->number_of_transitions(); ++i) {
- TransitionsAccessor(transitions()->GetTarget(i), no_gc)
+ TransitionsAccessor(isolate_, transitions()->GetTarget(i), no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
}
break;
@@ -541,12 +540,12 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
if (target->instance_descriptors() == map_->instance_descriptors()) {
Name* key = old_transitions->GetKey(i);
int new_target_index;
- if (IsSpecialTransition(key)) {
+ if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
new_target_index = new_transitions->SearchSpecial(Symbol::cast(key));
} else {
PropertyDetails details = GetTargetDetails(key, target);
- new_target_index =
- new_transitions->Search(details.kind(), key, details.attributes());
+ new_target_index = new_transitions->Search(isolate_, details.kind(),
+ key, details.attributes());
}
DCHECK_NE(TransitionArray::kNotFound, new_target_index);
DCHECK_EQ(target, new_transitions->GetTarget(new_target_index));
@@ -557,7 +556,8 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
// Private non-static helper functions (operating on full transition arrays).
-int TransitionArray::SearchDetails(int transition, PropertyKind kind,
+int TransitionArray::SearchDetails(Isolate* isolate, int transition,
+ PropertyKind kind,
PropertyAttributes attributes,
int* out_insertion_index) {
int nof_transitions = number_of_transitions();
@@ -581,25 +581,26 @@ int TransitionArray::SearchDetails(int transition, PropertyKind kind,
return kNotFound;
}
-
-int TransitionArray::Search(PropertyKind kind, Name* name,
+int TransitionArray::Search(Isolate* isolate, PropertyKind kind, Name* name,
PropertyAttributes attributes,
int* out_insertion_index) {
int transition = SearchName(name, out_insertion_index);
if (transition == kNotFound) return kNotFound;
- return SearchDetails(transition, kind, attributes, out_insertion_index);
+ return SearchDetails(isolate, transition, kind, attributes,
+ out_insertion_index);
}
void TransitionArray::Sort() {
DisallowHeapAllocation no_gc;
// In-place insertion sort.
int length = number_of_transitions();
+ ReadOnlyRoots roots = GetReadOnlyRoots();
for (int i = 1; i < length; i++) {
Name* key = GetKey(i);
MaybeObject* target = GetRawTarget(i);
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
- if (!TransitionsAccessor::IsSpecialTransition(key)) {
+ if (!TransitionsAccessor::IsSpecialTransition(roots, key)) {
Map* target_map = TransitionsAccessor::GetTargetFromRaw(target);
PropertyDetails details =
TransitionsAccessor::GetTargetDetails(key, target_map);
@@ -612,7 +613,7 @@ void TransitionArray::Sort() {
MaybeObject* temp_target = GetRawTarget(j);
PropertyKind temp_kind = kData;
PropertyAttributes temp_attributes = NONE;
- if (!TransitionsAccessor::IsSpecialTransition(temp_key)) {
+ if (!TransitionsAccessor::IsSpecialTransition(roots, temp_key)) {
Map* temp_target_map =
TransitionsAccessor::GetTargetFromRaw(temp_target);
PropertyDetails details =
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 386097749e..99fba563ea 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -37,11 +37,13 @@ namespace internal {
// cleared when the map they refer to is not otherwise reachable.
class TransitionsAccessor {
public:
- TransitionsAccessor(Map* map, DisallowHeapAllocation* no_gc) : map_(map) {
+ TransitionsAccessor(Isolate* isolate, Map* map, DisallowHeapAllocation* no_gc)
+ : isolate_(isolate), map_(map) {
Initialize();
USE(no_gc);
}
- explicit TransitionsAccessor(Handle<Map> map) : map_handle_(map), map_(*map) {
+ TransitionsAccessor(Isolate* isolate, Handle<Map> map)
+ : isolate_(isolate), map_handle_(map), map_(*map) {
Initialize();
}
@@ -57,7 +59,7 @@ class TransitionsAccessor {
Map* SearchSpecial(Symbol* name);
// Returns true for non-property transitions like elements kind, or
// or frozen/sealed transitions.
- static bool IsSpecialTransition(Name* name);
+ static bool IsSpecialTransition(ReadOnlyRoots roots, Name* name);
enum RequestedLocation { kAnyLocation, kFieldOnly };
MaybeHandle<Map> FindTransitionToDataProperty(
@@ -177,6 +179,7 @@ class TransitionsAccessor {
inline TransitionArray* transitions();
+ Isolate* isolate_;
Handle<Map> map_handle_;
Map* map_;
MaybeObject* raw_transitions_;
@@ -232,11 +235,7 @@ class TransitionArray : public WeakFixedArray {
void Sort();
-#if defined(DEBUG) || defined(OBJECT_PRINT)
- // For our gdb macros.
- void Print();
- void Print(std::ostream& os);
-#endif
+ void PrintInternal(std::ostream& os);
DECL_PRINTER(TransitionArray)
DECL_VERIFIER(TransitionArray)
@@ -298,8 +297,8 @@ class TransitionArray : public WeakFixedArray {
}
// Search a transition for a given kind, property name and attributes.
- int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
- int* out_insertion_index = nullptr);
+ int Search(Isolate* isolate, PropertyKind kind, Name* name,
+ PropertyAttributes attributes, int* out_insertion_index = nullptr);
// Search a non-property transition (like elements kind, observe or frozen
// transitions).
@@ -308,12 +307,13 @@ class TransitionArray : public WeakFixedArray {
}
// Search a first transition for a given property name.
inline int SearchName(Name* name, int* out_insertion_index = nullptr);
- int SearchDetails(int transition, PropertyKind kind,
+ int SearchDetails(Isolate* isolate, int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
inline int number_of_transitions() const;
- static bool CompactPrototypeTransitionArray(WeakFixedArray* array);
+ static bool CompactPrototypeTransitionArray(Isolate* isolate,
+ WeakFixedArray* array);
static Handle<WeakFixedArray> GrowPrototypeTransitionArray(
Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate);
@@ -339,7 +339,7 @@ class TransitionArray : public WeakFixedArray {
inline void Set(int transition_number, Name* key, MaybeObject* target);
- void Zap();
+ void Zap(Isolate* isolate);
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index 9aee9d9a28..ac0d46af0e 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,11 +1,11 @@
set noparent
-bradnelson@chromium.org
-eholk@chromium.org
+titzer@chromium.org
+ahaas@chromium.org
# Changes to this directory should also be reviewed by:
#
-# eholk@chromium.org
+# ahaas@chromium.org
# mseaborn@chromium.org
# mark@chromium.org
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/turbo-assembler.cc
new file mode 100644
index 0000000000..079feabb2e
--- /dev/null
+++ b/deps/v8/src/turbo-assembler.cc
@@ -0,0 +1,122 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/turbo-assembler.h"
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/constants-table-builder.h"
+#include "src/heap/heap-inl.h"
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
+ const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(options, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ = Handle<HeapObject>::New(
+ ReadOnlyRoots(isolate).self_reference_marker(), isolate);
+ }
+}
+
+void TurboAssemblerBase::IndirectLoadConstant(Register destination,
+ Handle<HeapObject> object) {
+ CHECK(isolate()->ShouldLoadConstantsFromRootList());
+ CHECK(root_array_available_);
+
+ // Before falling back to the (fairly slow) lookup from the constants table,
+ // check if any of the fast paths can be applied.
+
+ int builtin_index;
+ Heap::RootListIndex root_index;
+ if (isolate()->heap()->IsRootHandle(object, &root_index)) {
+ // Roots are loaded relative to the root register.
+ LoadRoot(destination, root_index);
+ } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
+ // Similar to roots, builtins may be loaded from the builtins table.
+ LoadRootRelative(destination,
+ RootRegisterOffsetForBuiltinIndex(builtin_index));
+ } else if (object.is_identical_to(code_object_) &&
+ Builtins::IsBuiltinId(maybe_builtin_index_)) {
+ // The self-reference loaded through CodeObject() may also be a builtin
+ // and thus viable for a fast load.
+ LoadRootRelative(destination,
+ RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
+ } else {
+ // Ensure the given object is in the builtins constants table and fetch its
+ // index.
+ BuiltinsConstantsTableBuilder* builder =
+ isolate()->builtins_constants_table_builder();
+ uint32_t index = builder->AddObject(object);
+
+ // Slow load from the constants table.
+ LoadFromConstantsTable(destination, index);
+ }
+}
+
+void TurboAssemblerBase::IndirectLoadExternalReference(
+ Register destination, ExternalReference reference) {
+ CHECK(isolate()->ShouldLoadConstantsFromRootList());
+ CHECK(root_array_available_);
+
+ if (IsAddressableThroughRootRegister(isolate(), reference)) {
+ // Some external references can be efficiently loaded as an offset from
+ // kRootRegister.
+ intptr_t offset =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ LoadRootRegisterOffset(destination, offset);
+ } else {
+ // Otherwise, do a memory load from the external reference table.
+
+ // Encode as an index into the external reference table stored on the
+ // isolate.
+ ExternalReferenceEncoder encoder(isolate());
+ ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
+ CHECK(!v.is_from_api());
+
+ LoadRootRelative(destination,
+ RootRegisterOffsetForExternalReferenceIndex(v.index()));
+ }
+}
+
+// static
+int32_t TurboAssemblerBase::RootRegisterOffset(Heap::RootListIndex root_index) {
+ return (root_index << kPointerSizeLog2) - kRootRegisterBias;
+}
+
+// static
+int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceIndex(
+ int reference_index) {
+ return Heap::roots_to_external_reference_table_offset() - kRootRegisterBias +
+ ExternalReferenceTable::OffsetOfEntry(reference_index);
+}
+
+// static
+intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ Isolate* isolate, const ExternalReference& reference) {
+ return static_cast<intptr_t>(reference.address()) - kRootRegisterBias -
+ reinterpret_cast<intptr_t>(isolate->heap()->roots_array_start());
+}
+
+// static
+bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+ Isolate* isolate, const ExternalReference& reference) {
+ Address start = reinterpret_cast<Address>(isolate);
+ Address end = isolate->heap()->root_register_addressable_end();
+ Address address = reference.address();
+ return start <= address && address < end;
+}
+
+// static
+int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinIndex(
+ int builtin_index) {
+ return Heap::roots_to_builtins_offset() - kRootRegisterBias +
+ builtin_index * kPointerSize;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/turbo-assembler.h b/deps/v8/src/turbo-assembler.h
new file mode 100644
index 0000000000..67f895b1ca
--- /dev/null
+++ b/deps/v8/src/turbo-assembler.h
@@ -0,0 +1,114 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TURBO_ASSEMBLER_H_
+#define V8_TURBO_ASSEMBLER_H_
+
+#include "src/assembler-arch.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+// Common base class for platform-specific TurboAssemblers containing
+// platform-independent bits.
+class TurboAssemblerBase : public Assembler {
+ public:
+ Isolate* isolate() const { return isolate_; }
+
+ Handle<HeapObject> CodeObject() const {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+ bool root_array_available() const { return root_array_available_; }
+ void set_root_array_available(bool v) { root_array_available_ = v; }
+
+ bool trap_on_abort() const { return trap_on_abort_; }
+ void set_trap_on_abort(bool v) { trap_on_abort_ = v; }
+
+ void set_builtin_index(int i) { maybe_builtin_index_ = i; }
+
+ void set_has_frame(bool v) { has_frame_ = v; }
+ bool has_frame() const { return has_frame_; }
+
+ // Loads the given constant or external reference without embedding its direct
+ // pointer. The produced code is isolate-independent.
+ void IndirectLoadConstant(Register destination, Handle<HeapObject> object);
+ void IndirectLoadExternalReference(Register destination,
+ ExternalReference reference);
+
+ virtual void LoadFromConstantsTable(Register destination,
+ int constant_index) = 0;
+
+ virtual void LoadRootRegisterOffset(Register destination,
+ intptr_t offset) = 0;
+ virtual void LoadRootRelative(Register destination, int32_t offset) = 0;
+
+ virtual void LoadRoot(Register destination, Heap::RootListIndex index) = 0;
+
+ static int32_t RootRegisterOffset(Heap::RootListIndex root_index);
+ static int32_t RootRegisterOffsetForExternalReferenceIndex(
+ int reference_index);
+
+ static int32_t RootRegisterOffsetForBuiltinIndex(int builtin_index);
+
+ static intptr_t RootRegisterOffsetForExternalReference(
+ Isolate* isolate, const ExternalReference& reference);
+
+ // An address is addressable through kRootRegister if it is located within
+ // [isolate, roots_ + root_register_addressable_end_offset[.
+ static bool IsAddressableThroughRootRegister(
+ Isolate* isolate, const ExternalReference& reference);
+
+ protected:
+ TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object);
+
+ Isolate* const isolate_ = nullptr;
+
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+
+ // Whether kRootRegister has been initialized.
+ bool root_array_available_ = true;
+
+ // Immediately trap instead of calling {Abort} when debug code fails.
+ bool trap_on_abort_ = FLAG_trap_on_abort;
+
+ // May be set while generating builtins.
+ int maybe_builtin_index_ = Builtins::kNoBuiltinId;
+
+ bool has_frame_ = false;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
+};
+
+// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
+// code during the lifetime of this scope object. For disabling debug code
+// entirely use the {DontEmitDebugCodeScope} instead.
+class TrapOnAbortScope BASE_EMBEDDED {
+ public:
+ explicit TrapOnAbortScope(TurboAssemblerBase* assembler)
+ : assembler_(assembler), old_value_(assembler->trap_on_abort()) {
+ assembler_->set_trap_on_abort(true);
+ }
+ ~TrapOnAbortScope() { assembler_->set_trap_on_abort(old_value_); }
+
+ private:
+ TurboAssemblerBase* assembler_;
+ bool old_value_;
+};
+
+// Helper stubs can be called in different ways depending on where the target
+// code is located and what the call sequence is expected to look like:
+// - JavaScript: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
+// - WebAssembly: Call native {WasmCode} stub via {RelocInfo::WASM_STUB_CALL}.
+enum class StubCallMode { kCallOnHeapBuiltin, kCallWasmRuntimeStub };
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TURBO_ASSEMBLER_H_
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index 7c0386ce52..0140858115 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -85,6 +85,7 @@ unsigned Utf8::Encode(char* str,
str[1] = 0x80 | (c & kMask);
return 2;
} else if (c <= kMaxThreeByteChar) {
+ DCHECK(!Utf16::IsLeadSurrogate(Utf16::kNoPreviousCharacter));
if (Utf16::IsSurrogatePair(previous, c)) {
const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
return Encode(str - kUnmatchedSize,
@@ -127,8 +128,8 @@ unsigned Utf8::Length(uchar c, int previous) {
} else if (c <= kMaxTwoByteChar) {
return 2;
} else if (c <= kMaxThreeByteChar) {
- if (Utf16::IsTrailSurrogate(c) &&
- Utf16::IsLeadSurrogate(previous)) {
+ DCHECK(!Utf16::IsLeadSurrogate(Utf16::kNoPreviousCharacter));
+ if (Utf16::IsSurrogatePair(previous, c)) {
return kSizeOfUnmatchedSurrogate - kBytesSavedByCombiningSurrogates;
}
return 3;
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 75f53e22d1..dddf22c4c6 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -39,8 +39,12 @@ class Predicate {
inline CacheEntry()
: bit_field_(CodePointField::encode(0) | ValueField::encode(0)) {}
inline CacheEntry(uchar code_point, bool value)
- : bit_field_(CodePointField::encode(code_point) |
- ValueField::encode(value)) {}
+ : bit_field_(
+ CodePointField::encode(CodePointField::kMask & code_point) |
+ ValueField::encode(value)) {
+ DCHECK_IMPLIES((CodePointField::kMask & code_point) != code_point,
+ code_point == static_cast<uchar>(-1));
+ }
uchar code_point() const { return CodePointField::decode(bit_field_); }
bool value() const { return ValueField::decode(bit_field_); }
@@ -94,22 +98,20 @@ class UnicodeData {
class Utf16 {
public:
+ static const int kNoPreviousCharacter = -1;
static inline bool IsSurrogatePair(int lead, int trail) {
return IsLeadSurrogate(lead) && IsTrailSurrogate(trail);
}
static inline bool IsLeadSurrogate(int code) {
- if (code == kNoPreviousCharacter) return false;
return (code & 0xfc00) == 0xd800;
}
static inline bool IsTrailSurrogate(int code) {
- if (code == kNoPreviousCharacter) return false;
return (code & 0xfc00) == 0xdc00;
}
static inline int CombineSurrogatePair(uchar lead, uchar trail) {
return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
}
- static const int kNoPreviousCharacter = -1;
static const uchar kMaxNonSurrogateCharCode = 0xffff;
// Encoding a single UTF-16 code unit will produce 1, 2 or 3 bytes
// of UTF-8 data. The special case where the unit is a surrogate
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 775c0ede2c..72fada759c 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -175,7 +175,7 @@ bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
- uri = String::Flatten(uri);
+ uri = String::Flatten(isolate, uri);
std::vector<uint8_t> one_byte_buffer;
std::vector<uc16> two_byte_buffer;
@@ -273,7 +273,7 @@ void EncodePair(uc16 cc1, uc16 cc2, std::vector<uint8_t>* buffer) {
MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
- uri = String::Flatten(uri);
+ uri = String::Flatten(isolate, uri);
int uri_length = uri->length();
std::vector<uint8_t> buffer;
buffer.reserve(uri_length);
@@ -492,7 +492,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
Handle<String> result;
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
return string->IsOneByteRepresentationUnderneath()
? EscapePrivate<uint8_t>(isolate, string)
: EscapePrivate<uc16>(isolate, string);
@@ -500,7 +500,7 @@ MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
MaybeHandle<String> Uri::Unescape(Isolate* isolate, Handle<String> string) {
Handle<String> result;
- string = String::Flatten(string);
+ string = String::Flatten(isolate, string);
return string->IsOneByteRepresentationUnderneath()
? UnescapePrivate<uint8_t>(isolate, string)
: UnescapePrivate<uc16>(isolate, string);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 31c1c15724..b922332172 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -21,7 +21,6 @@
#include "src/base/v8-fallthrough.h"
#include "src/globals.h"
#include "src/vector.h"
-#include "src/zone/zone.h"
#if defined(V8_OS_AIX)
#include <fenv.h> // NOLINT(build/c++11)
@@ -473,13 +472,13 @@ class BitSetComputer {
// ----------------------------------------------------------------------------
// Hash function.
-static const uint32_t kZeroHashSeed = 0;
+static const uint64_t kZeroHashSeed = 0;
// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
-inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
+inline uint32_t ComputeIntegerHash(uint32_t key, uint64_t seed) {
uint32_t hash = key;
- hash = hash ^ seed;
+ hash = hash ^ static_cast<uint32_t>(seed);
hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
hash = hash ^ (hash >> 12);
hash = hash + (hash << 2);
@@ -1240,29 +1239,31 @@ Vector<const char> ReadFile(FILE* file,
bool* exists,
bool verbose = true);
-
template <typename sourcechar, typename sinkchar>
-INLINE(static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
- size_t chars));
+V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
+ size_t chars);
#if defined(V8_HOST_ARCH_ARM)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
- size_t chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars));
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
#elif defined(V8_HOST_ARCH_MIPS)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars));
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars));
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
#endif
// Copy from 8bit/16bit chars to 8bit/16bit chars.
template <typename sourcechar, typename sinkchar>
-INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars));
+V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars);
template <typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
@@ -1635,7 +1636,7 @@ static inline V ReadLittleEndianValue(Address p) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
return ReadUnalignedValue<V>(p);
#elif defined(V8_TARGET_BIG_ENDIAN)
- V ret = 0;
+ V ret{};
const byte* src = reinterpret_cast<const byte*>(p);
byte* dst = reinterpret_cast<byte*>(&ret);
for (size_t i = 0; i < sizeof(V); i++) {
@@ -1664,13 +1665,13 @@ static inline V ByteReverse(V value) {
switch (size_of_v) {
case 2:
#if V8_HAS_BUILTIN_BSWAP16
- return __builtin_bswap16(value);
+ return static_cast<V>(__builtin_bswap16(static_cast<uint16_t>(value)));
#else
return value << 8 | (value >> 8 & 0x00FF);
#endif
case 4:
#if V8_HAS_BUILTIN_BSWAP32
- return __builtin_bswap32(value);
+ return static_cast<V>(__builtin_bswap32(static_cast<uint32_t>(value)));
#else
{
size_t bits_of_v = size_of_v * kBitsPerByte;
@@ -1682,7 +1683,7 @@ static inline V ByteReverse(V value) {
#endif
case 8:
#if V8_HAS_BUILTIN_BSWAP64
- return __builtin_bswap64(value);
+ return static_cast<V>(__builtin_bswap64(static_cast<uint64_t>(value)));
#else
{
size_t bits_of_v = size_of_v * kBitsPerByte;
@@ -1803,21 +1804,6 @@ class ThreadedList final {
DISALLOW_COPY_AND_ASSIGN(ThreadedList);
};
-// Can be used to create a threaded list of |T|.
-template <typename T>
-class ThreadedListZoneEntry final : public ZoneObject {
- public:
- explicit ThreadedListZoneEntry(T value) : value_(value), next_(nullptr) {}
-
- T value() { return value_; }
- ThreadedListZoneEntry<T>** next() { return &next_; }
-
- private:
- T value_;
- ThreadedListZoneEntry<T>* next_;
- DISALLOW_COPY_AND_ASSIGN(ThreadedListZoneEntry);
-};
-
V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
Vector<const char> filter);
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 8d34dba912..e8f1f0e846 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -14,6 +14,7 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames.h"
+#include "src/interface-descriptors.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
#include "src/objects-inl.h"
@@ -46,6 +47,7 @@ void V8::TearDown() {
#if defined(USE_SIMULATOR)
Simulator::GlobalTearDown();
#endif
+ CallDescriptors::TearDown();
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
RegisteredExtension::UnregisterAll();
@@ -81,6 +83,7 @@ void V8::InitializeOncePerProcessImpl() {
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
Bootstrapper::InitializeOncePerProcess();
+ CallDescriptors::InitializeOncePerProcess();
}
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 0fb333c1f3..db927010ef 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -45,7 +45,7 @@ void Locker::Initialize(v8::Isolate* isolate) {
} else {
internal::ExecutionAccess access(isolate_);
isolate_->stack_guard()->ClearThread(access);
- isolate_->thread_manager()->InitThread(access);
+ isolate_->stack_guard()->InitThread(access);
}
}
DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
@@ -95,10 +95,6 @@ Unlocker::~Unlocker() {
namespace internal {
-void ThreadManager::InitThread(const ExecutionAccess& lock) {
- isolate_->stack_guard()->InitThread(lock);
- isolate_->debug()->InitThread(lock);
-}
bool ThreadManager::RestoreThread() {
DCHECK(IsLockedByCurrentThread());
@@ -131,7 +127,7 @@ bool ThreadManager::RestoreThread() {
isolate_->FindPerThreadDataForThisThread();
if (per_thread == nullptr || per_thread->thread_state() == nullptr) {
// This is a new thread.
- InitThread(access);
+ isolate_->stack_guard()->InitThread(access);
return false;
}
ThreadState* state = per_thread->thread_state();
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 7fde0c9ec4..bb87afea7d 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -67,7 +67,6 @@ class ThreadManager {
void Lock();
void Unlock();
- void InitThread(const ExecutionAccess&);
void ArchiveThread();
bool RestoreThread();
void FreeThreadResources();
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 9fb678f84f..4c9c9a9aa2 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -15,6 +15,8 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
@@ -354,9 +356,11 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
WriteOddball(Oddball::cast(*object));
return ThrowIfOutOfMemory();
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
WriteHeapNumber(HeapNumber::cast(*object));
return ThrowIfOutOfMemory();
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ WriteMutableHeapNumber(MutableHeapNumber::cast(*object));
+ return ThrowIfOutOfMemory();
case BIGINT_TYPE:
WriteBigInt(BigInt::cast(*object));
return ThrowIfOutOfMemory();
@@ -423,13 +427,18 @@ void ValueSerializer::WriteHeapNumber(HeapNumber* number) {
WriteDouble(number->value());
}
+void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber* number) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(number->value());
+}
+
void ValueSerializer::WriteBigInt(BigInt* bigint) {
WriteTag(SerializationTag::kBigInt);
WriteBigIntContents(bigint);
}
void ValueSerializer::WriteString(Handle<String> string) {
- string = String::Flatten(string);
+ string = String::Flatten(isolate_, string);
DisallowHeapAllocation no_gc;
String::FlatContent flat = string->GetFlatContent();
DCHECK(flat.IsFlat());
@@ -734,12 +743,12 @@ void ValueSerializer::WriteJSRegExp(JSRegExp* regexp) {
Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
// First copy the key-value pairs, since getters could mutate them.
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()), isolate_);
int length = table->NumberOfElements() * 2;
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowHeapAllocation no_gc;
- Oddball* the_hole = isolate_->heap()->the_hole_value();
+ Oddball* the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int capacity = table->UsedCapacity();
int result_index = 0;
for (int i = 0; i < capacity; i++) {
@@ -765,12 +774,12 @@ Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
// First copy the element pointers, since getters could mutate them.
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()), isolate_);
int length = table->NumberOfElements();
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowHeapAllocation no_gc;
- Oddball* the_hole = isolate_->heap()->the_hole_value();
+ Oddball* the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int capacity = table->UsedCapacity();
int result_index = 0;
for (int i = 0; i < capacity; i++) {
@@ -877,22 +886,21 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
WriteTag(SerializationTag::kWasmModule);
WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
- Handle<String> wire_bytes(object->shared()->module_bytes(), isolate_);
- int wire_bytes_length = wire_bytes->length();
- WriteVarint<uint32_t>(wire_bytes_length);
+ wasm::NativeModule* native_module = object->native_module();
+ Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ WriteVarint<uint32_t>(static_cast<uint32_t>(wire_bytes.size()));
uint8_t* destination;
- if (ReserveRawBytes(wire_bytes_length).To(&destination)) {
- String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
+ if (ReserveRawBytes(wire_bytes.size()).To(&destination)) {
+ memcpy(destination, wire_bytes.start(), wire_bytes.size());
}
- Handle<WasmCompiledModule> compiled_part(object->compiled_module(), isolate_);
size_t module_size =
- wasm::GetSerializedNativeModuleSize(isolate_, compiled_part);
+ wasm::GetSerializedNativeModuleSize(isolate_, native_module);
CHECK_GE(std::numeric_limits<uint32_t>::max(), module_size);
WriteVarint<uint32_t>(static_cast<uint32_t>(module_size));
uint8_t* module_buffer;
if (ReserveRawBytes(module_size).To(&module_buffer)) {
- if (!wasm::SerializeNativeModule(isolate_, compiled_part,
+ if (!wasm::SerializeNativeModule(isolate_, native_module,
{module_buffer, module_size})) {
return Nothing<bool>();
}
@@ -992,7 +1000,7 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
end_(data.start() + data.length()),
pretenure_(data.length() > kPretenureThreshold ? TENURED : NOT_TENURED),
id_map_(isolate->global_handles()->Create(
- isolate_->heap()->empty_fixed_array())) {}
+ ReadOnlyRoots(isolate_).empty_fixed_array())) {}
ValueDeserializer::~ValueDeserializer() {
GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
@@ -1126,8 +1134,8 @@ void ValueDeserializer::TransferArrayBuffer(
}
Handle<SimpleNumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- Handle<SimpleNumberDictionary> new_dictionary =
- SimpleNumberDictionary::Set(dictionary, transfer_id, array_buffer);
+ Handle<SimpleNumberDictionary> new_dictionary = SimpleNumberDictionary::Set(
+ isolate_, dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ =
@@ -1498,12 +1506,12 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
case SerializationTag::kTrueObject:
value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), pretenure_));
- value->set_value(isolate_->heap()->true_value());
+ value->set_value(ReadOnlyRoots(isolate_).true_value());
break;
case SerializationTag::kFalseObject:
value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), pretenure_));
- value->set_value(isolate_->heap()->false_value());
+ value->set_value(ReadOnlyRoots(isolate_).false_value());
break;
case SerializationTag::kNumberObject: {
double number;
@@ -1552,7 +1560,7 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
// TODO(adamk): Can we remove this check now that dotAll is always-on?
uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
if ((raw_flags & flags_mask) ||
- !JSRegExp::New(pattern, static_cast<JSRegExp::Flags>(raw_flags))
+ !JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
return MaybeHandle<JSRegExp>();
}
@@ -1902,7 +1910,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
// transition was found.
Handle<Object> key;
Handle<Map> target;
- TransitionsAccessor transitions(map);
+ TransitionsAccessor transitions(isolate_, map);
Handle<String> expected_key = transitions.ExpectedTransitionKey();
if (!expected_key.is_null() && ReadExpectedString(expected_key)) {
key = expected_key;
@@ -1915,7 +1923,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
key =
isolate_->factory()->InternalizeString(Handle<String>::cast(key));
// Don't reuse |transitions| because it could be stale.
- transitioning = TransitionsAccessor(map)
+ transitioning = TransitionsAccessor(isolate_, map)
.FindTransitionToField(Handle<String>::cast(key))
.ToHandle(&target);
} else {
@@ -1942,8 +1950,9 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
->NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate_, expected_representation);
- Map::GeneralizeField(target, descriptor, details.constness(),
- expected_representation, value_type);
+ Map::GeneralizeField(isolate_, target, descriptor,
+ details.constness(), expected_representation,
+ value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
@@ -2026,7 +2035,8 @@ MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
void ValueDeserializer::AddObjectWithID(uint32_t id,
Handle<JSReceiver> object) {
DCHECK(!HasObjectWithID(id));
- Handle<FixedArray> new_array = FixedArray::SetAndGrow(id_map_, id, object);
+ Handle<FixedArray> new_array =
+ FixedArray::SetAndGrow(isolate_, id_map_, id, object);
// If the dictionary was reallocated, update the global handle.
if (!new_array.is_identical_to(id_map_)) {
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index e162ce22d7..91f8ecc7dd 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -29,6 +29,7 @@ class JSMap;
class JSRegExp;
class JSSet;
class JSValue;
+class MutableHeapNumber;
class Object;
class Oddball;
class Smi;
@@ -115,6 +116,7 @@ class ValueSerializer {
void WriteOddball(Oddball* oddball);
void WriteSmi(Smi* smi);
void WriteHeapNumber(HeapNumber* number);
+ void WriteMutableHeapNumber(MutableHeapNumber* number);
void WriteBigInt(BigInt* bigint);
void WriteString(Handle<String> string);
Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver)
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 97ce43e8aa..b8f10133b5 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -5,8 +5,9 @@
#ifndef V8_VECTOR_H_
#define V8_VECTOR_H_
-#include <string.h>
#include <algorithm>
+#include <cstring>
+#include <iterator>
#include "src/allocation.h"
#include "src/checks.h"
@@ -122,11 +123,18 @@ class Vector {
length_ = 0;
}
- inline Vector<T> operator+(size_t offset) {
+ Vector<T> operator+(size_t offset) {
DCHECK_LE(offset, length_);
return Vector<T>(start_ + offset, length_ - offset);
}
+ Vector<T> operator+=(size_t offset) {
+ DCHECK_LE(offset, length_);
+ start_ += offset;
+ length_ -= offset;
+ return *this;
+ }
+
// Implicit conversion from Vector<T> to Vector<const T>.
inline operator Vector<const T>() { return Vector<const T>::cast(*this); }
@@ -150,9 +158,6 @@ class Vector {
return true;
}
- protected:
- void set_start(T* start) { start_ = start; }
-
private:
T* start_;
size_t length_;
@@ -183,6 +188,66 @@ class ScopedVector : public Vector<T> {
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
+template <typename T>
+class OwnedVector {
+ public:
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(OwnedVector);
+ OwnedVector(std::unique_ptr<T[]> data, size_t length)
+ : data_(std::move(data)), length_(length) {
+ DCHECK_IMPLIES(length_ > 0, data_ != nullptr);
+ }
+ // Implicit conversion from {OwnedVector<U>} to {OwnedVector<T>}, instantiable
+ // if {std::unique_ptr<U>} can be converted to {std::unique_ptr<T>}.
+ // Can be used to convert {OwnedVector<T>} to {OwnedVector<const T>}.
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<
+ std::unique_ptr<U>, std::unique_ptr<T>>::value>::type>
+ OwnedVector(OwnedVector<U>&& other)
+ : data_(other.ReleaseData()), length_(other.size()) {}
+
+ // Returns the length of the vector as a size_t.
+ constexpr size_t size() const { return length_; }
+
+ // Returns whether or not the vector is empty.
+ constexpr bool is_empty() const { return length_ == 0; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const {
+ DCHECK_IMPLIES(length_ > 0, data_ != nullptr);
+ return data_.get();
+ }
+
+ // Returns a {Vector<T>} view of the data in this vector.
+ Vector<T> as_vector() const { return Vector<T>(start(), size()); }
+
+ // Releases the backing data from this vector and transfers ownership to the
+ // caller. This vectors data can no longer be used afterwards.
+ std::unique_ptr<T[]> ReleaseData() { return std::move(data_); }
+
+ // Allocates a new vector of the specified size via the default allocator.
+ static OwnedVector<T> New(size_t size) {
+ if (size == 0) return {};
+ return OwnedVector<T>(std::unique_ptr<T[]>(new T[size]), size);
+ }
+
+ // Allocates a new vector containing the specified collection of values.
+ // {Iterator} is the common type of {std::begin} and {std::end} called on a
+ // {const U&}. This function is only instantiable if that type exists.
+ template <typename U, typename Iterator = typename std::common_type<
+ decltype(std::begin(std::declval<const U&>())),
+ decltype(std::end(std::declval<const U&>()))>::type>
+ static OwnedVector<T> Of(const U& collection) {
+ Iterator begin = std::begin(collection);
+ Iterator end = std::end(collection);
+ OwnedVector<T> vec = New(std::distance(begin, end));
+ std::copy(begin, end, vec.start());
+ return vec;
+ }
+
+ private:
+ std::unique_ptr<T[]> data_;
+ size_t length_ = 0;
+};
inline int StrLength(const char* string) {
size_t length = strlen(string);
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 02509a7389..ca55fe5d52 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -13,12 +13,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-uint32_t LiftoffAssembler::PrepareStackFrame() {
+int LiftoffAssembler::PrepareStackFrame() {
BAILOUT("PrepareStackFrame");
return 0;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
BAILOUT("PatchPrepareStackFrame");
}
@@ -248,6 +248,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return false;
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ UNREACHABLE();
+}
+
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
@@ -296,7 +300,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
BAILOUT("emit_f64_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ BAILOUT("StackCheck");
+}
void LiftoffAssembler::CallTrapCallbackForTesting() {
BAILOUT("CallTrapCallbackForTesting");
@@ -330,16 +336,16 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- BAILOUT("CallRuntime");
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
BAILOUT("CallIndirect");
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ BAILOUT("CallRuntimeStub");
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
BAILOUT("AllocateStackSlot");
}
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 2775cba847..a8928210bb 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -114,14 +114,14 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
} // namespace liftoff
-uint32_t LiftoffAssembler::PrepareStackFrame() {
- uint32_t offset = static_cast<uint32_t>(pc_offset());
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
InstructionAccurateScope scope(this, 1);
sub(sp, sp, 0);
return offset;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
static_assert(kStackSlotSize == kXRegSize,
"kStackSlotSize must equal kXRegSize");
@@ -148,8 +148,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
return;
}
#endif
- PatchingAssembler patching_assembler(IsolateData(isolate()), buffer_ + offset,
- 1);
+ PatchingAssembler patching_assembler(AssemblerOptions{}, buffer_ + offset, 1);
patching_assembler.PatchSubSp(bytes);
}
@@ -478,7 +477,7 @@ bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
UseScratchRegisterScope temps(this);
VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch, src.X());
+ Fmov(scratch.S(), src.W());
Cnt(scratch, scratch);
Addv(scratch.B(), scratch);
Fmov(dst.W(), scratch.S());
@@ -619,11 +618,146 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ Sxtw(dst, src);
+}
+
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
- return true;
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ if (src != dst) Mov(dst.gp().W(), src.gp().W());
+ return true;
+ case kExprI32SConvertF32:
+ Fcvtzs(dst.gp().W(), src.fp().S()); // f32 -> i32 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().S(), static_cast<float>(INT32_MIN));
+ // Check overflow.
+ Ccmp(dst.gp().W(), -1, VFlag, ge);
+ B(trap, vs);
+ return true;
+ case kExprI32UConvertF32:
+ Fcvtzu(dst.gp().W(), src.fp().S()); // f32 -> i32 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().S(), -1.0);
+ // Check overflow.
+ Ccmp(dst.gp().W(), -1, ZFlag, gt);
+ B(trap, eq);
+ return true;
+ case kExprI32SConvertF64: {
+ // INT32_MIN and INT32_MAX are valid results, we cannot test the result
+ // to detect the overflows. We could have done two immediate floating
+ // point comparisons but it would have generated two conditional branches.
+ UseScratchRegisterScope temps(this);
+ VRegister fp_ref = temps.AcquireD();
+ VRegister fp_cmp = temps.AcquireD();
+ Fcvtzs(dst.gp().W(), src.fp().D()); // f64 -> i32 round to zero.
+ Frintz(fp_ref, src.fp().D()); // f64 -> f64 round to zero.
+ Scvtf(fp_cmp, dst.gp().W()); // i32 -> f64.
+ // If comparison fails, we have an overflow or a NaN.
+ Fcmp(fp_cmp, fp_ref);
+ B(trap, ne);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ // INT32_MAX is a valid result, we cannot test the result to detect the
+ // overflows. We could have done two immediate floating point comparisons
+ // but it would have generated two conditional branches.
+ UseScratchRegisterScope temps(this);
+ VRegister fp_ref = temps.AcquireD();
+ VRegister fp_cmp = temps.AcquireD();
+ Fcvtzu(dst.gp().W(), src.fp().D()); // f64 -> i32 round to zero.
+ Frintz(fp_ref, src.fp().D()); // f64 -> f64 round to zero.
+ Ucvtf(fp_cmp, dst.gp().W()); // i32 -> f64.
+ // If comparison fails, we have an overflow or a NaN.
+ Fcmp(fp_cmp, fp_ref);
+ B(trap, ne);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ Fmov(dst.gp().W(), src.fp().S());
+ return true;
+ case kExprI64SConvertI32:
+ Sxtw(dst.gp().X(), src.gp().W());
+ return true;
+ case kExprI64SConvertF32:
+ Fcvtzs(dst.gp().X(), src.fp().S()); // f32 -> i64 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().S(), static_cast<float>(INT64_MIN));
+ // Check overflow.
+ Ccmp(dst.gp().X(), -1, VFlag, ge);
+ B(trap, vs);
+ return true;
+ case kExprI64UConvertF32:
+ Fcvtzu(dst.gp().X(), src.fp().S()); // f32 -> i64 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().S(), -1.0);
+ // Check overflow.
+ Ccmp(dst.gp().X(), -1, ZFlag, gt);
+ B(trap, eq);
+ return true;
+ case kExprI64SConvertF64:
+ Fcvtzs(dst.gp().X(), src.fp().D()); // f64 -> i64 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().D(), static_cast<float>(INT64_MIN));
+ // Check overflow.
+ Ccmp(dst.gp().X(), -1, VFlag, ge);
+ B(trap, vs);
+ return true;
+ case kExprI64UConvertF64:
+ Fcvtzu(dst.gp().X(), src.fp().D()); // f64 -> i64 round to zero.
+ // Check underflow and NaN.
+ Fcmp(src.fp().D(), -1.0);
+ // Check overflow.
+ Ccmp(dst.gp().X(), -1, ZFlag, gt);
+ B(trap, eq);
+ return true;
+ case kExprI64UConvertI32:
+ Mov(dst.gp().W(), src.gp().W());
+ return true;
+ case kExprI64ReinterpretF64:
+ Fmov(dst.gp().X(), src.fp().D());
+ return true;
+ case kExprF32SConvertI32:
+ Scvtf(dst.fp().S(), src.gp().W());
+ return true;
+ case kExprF32UConvertI32:
+ Ucvtf(dst.fp().S(), src.gp().W());
+ return true;
+ case kExprF32SConvertI64:
+ Scvtf(dst.fp().S(), src.gp().X());
+ return true;
+ case kExprF32UConvertI64:
+ Ucvtf(dst.fp().S(), src.gp().X());
+ return true;
+ case kExprF32ConvertF64:
+ Fcvt(dst.fp().S(), src.fp().D());
+ return true;
+ case kExprF32ReinterpretI32:
+ Fmov(dst.fp().S(), src.gp().W());
+ return true;
+ case kExprF64SConvertI32:
+ Scvtf(dst.fp().D(), src.gp().W());
+ return true;
+ case kExprF64UConvertI32:
+ Ucvtf(dst.fp().D(), src.gp().W());
+ return true;
+ case kExprF64SConvertI64:
+ Scvtf(dst.fp().D(), src.gp().X());
+ return true;
+ case kExprF64UConvertI64:
+ Ucvtf(dst.fp().D(), src.gp().X());
+ return true;
+ case kExprF64ConvertF32:
+ Fcvt(dst.fp().D(), src.fp().S());
+ return true;
+ case kExprF64ReinterpretI64:
+ Fmov(dst.fp().D(), src.gp().X());
+ return true;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { B(label); }
@@ -699,14 +833,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
-void LiftoffAssembler::StackCheck(Label* ool_code) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- Mov(scratch, Operand(stack_limit));
- Ldr(scratch, MemOperand(scratch));
- Cmp(sp, scratch);
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ Ldr(limit_address, MemOperand(limit_address));
+ Cmp(sp, limit_address);
B(ool_code, ls);
}
@@ -780,12 +909,6 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set context to zero.
- Mov(cp, xzr);
- CallRuntimeDelayed(zone, fid);
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
@@ -795,6 +918,12 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
Call(target);
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
// The stack pointer is required to be quadword aligned.
size = RoundUp(size, kQuadWordSizeInBytes);
@@ -827,13 +956,19 @@ void LiftoffStackSlots::Construct() {
asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
poke_offset);
break;
- case LiftoffAssembler::VarState::KIntConst: {
- UseScratchRegisterScope temps(asm_);
- Register scratch = temps.AcquireW();
- asm_->Mov(scratch, slot.src_.i32_const());
- asm_->Poke(scratch, poke_offset);
+ case LiftoffAssembler::VarState::KIntConst:
+ DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
+ if (slot.src_.i32_const() == 0) {
+ Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
+ asm_->Poke(zero_reg, poke_offset);
+ } else {
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
+ : temps.AcquireX();
+ asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
+ asm_->Poke(scratch, poke_offset);
+ }
break;
- }
}
slot_index++;
}
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index c02cc466b0..ae8c9e012f 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -121,25 +121,57 @@ inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
}
}
-} // namespace liftoff
+constexpr DoubleRegister kScratchDoubleReg = xmm7;
+
+constexpr int kSubSpSize = 6; // 6 bytes for "sub esp, <imm32>"
-static constexpr DoubleRegister kScratchDoubleReg = xmm7;
+} // namespace liftoff
-uint32_t LiftoffAssembler::PrepareStackFrame() {
- uint32_t offset = static_cast<uint32_t>(pc_offset());
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
sub_sp_32(0);
+ DCHECK_EQ(liftoff::kSubSpSize, pc_offset() - offset);
return offset;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
+ kAvailableSpace);
+#if V8_OS_WIN
+ constexpr int kPageSize = 4 * 1024;
+ if (bytes > kPageSize) {
+ // Generate OOL code (at the end of the function, where the current
+ // assembler is pointing) to do the explicit stack limit check (see
+ // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
+ // visual-studio-6.0/aa227153(v=vs.60)).
+ // At the function start, emit a jump to that OOL code (from {offset} to
+ // {pc_offset()}).
+ int ool_offset = pc_offset() - offset;
+ patching_assembler.jmp_rel(ool_offset);
+ DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
+ patching_assembler.Nop(liftoff::kSubSpSize -
+ patching_assembler.pc_offset());
+
+ // Now generate the OOL code.
+ // Use {edi} as scratch register; it is not being used as parameter
+ // register (see wasm-linkage.h).
+ mov(edi, bytes);
+ AllocateStackFrame(edi);
+ // Jump back to the start of the function (from {pc_offset()} to {offset +
+ // kSubSpSize}).
+ int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
+ jmp_rel(func_start_offset);
+ return;
+ }
+#endif
patching_assembler.sub_sp_32(bytes);
+ DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
}
void LiftoffAssembler::FinishCode() {}
@@ -860,6 +892,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair_cl, pinned);
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ UNREACHABLE();
+}
+
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -879,9 +915,9 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
CpuFeatureScope scope(this, AVX);
vsubss(dst, lhs, rhs);
} else if (dst == rhs) {
- movss(kScratchDoubleReg, rhs);
+ movss(liftoff::kScratchDoubleReg, rhs);
movss(dst, lhs);
- subss(dst, kScratchDoubleReg);
+ subss(dst, liftoff::kScratchDoubleReg);
} else {
if (dst != lhs) movss(dst, lhs);
subss(dst, rhs);
@@ -907,9 +943,9 @@ void LiftoffAssembler::emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
CpuFeatureScope scope(this, AVX);
vdivss(dst, lhs, rhs);
} else if (dst == rhs) {
- movss(kScratchDoubleReg, rhs);
+ movss(liftoff::kScratchDoubleReg, rhs);
movss(dst, lhs);
- divss(dst, kScratchDoubleReg);
+ divss(dst, liftoff::kScratchDoubleReg);
} else {
if (dst != lhs) movss(dst, lhs);
divss(dst, rhs);
@@ -992,8 +1028,8 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
- Andps(dst, kScratchDoubleReg);
+ TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+ Andps(dst, liftoff::kScratchDoubleReg);
} else {
TurboAssembler::Move(dst, kSignBit - 1);
Andps(dst, src);
@@ -1003,8 +1039,8 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit);
- Xorps(dst, kScratchDoubleReg);
+ TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+ Xorps(dst, liftoff::kScratchDoubleReg);
} else {
TurboAssembler::Move(dst, kSignBit);
Xorps(dst, src);
@@ -1055,9 +1091,9 @@ void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
CpuFeatureScope scope(this, AVX);
vsubsd(dst, lhs, rhs);
} else if (dst == rhs) {
- movsd(kScratchDoubleReg, rhs);
+ movsd(liftoff::kScratchDoubleReg, rhs);
movsd(dst, lhs);
- subsd(dst, kScratchDoubleReg);
+ subsd(dst, liftoff::kScratchDoubleReg);
} else {
if (dst != lhs) movsd(dst, lhs);
subsd(dst, rhs);
@@ -1083,9 +1119,9 @@ void LiftoffAssembler::emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
CpuFeatureScope scope(this, AVX);
vdivsd(dst, lhs, rhs);
} else if (dst == rhs) {
- movsd(kScratchDoubleReg, rhs);
+ movsd(liftoff::kScratchDoubleReg, rhs);
movsd(dst, lhs);
- divsd(dst, kScratchDoubleReg);
+ divsd(dst, liftoff::kScratchDoubleReg);
} else {
if (dst != lhs) movsd(dst, lhs);
divsd(dst, rhs);
@@ -1107,8 +1143,8 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
- Andpd(dst, kScratchDoubleReg);
+ TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+ Andpd(dst, liftoff::kScratchDoubleReg);
} else {
TurboAssembler::Move(dst, kSignBit - 1);
Andpd(dst, src);
@@ -1118,8 +1154,8 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit);
- Xorpd(dst, kScratchDoubleReg);
+ TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+ Xorpd(dst, liftoff::kScratchDoubleReg);
} else {
TurboAssembler::Move(dst, kSignBit);
Xorpd(dst, src);
@@ -1168,7 +1204,7 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
assm->cvttsd2si(dst, src);
assm->Cvtsi2sd(converted_back, dst);
} else { // f64 -> u32
- assm->Cvttsd2ui(dst, src, kScratchDoubleReg);
+ assm->Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
assm->Cvtui2sd(converted_back, dst);
}
} else { // f32
@@ -1176,7 +1212,7 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
assm->cvttss2si(dst, src);
assm->Cvtsi2ss(converted_back, dst);
} else { // f32 -> u32
- assm->Cvttss2ui(dst, src, kScratchDoubleReg);
+ assm->Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
assm->Cvtui2ss(converted_back, dst,
assm->GetUnusedRegister(kGpReg, pinned).gp());
}
@@ -1455,9 +1491,8 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
-void LiftoffAssembler::StackCheck(Label* ool_code) {
- cmp(esp,
- Operand(Immediate(ExternalReference::address_of_stack_limit(isolate()))));
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ cmp(esp, Operand(limit_address, 0));
j(below_equal, ool_code);
}
@@ -1565,12 +1600,6 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
wasm_call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set context to zero (Smi::kZero) for the runtime call.
- xor_(kContextRegister, kContextRegister);
- CallRuntimeDelayed(zone, fid);
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
@@ -1584,6 +1613,12 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
}
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ wasm_call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
sub(esp, Immediate(size));
mov(addr, esp);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 2eca0045e5..0e913c19dc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -4,6 +4,8 @@
#include "src/wasm/baseline/liftoff-assembler.h"
+#include <sstream>
+
#include "src/assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
@@ -334,10 +336,21 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
+namespace {
+
+constexpr AssemblerOptions DefaultLiftoffOptions() {
+ return AssemblerOptions{};
+}
+
+} // namespace
+
// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
-LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
- : TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kNo) {}
+LiftoffAssembler::LiftoffAssembler()
+ : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
+ CodeObjectRequired::kNo) {
+ set_trap_on_abort(true); // Avoid calls to Abort.
+}
LiftoffAssembler::~LiftoffAssembler() {
if (num_locals_ > kInlineLocalTypes) {
@@ -480,7 +493,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
const int num_lowered_params = is_pair ? 2 : 1;
const uint32_t stack_idx = param_base + param;
const VarState& slot = cache_state_.stack_state[stack_idx];
- // Process both halfs of register pair separately, because they are passed
+ // Process both halfs of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
const RegPairHalf half =
@@ -589,6 +602,34 @@ void LiftoffAssembler::ParallelRegisterMove(
}
}
+bool LiftoffAssembler::ValidateCacheState() const {
+ uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
+ LiftoffRegList used_regs;
+ for (const VarState& var : cache_state_.stack_state) {
+ if (!var.is_reg()) continue;
+ LiftoffRegister reg = var.reg();
+ if (kNeedI64RegPair && reg.is_pair()) {
+ ++register_use_count[reg.low().liftoff_code()];
+ ++register_use_count[reg.high().liftoff_code()];
+ } else {
+ ++register_use_count[reg.liftoff_code()];
+ }
+ used_regs.set(reg);
+ }
+ bool valid = memcmp(register_use_count, cache_state_.register_use_count,
+ sizeof(register_use_count)) == 0 &&
+ used_regs == cache_state_.used_registers;
+ if (valid) return true;
+ std::ostringstream os;
+ os << "Error in LiftoffAssembler::ValidateCacheState().\n";
+ os << "expected: used_regs " << used_regs << ", counts "
+ << PrintCollection(register_use_count) << "\n";
+ os << "found: used_regs " << cache_state_.used_registers << ", counts "
+ << PrintCollection(cache_state_.register_use_count) << "\n";
+ os << "Use --trace-liftoff to debug.";
+ FATAL("%s", os.str().c_str());
+}
+
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 38dbde02ba..822c620b82 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -14,16 +14,20 @@
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
namespace v8 {
namespace internal {
-namespace wasm {
// Forward declarations.
-struct ModuleEnv;
+namespace compiler {
+class CallDescriptor;
+}
+
+namespace wasm {
class LiftoffAssembler : public TurboAssembler {
public:
@@ -243,7 +247,7 @@ class LiftoffAssembler : public TurboAssembler {
CacheState(const CacheState&) = delete;
};
- explicit LiftoffAssembler(Isolate* isolate);
+ LiftoffAssembler();
~LiftoffAssembler();
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
@@ -294,16 +298,6 @@ class LiftoffAssembler : public TurboAssembler {
return SpillOneRegister(candidates, pinned);
}
- void DropStackSlot(VarState* slot) {
- // The only loc we care about is register. Other types don't occupy
- // anything.
- if (!slot->is_reg()) return;
- // Free the register, then set the loc to "stack".
- // No need to write back, the value should be dropped.
- cache_state_.dec_used(slot->reg());
- slot->MakeStack();
- }
-
void MergeFullStackWith(CacheState&);
void MergeStackWith(CacheState&, uint32_t arity);
@@ -339,6 +333,9 @@ class LiftoffAssembler : public TurboAssembler {
};
void ParallelRegisterMove(std::initializer_list<ParallelRegisterMoveTuple>);
+ // Validate that the register use counts reflect the state of the cache.
+ bool ValidateCacheState() const;
+
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
@@ -347,8 +344,8 @@ class LiftoffAssembler : public TurboAssembler {
// size of the stack frame is known. It returns an offset in the machine code
// which can later be patched (via {PatchPrepareStackFrame)} when the size of
// the frame is known.
- inline uint32_t PrepareStackFrame();
- inline void PatchPrepareStackFrame(uint32_t offset, uint32_t stack_slots);
+ inline int PrepareStackFrame();
+ inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
inline void FinishCode();
inline void AbortCompilation();
@@ -441,6 +438,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned = {});
+ inline void emit_i32_to_intptr(Register dst, Register src);
+
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
if (kPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
@@ -517,7 +516,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
- inline void StackCheck(Label* ool_code);
+ inline void StackCheck(Label* ool_code, Register limit_address);
inline void CallTrapCallbackForTesting();
@@ -538,11 +537,11 @@ class LiftoffAssembler : public TurboAssembler {
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
- inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
inline void CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
+ inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
// Reserve space in the current frame, store address to space in {addr}.
inline void AllocateStackSlot(Register addr, uint32_t size);
@@ -573,12 +572,15 @@ class LiftoffAssembler : public TurboAssembler {
}
CacheState* cache_state() { return &cache_state_; }
+ const CacheState* cache_state() const { return &cache_state_; }
bool did_bailout() { return bailout_reason_ != nullptr; }
const char* bailout_reason() const { return bailout_reason_; }
void bailout(const char* reason) {
- if (bailout_reason_ == nullptr) bailout_reason_ = reason;
+ if (bailout_reason_ != nullptr) return;
+ AbortCompilation();
+ bailout_reason_ = reason;
}
private:
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 2852353b7c..1130cf0cdd 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -11,6 +11,7 @@
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
+#include "src/tracing/trace-event.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
@@ -29,7 +30,7 @@ constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
-#define __ asm_->
+#define __ asm_.
#define TRACE(...) \
do { \
@@ -115,31 +116,25 @@ class LiftoffCompiler {
struct OutOfLineCode {
MovableLabel label;
MovableLabel continuation;
- Builtins::Name builtin;
+ WasmCode::RuntimeStubId stub;
WasmCodePosition position;
LiftoffRegList regs_to_save;
uint32_t pc; // for trap handler.
// Named constructors:
- static OutOfLineCode Trap(Builtins::Name b, WasmCodePosition pos,
+ static OutOfLineCode Trap(WasmCode::RuntimeStubId s, WasmCodePosition pos,
uint32_t pc) {
DCHECK_LT(0, pos);
- return {{}, {}, b, pos, {}, pc};
+ return {{}, {}, s, pos, {}, pc};
}
static OutOfLineCode StackCheck(WasmCodePosition pos, LiftoffRegList regs) {
- return {{}, {}, Builtins::kWasmStackGuard, pos, regs, 0};
+ return {{}, {}, WasmCode::kWasmStackGuard, pos, regs, 0};
}
};
- LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
- SourcePositionTableBuilder* source_position_table_builder,
- std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions,
- Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone,
- WasmCode* const* code_table_entry)
- : asm_(liftoff_asm),
- descriptor_(
+ LiftoffCompiler(compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
+ Zone* compilation_zone)
+ : descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
@@ -147,17 +142,29 @@ class LiftoffCompiler {
? env_->module->maximum_pages
: wasm::kV8MaxWasmMemoryPages} *
wasm::kWasmPageSize),
- source_position_table_builder_(source_position_table_builder),
- protected_instructions_(protected_instructions),
compilation_zone_(compilation_zone),
- codegen_zone_(codegen_zone),
- safepoint_table_builder_(compilation_zone_),
- code_table_entry_(code_table_entry) {}
+ safepoint_table_builder_(compilation_zone_) {}
~LiftoffCompiler() { BindUnboundLabels(nullptr); }
bool ok() const { return ok_; }
+ void GetCode(CodeDesc* desc) { asm_.GetCode(nullptr, desc); }
+
+ OwnedVector<uint8_t> GetSourcePositionTable() {
+ return source_position_table_builder_.ToSourcePositionTableVector();
+ }
+
+ OwnedVector<trap_handler::ProtectedInstructionData> GetProtectedInstructions()
+ const {
+ return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
+ protected_instructions_);
+ }
+
+ uint32_t GetTotalFrameSlotCount() const {
+ return __ GetTotalFrameSlotCount();
+ }
+
void unsupported(Decoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
@@ -166,8 +173,8 @@ class LiftoffCompiler {
}
bool DidAssemblerBailout(Decoder* decoder) {
- if (decoder->failed() || !asm_->did_bailout()) return false;
- unsupported(decoder, asm_->bailout_reason());
+ if (decoder->failed() || !__ did_bailout()) return false;
+ unsupported(decoder, __ bailout_reason());
return true;
}
@@ -293,65 +300,12 @@ class LiftoffCompiler {
out_of_line_code_.push_back(
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
- __ StackCheck(ool.label.get());
+ LiftoffRegister limit_address = __ GetUnusedRegister(kGpReg);
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerLoadType);
+ __ StackCheck(ool.label.get(), limit_address.gp());
__ bind(ool.continuation.get());
}
- // Inserts a check whether the optimized version of this code already exists.
- // If so, it redirects execution to the optimized code.
- void JumpToOptimizedCodeIfExisting(LiftoffRegList param_regs) {
- // We need one register to keep the address of the optimized
- // code that is not used to keep parameters.
- LiftoffRegister address_tmp = LiftoffRegister(kNoParamRegister);
- DCHECK(!param_regs.has(address_tmp));
-
- LiftoffRegList available_regs = kGpCacheRegList & ~param_regs;
- // We already use the {address_tmp} later, so remove it too.
- available_regs.clear(address_tmp);
-
- // We require one general purpose register.
- if (available_regs.is_empty()) {
- LiftoffRegList taken_gp_regs = kGpCacheRegList & param_regs;
- LiftoffRegister reg = taken_gp_regs.GetFirstRegSet();
- available_regs.set(reg);
- }
-
- LiftoffRegister tmp = available_regs.GetFirstRegSet();
- if (param_regs.has(tmp)) __ PushRegisters(LiftoffRegList::ForRegs(tmp));
-
- static LoadType kPointerLoadType =
- LoadType::ForValueType(LiftoffAssembler::kWasmIntPtr);
- using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
- static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
- // Get the address of the WasmCode* currently stored in the code table.
- __ LoadConstant(address_tmp,
- WasmValue(reinterpret_cast<int_t>(code_table_entry_)),
- RelocInfo::WASM_CODE_TABLE_ENTRY);
- // Load the corresponding WasmCode*.
- LiftoffRegister wasm_code_address = tmp;
- __ Load(wasm_code_address, address_tmp.gp(), Register::no_reg(), 0,
- kPointerLoadType, param_regs);
- // Load its target address ({instuctions_.start()}).
- __ Load(address_tmp, wasm_code_address.gp(), Register::no_reg(),
- WasmCode::kInstructionStartOffset, kPointerLoadType, param_regs);
- // Get the current code's target address ({instructions_.start()}).
- LiftoffRegister code_start_address = tmp;
- __ ComputeCodeStartAddress(code_start_address.gp());
-
- // If the current code's target address is the same as the
- // target address of the stored WasmCode, then continue executing, otherwise
- // jump to the updated WasmCode.
- Label cont;
- __ emit_cond_jump(kEqual, &cont, LiftoffAssembler::kWasmIntPtr,
- address_tmp.gp(), code_start_address.gp());
-
- if (param_regs.has(tmp)) __ PopRegisters(LiftoffRegList::ForRegs(tmp));
- __ emit_jump(address_tmp.gp());
-
- __ bind(&cont);
- if (param_regs.has(tmp)) __ PopRegisters(LiftoffRegList::ForRegs(tmp));
- }
-
void StartFunctionBody(Decoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
@@ -372,21 +326,6 @@ class LiftoffCompiler {
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
- if (FLAG_wasm_tier_up) {
- if (!kNoParamRegister.is_valid()) {
- unsupported(decoder, "Please define kNoParamRegister.");
- return;
- }
-
- // Collect all registers that are allocated on function entry.
- LiftoffRegList param_regs;
- param_regs.set(instance_reg);
-
- CollectReservedRegsForParameters(kInstanceParameterIndex + 1, num_params,
- param_regs);
- JumpToOptimizedCodeIfExisting(param_regs);
- }
-
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
@@ -444,14 +383,14 @@ class LiftoffCompiler {
void GenerateOutOfLineCode(OutOfLineCode& ool) {
__ bind(ool.label.get());
- const bool is_stack_check = ool.builtin == Builtins::kWasmStackGuard;
+ const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
- ool.builtin == Builtins::kThrowWasmTrapMemOutOfBounds;
+ ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
if (is_mem_out_of_bounds && env_->use_trap_handler) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
- protected_instructions_->emplace_back(
+ protected_instructions_.emplace_back(
trap_handler::ProtectedInstructionData{ool.pc, pc});
}
@@ -469,11 +408,10 @@ class LiftoffCompiler {
if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
- source_position_table_builder_->AddPosition(
+ source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool.position), false);
- __ Call(__ isolate()->builtins()->builtin_handle(ool.builtin),
- RelocInfo::CODE_TARGET);
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ __ CallRuntimeStub(ool.stub);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
@@ -489,10 +427,10 @@ class LiftoffCompiler {
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
}
- __ FinishCode();
- safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
__ GetTotalFrameSlotCount());
+ __ FinishCode();
+ safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
// The previous calls may have also generated a bailout.
DidAssemblerBailout(decoder);
}
@@ -500,10 +438,12 @@ class LiftoffCompiler {
void OnFirstError(Decoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
+ asm_.AbortCompilation();
}
void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
TraceCacheState(decoder);
+ SLOW_DCHECK(__ ValidateCacheState());
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
@@ -615,7 +555,7 @@ class LiftoffCompiler {
Register),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
- if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
+ if (emit_fn && (asm_.*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn();
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
FunctionSig sig_i_i(1, 1, sig_i_i_reps);
@@ -629,7 +569,7 @@ class LiftoffCompiler {
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
- if ((asm_->*emit_fn)(dst.fp(), src.fp())) return;
+ if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
ValueType sig_reps[] = {type};
FunctionSig sig(0, 1, sig_reps);
@@ -651,7 +591,7 @@ class LiftoffCompiler {
DCHECK_EQ(can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
- Builtins::kThrowWasmTrapFloatUnrepresentable)
+ WasmCode::kThrowWasmTrapFloatUnrepresentable)
: nullptr;
if (!__ emit_type_conversion(opcode, dst, src, trap)) {
DCHECK_NOT_NULL(fallback_fn);
@@ -690,9 +630,11 @@ class LiftoffCompiler {
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
-#define CASE_FLOAT_UNOP_WITH_CFALLBACK(type, fn) \
- EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
- &ExternalReference::wasm_##fn);
+#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
+ &ExternalReference::wasm_##fn); \
+ break;
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
case WasmOpcode::kExpr##opcode: \
EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
@@ -711,10 +653,10 @@ class LiftoffCompiler {
CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
- CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_ceil)
- CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_floor)
- CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_trunc)
- CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_nearest_int)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
@@ -943,11 +885,11 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
- AddOutOfLineTrap(position, Builtins::kThrowWasmTrapDivByZero);
+ AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
// the first one, thus get both pointers afterwards.
AddOutOfLineTrap(position,
- Builtins::kThrowWasmTrapDivUnrepresentable);
+ WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
__ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
@@ -959,7 +901,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapDivByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
__ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
});
break;
@@ -968,7 +910,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapRemByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
break;
@@ -977,7 +919,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapRemByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
break;
@@ -986,11 +928,11 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
- AddOutOfLineTrap(position, Builtins::kThrowWasmTrapDivByZero);
+ AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
// the first one, thus get both pointers afterwards.
AddOutOfLineTrap(position,
- Builtins::kThrowWasmTrapDivUnrepresentable);
+ WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
@@ -1006,7 +948,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapDivByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
@@ -1018,7 +960,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapRemByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
@@ -1030,7 +972,7 @@ class LiftoffCompiler {
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapRemByZero);
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
@@ -1088,7 +1030,9 @@ class LiftoffCompiler {
}
void Drop(Decoder* decoder, const Value& value) {
- __ DropStackSlot(&__ cache_state()->stack_state.back());
+ auto& slot = __ cache_state()->stack_state.back();
+ // If the dropped slot contains a register, decrement it's use count.
+ if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
__ cache_state()->stack_state.pop_back();
}
@@ -1148,6 +1092,7 @@ class LiftoffCompiler {
return;
}
state.dec_used(slot_reg);
+ dst_slot.MakeStack();
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
@@ -1163,12 +1108,12 @@ class LiftoffCompiler {
auto& target_slot = state.stack_state[local_index];
switch (source_slot.loc()) {
case kRegister:
- __ DropStackSlot(&target_slot);
+ if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
case KIntConst:
- __ DropStackSlot(&target_slot);
+ if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot;
break;
case kStack:
@@ -1235,7 +1180,7 @@ class LiftoffCompiler {
void Unreachable(Decoder* decoder) {
Label* unreachable_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapUnreachable);
+ decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
__ emit_jump(unreachable_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -1359,15 +1304,15 @@ class LiftoffCompiler {
__ cache_state()->Steal(if_block->else_state->state);
}
- Label* AddOutOfLineTrap(WasmCodePosition position, Builtins::Name builtin,
- uint32_t pc = 0) {
+ Label* AddOutOfLineTrap(WasmCodePosition position,
+ WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
DCHECK(!FLAG_wasm_no_bounds_checks);
// The pc is needed for memory OOB trap with trap handler enabled. Other
// callers should not even compute it.
- DCHECK_EQ(pc != 0, builtin == Builtins::kThrowWasmTrapMemOutOfBounds &&
+ DCHECK_EQ(pc != 0, stub == WasmCode::kThrowWasmTrapMemOutOfBounds &&
env_->use_trap_handler);
- out_of_line_code_.push_back(OutOfLineCode::Trap(builtin, position, pc));
+ out_of_line_code_.push_back(OutOfLineCode::Trap(stub, position, pc));
return out_of_line_code_.back().label.get();
}
@@ -1387,7 +1332,7 @@ class LiftoffCompiler {
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
Label* trap_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds,
+ decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds,
env_->use_trap_handler ? __ pc_offset() : 0);
if (statically_oob) {
@@ -1455,8 +1400,8 @@ class LiftoffCompiler {
__ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
address, StoreType::kI32Store8, pinned);
- source_position_table_builder_->AddPosition(
- __ pc_offset(), SourcePosition(position), false);
+ source_position_table_builder_.AddPosition(__ pc_offset(),
+ SourcePosition(position), false);
Register args[] = {info.gp()};
GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
@@ -1480,19 +1425,18 @@ class LiftoffCompiler {
LiftoffAssembler::kWasmIntPtr);
} else {
DCHECK(param_loc.IsCallerFrameSlot());
- LiftoffStackSlots stack_slots(asm_);
+ LiftoffStackSlots stack_slots(&asm_);
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
LiftoffRegister(args[0])));
stack_slots.Construct();
}
- // Allocate the codegen zone if not done before.
- if (!*codegen_zone_) {
- codegen_zone_->reset(
- new Zone(__ isolate()->allocator(), "LiftoffCodegenZone"));
- }
- __ CallRuntime(codegen_zone_->get(), runtime_function);
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ // Set context to zero (Smi::kZero) for the runtime call.
+ __ TurboAssembler::Move(kContextRegister, Smi::kZero);
+ LiftoffRegister centry(kJavaScriptCallCodeStartRegister);
+ LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerLoadType);
+ __ CallRuntimeWithCEntry(runtime_function, centry.gp());
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
}
@@ -1540,7 +1484,7 @@ class LiftoffCompiler {
&protected_load_pc, true);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
- Builtins::kThrowWasmTrapMemOutOfBounds,
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(value_type, value);
@@ -1572,7 +1516,7 @@ class LiftoffCompiler {
&protected_store_pc, true);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
- Builtins::kThrowWasmTrapMemOutOfBounds,
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
if (FLAG_wasm_trace_memory) {
@@ -1594,38 +1538,6 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, mem_size);
}
- void Int32ToSmi(LiftoffRegister dst, Register src, Register scratch) {
- constexpr int kTotalSmiShift = kSmiTagSize + kSmiShiftSize;
- // TODO(clemensh): Shift by immediate directly.
- if (kPointerSize == 4) {
- __ LoadConstant(LiftoffRegister(scratch),
- WasmValue(int32_t{kTotalSmiShift}));
- __ emit_i32_shl(dst.gp(), src, scratch);
- } else {
- __ LoadConstant(LiftoffRegister(scratch),
- WasmValue(int64_t{kTotalSmiShift}));
- __ emit_i64_shl(dst, LiftoffRegister(src), scratch);
- }
- }
-
- void SmiToInt32(Register dst, LiftoffRegister src, Register scratch) {
- constexpr int kTotalSmiShift = kSmiTagSize + kSmiShiftSize;
- // TODO(clemensh): Shift by immediate directly.
- if (kPointerSize == 4) {
- __ LoadConstant(LiftoffRegister(scratch),
- WasmValue(int32_t{kTotalSmiShift}));
- __ emit_i32_sar(dst, src.gp(), scratch);
- } else {
- // Assert that we shift by exactly 32 bit. This makes the returned value a
- // zero-extended 32-bit value without emitting further instructions.
- static_assert(kPointerSize == 4 || kTotalSmiShift == 32,
- "shift by exactly 32 bit");
- __ LoadConstant(LiftoffRegister(scratch),
- WasmValue(int64_t{kTotalSmiShift}));
- __ emit_i64_shr(LiftoffRegister(dst), src, scratch);
- }
- }
-
void GrowMemory(Decoder* decoder, const Value& value, Value* result_val) {
// Pop the input, then spill all cache registers to make the runtime call.
LiftoffRegList pinned;
@@ -1638,28 +1550,23 @@ class LiftoffCompiler {
"complex code here otherwise)");
LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
- LiftoffRegister tmp_const =
- pinned.set(__ cache_state()->unused_register(kGpReg, pinned));
-
- Label done;
- Label do_runtime_call;
- // TODO(clemensh): Compare to immediate directly.
- __ LoadConstant(tmp_const, WasmValue(uint32_t{FLAG_wasm_max_mem_pages}));
- __ emit_cond_jump(kUnsignedLessEqual, &do_runtime_call, kWasmI32,
- input.gp(), tmp_const.gp());
- __ LoadConstant(result, WasmValue(int32_t{-1}));
- __ emit_jump(&done);
-
- // TODO(clemensh): Introduce new builtin for smi-conversion, runtime call,
- // and conversion back. Use in TF and here.
- __ bind(&do_runtime_call);
- LiftoffRegister input_smi = input;
- Int32ToSmi(input_smi, input.gp(), tmp_const.gp());
- Register args[] = {input_smi.gp()};
- GenerateRuntimeCall(Runtime::kWasmGrowMemory, arraysize(args), args);
- SmiToInt32(result.gp(), result, tmp_const.gp());
-
- __ bind(&done);
+ WasmGrowMemoryDescriptor descriptor;
+ DCHECK_EQ(0, descriptor.GetStackParameterCount());
+ DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
+ DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
+ descriptor.GetParameterType(0));
+
+ Register param_reg = descriptor.GetRegisterParameter(0);
+ if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
+
+ __ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ if (kReturnRegister0 != result.gp()) {
+ __ Move(result.gp(), kReturnRegister0, kWasmI32);
+ }
+
__ PushRegister(kWasmI32, result);
}
@@ -1700,12 +1607,12 @@ class LiftoffCompiler {
LiftoffRegister* explicit_instance = &target_instance;
Register target_reg = target.gp();
__ PrepareCall(imm.sig, call_descriptor, &target_reg, explicit_instance);
- source_position_table_builder_->AddPosition(
+ source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
__ CallIndirect(imm.sig, call_descriptor, target_reg);
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1713,14 +1620,14 @@ class LiftoffCompiler {
// A direct call within this module just gets the current instance.
__ PrepareCall(imm.sig, call_descriptor);
- source_position_table_builder_->AddPosition(
+ source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
__ CallNativeWasmCode(addr);
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1759,7 +1666,7 @@ class LiftoffCompiler {
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
+ decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
DCHECK_GE(canonical_sig_num, 0);
@@ -1809,7 +1716,7 @@ class LiftoffCompiler {
__ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
Label* sig_mismatch_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label,
LiftoffAssembler::kWasmIntPtr, scratch.gp(),
tmp_const.gp());
@@ -1833,7 +1740,7 @@ class LiftoffCompiler {
pinned);
LiftoffRegister* explicit_instance = &tmp_const;
- source_position_table_builder_->AddPosition(
+ source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
auto call_descriptor =
@@ -1845,7 +1752,7 @@ class LiftoffCompiler {
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1886,7 +1793,7 @@ class LiftoffCompiler {
}
private:
- LiftoffAssembler* const asm_;
+ LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
ModuleEnv* const env_;
// {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
@@ -1894,39 +1801,28 @@ class LiftoffCompiler {
const uint64_t max_size_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
- SourcePositionTableBuilder* const source_position_table_builder_;
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
+ SourcePositionTableBuilder source_position_table_builder_;
+ std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
// Zone used to store information during compilation. The result will be
// stored independently, such that this zone can die together with the
// LiftoffCompiler after compilation.
Zone* compilation_zone_;
- // This zone is allocated when needed, held externally, and survives until
- // code generation (in FinishCompilation).
- std::unique_ptr<Zone>* codegen_zone_;
SafepointTableBuilder safepoint_table_builder_;
// The pc offset of the instructions to reserve the stack frame. Needed to
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
- // Points to the cell within the {code_table_} of the NativeModule,
- // which corresponds to the currently compiled function
- WasmCode* const* code_table_entry_ = nullptr;
-
void TraceCacheState(Decoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
- OFStream os(stdout);
+ StdoutStream os;
for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
--control_depth) {
- LiftoffAssembler::CacheState* cache_state =
- control_depth == -1
- ? asm_->cache_state()
- : &decoder->control_at(control_depth)->label_state;
- bool first = true;
- for (LiftoffAssembler::VarState& slot : cache_state->stack_state) {
- os << (first ? "" : "-") << slot;
- first = false;
- }
+ auto* cache_state =
+ control_depth == -1 ? __ cache_state()
+ : &decoder->control_at(control_depth)
+ ->label_state;
+ os << PrintCollection(cache_state->stack_state);
if (control_depth != -1) PrintF("; ");
}
os << "\n";
@@ -1937,38 +1833,34 @@ class LiftoffCompiler {
} // namespace
bool LiftoffCompilationUnit::ExecuteCompilation() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
if (FLAG_trace_wasm_decode_time) {
compile_timer.Start();
}
- Zone zone(wasm_unit_->isolate_->allocator(), "LiftoffCompilationZone");
+ Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module =
wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
auto call_descriptor =
compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, wasm_unit_->counters_->liftoff_compile_time());
- wasm::WasmCode* const* code_table_entry =
- wasm_unit_->native_module_->code_table().data() + wasm_unit_->func_index_;
- DCHECK(!protected_instructions_);
- protected_instructions_.reset(
- new std::vector<trap_handler::ProtectedInstructionData>());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, wasm_unit_->func_body_, &asm_, call_descriptor,
- wasm_unit_->env_, &source_position_table_builder_,
- protected_instructions_.get(), &zone, &codegen_zone_,
- code_table_entry);
+ decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
+ wasm_unit_->env_, &zone);
decoder.Decode();
liftoff_compile_time_scope.reset();
- if (!decoder.interface().ok()) {
+ wasm::LiftoffCompiler* compiler = &decoder.interface();
+ if (decoder.failed()) return false; // validation error
+ if (!compiler->ok()) {
// Liftoff compilation failed.
- wasm_unit_->isolate_->counters()
- ->liftoff_unsupported_functions()
- ->Increment();
+ wasm_unit_->counters_->liftoff_unsupported_functions()->Increment();
return false;
}
- if (decoder.failed()) return false; // Validation error
+
+ wasm_unit_->counters_->liftoff_compiled_functions()->Increment();
if (FLAG_trace_wasm_decode_time) {
double compile_ms = compile_timer.Elapsed().InMillisecondsF();
@@ -1980,40 +1872,25 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
compile_ms);
}
- // Record the memory cost this unit places on the system until
- // it is finalized.
- wasm_unit_->memory_cost_ =
- asm_.pc_offset() +
- protected_instructions_->size() *
- sizeof(trap_handler::ProtectedInstructionData) +
- (codegen_zone_ ? codegen_zone_->allocation_size() : 0);
-
- safepoint_table_offset_ = decoder.interface().GetSafepointTableOffset();
- wasm_unit_->isolate_->counters()->liftoff_compiled_functions()->Increment();
- return true;
-}
-
-wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(
- wasm::ErrorThrower* thrower) {
CodeDesc desc;
- asm_.GetCode(wasm_unit_->isolate_, &desc);
+ compiler->GetCode(&desc);
+ OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
+ compiler->GetProtectedInstructions();
+ uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
+ int safepoint_table_offset = compiler->GetSafepointTableOffset();
+
+ code_ = wasm_unit_->native_module_->AddCode(
+ wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
+ 0, std::move(protected_instructions), std::move(source_positions),
+ wasm::WasmCode::kLiftoff);
+ wasm_unit_->native_module_->PublishCode(code_);
- Handle<ByteArray> source_positions =
- source_position_table_builder_.ToSourcePositionTable(
- wasm_unit_->isolate_);
-
- wasm::WasmCode* code = wasm_unit_->native_module_->AddCode(
- desc, asm_.GetTotalFrameSlotCount(), wasm_unit_->func_index_,
- safepoint_table_offset_, 0, std::move(protected_instructions_),
- source_positions, wasm::WasmCode::kLiftoff);
-
- return code;
+ return true;
}
-void LiftoffCompilationUnit::AbortCompilation() {
- // The compilation is aborted. Put the assembler in a clean mode before
- // its deletion.
- asm_.AbortCompilation();
+wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
+ return code_;
}
#undef __
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 322b2f7d54..ce828c459b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -5,36 +5,29 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
#define V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
-#include "src/source-position-table.h"
-#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/baseline/liftoff-assembler.h"
-#include "src/wasm/function-body-decoder.h"
-#include "src/wasm/function-compiler.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
namespace wasm {
+class ErrorThrower;
+class WasmCode;
+class WasmCompilationUnit;
+
class LiftoffCompilationUnit final {
public:
explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit), asm_(wasm_unit->isolate_) {}
+ : wasm_unit_(wasm_unit) {}
bool ExecuteCompilation();
wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
- void AbortCompilation();
private:
WasmCompilationUnit* const wasm_unit_;
- wasm::LiftoffAssembler asm_;
- int safepoint_table_offset_;
- SourcePositionTableBuilder source_position_table_builder_;
- std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
- protected_instructions_;
-
- // The {codegen_zone_} needs to survive until FinishCompilation. It's only
- // rarely used (e.g. for runtime calls), so it's only allocated when needed.
- std::unique_ptr<Zone> codegen_zone_;
+
+ // Result of compilation:
+ WasmCode* code_;
DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
};
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 6657cd0c95..d2ea65211b 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -100,8 +100,8 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
} // namespace liftoff
-uint32_t LiftoffAssembler::PrepareStackFrame() {
- uint32_t offset = static_cast<uint32_t>(pc_offset());
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
// When constant that represents size of stack frame can't be represented
// as 16bit we need three instructions to add it to sp, so we reserve space
// for this case.
@@ -111,15 +111,16 @@ uint32_t LiftoffAssembler::PrepareStackFrame() {
return offset;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(isolate(), buffer_ + offset,
- kAvailableSpace, CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
+ buffer_ + offset, kAvailableSpace,
+ CodeObjectRequired::kNo);
// If bytes can be represented as 16bit, addiu will be generated and two
// nops will stay untouched. Otherwise, lui-ori sequence will load it to
// register and, as third instruction, addu will be generated.
@@ -743,6 +744,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair, pinned);
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ UNREACHABLE();
+}
+
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
@@ -1210,12 +1215,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
-void LiftoffAssembler::StackCheck(Label* ool_code) {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
- TurboAssembler::li(
- tmp.gp(), Operand(ExternalReference::address_of_stack_limit(isolate())));
- TurboAssembler::Ulw(tmp.gp(), MemOperand(tmp.gp()));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(tmp.gp()));
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Ulw(limit_address, MemOperand(limit_address));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -1329,12 +1331,6 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set instance to zero.
- TurboAssembler::Move(cp, zero_reg);
- CallRuntimeDelayed(zone, fid);
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
@@ -1346,6 +1342,12 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
}
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
addiu(sp, sp, -size);
TurboAssembler::Move(addr, sp);
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index f85049d927..fdbbe0f7d4 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -90,8 +90,8 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
} // namespace liftoff
-uint32_t LiftoffAssembler::PrepareStackFrame() {
- uint32_t offset = static_cast<uint32_t>(pc_offset());
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
// When constant that represents size of stack frame can't be represented
// as 16bit we need three instructions to add it to sp, so we reserve space
// for this case.
@@ -101,15 +101,16 @@ uint32_t LiftoffAssembler::PrepareStackFrame() {
return offset;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
uint64_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(isolate(), buffer_ + offset,
- kAvailableSpace, CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
+ buffer_ + offset, kAvailableSpace,
+ CodeObjectRequired::kNo);
// If bytes can be represented as 16bit, daddiu will be generated and two
// nops will stay untouched. Otherwise, lui-ori sequence will load it to
// register and, as third instruction, daddu will be generated.
@@ -633,6 +634,10 @@ I64_SHIFTOP(shr, dsrlv)
#undef I64_SHIFTOP
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ addu(dst, src, zero_reg);
+}
+
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
@@ -1074,12 +1079,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
-void LiftoffAssembler::StackCheck(Label* ool_code) {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
- TurboAssembler::li(
- tmp.gp(), Operand(ExternalReference::address_of_stack_limit(isolate())));
- TurboAssembler::Uld(tmp.gp(), MemOperand(tmp.gp()));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(tmp.gp()));
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Uld(limit_address, MemOperand(limit_address));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -1193,12 +1195,6 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set instance to zero.
- TurboAssembler::Move(cp, zero_reg);
- CallRuntimeDelayed(zone, fid);
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
@@ -1210,6 +1206,12 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
}
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
daddiu(sp, sp, -size);
TurboAssembler::Move(addr, sp);
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 824302a94b..a4bd20622e 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -13,12 +13,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-uint32_t LiftoffAssembler::PrepareStackFrame() {
+int LiftoffAssembler::PrepareStackFrame() {
BAILOUT("PrepareStackFrame");
return 0;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
BAILOUT("PatchPrepareStackFrame");
}
@@ -257,6 +257,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ UNREACHABLE();
+}
+
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
@@ -305,7 +309,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
BAILOUT("emit_f64_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ BAILOUT("StackCheck");
+}
void LiftoffAssembler::CallTrapCallbackForTesting() {
BAILOUT("CallTrapCallbackForTesting");
@@ -339,16 +345,16 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- BAILOUT("CallRuntime");
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
BAILOUT("CallIndirect");
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ BAILOUT("CallRuntimeStub");
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
BAILOUT("AllocateStackSlot");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 93502d37c8..ee142c7be4 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -13,12 +13,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-uint32_t LiftoffAssembler::PrepareStackFrame() {
+int LiftoffAssembler::PrepareStackFrame() {
BAILOUT("PrepareStackFrame");
return 0;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
BAILOUT("PatchPrepareStackFrame");
}
@@ -257,6 +257,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ UNREACHABLE();
+}
+
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
@@ -305,7 +309,9 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
BAILOUT("emit_f64_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ BAILOUT("StackCheck");
+}
void LiftoffAssembler::CallTrapCallbackForTesting() {
BAILOUT("CallTrapCallbackForTesting");
@@ -339,16 +345,16 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- BAILOUT("CallRuntime");
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
BAILOUT("CallIndirect");
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ BAILOUT("CallRuntimeStub");
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
BAILOUT("AllocateStackSlot");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 0b92e1292c..b8d08c56aa 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -115,20 +115,21 @@ inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
} // namespace liftoff
-uint32_t LiftoffAssembler::PrepareStackFrame() {
- uint32_t offset = static_cast<uint32_t>(pc_offset());
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
sub_sp_32(0);
return offset;
}
-void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
+ kAvailableSpace);
patching_assembler.sub_sp_32(bytes);
}
@@ -755,6 +756,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&Assembler::shrq_cl, pinned);
}
+void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
+ movsxlq(dst, src);
+}
+
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -1302,10 +1307,8 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
rhs);
}
-void LiftoffAssembler::StackCheck(Label* ool_code) {
- Operand stack_limit = ExternalOperand(
- ExternalReference::address_of_stack_limit(isolate()), kScratchRegister);
- cmpp(rsp, stack_limit);
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ cmpp(rsp, Operand(limit_address, 0));
j(below_equal, ool_code);
}
@@ -1415,12 +1418,6 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
near_call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set context to zero (Smi::kZero) for the runtime call.
- xorp(kContextRegister, kContextRegister);
- CallRuntimeDelayed(zone, fid);
-}
-
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
@@ -1435,6 +1432,12 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
}
}
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ near_call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
movp(addr, rsp);
@@ -1449,7 +1452,18 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- asm_->pushq(liftoff::GetStackSlot(slot.src_index_));
+ if (src.type() == kWasmI32) {
+ // Load i32 values to a register first to ensure they are zero
+ // extended.
+ asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_index_));
+ asm_->pushq(kScratchRegister);
+ } else {
+ // For all other types, just push the whole (8-byte) stack slot.
+ // This is also ok for f32 values (even though we copy 4 uninitialized
+ // bytes), because f32 and f64 values are clearly distinguished in
+ // Turbofan, so the uninitialized bytes are never accessed.
+ asm_->pushq(liftoff::GetStackSlot(slot.src_index_));
+ }
break;
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 10e624d1bb..621f905d44 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -38,13 +38,13 @@ struct WasmException;
}())
#define RET_ON_PROTOTYPE_OPCODE(flag) \
- DCHECK(!this->module_ || !this->module_->is_asm_js()); \
+ DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
if (!FLAG_experimental_wasm_##flag) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
}
#define CHECK_PROTOTYPE_OPCODE(flag) \
- DCHECK(!this->module_ || !this->module_->is_asm_js()); \
+ DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
if (!FLAG_experimental_wasm_##flag) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
break; \
@@ -248,6 +248,9 @@ struct BlockTypeImmediate {
case kLocalS128:
*result = kWasmS128;
return true;
+ case kLocalAnyFunc:
+ *result = kWasmAnyFunc;
+ return true;
case kLocalAnyRef:
*result = kWasmAnyRef;
return true;
@@ -846,7 +849,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && !module_->function_tables.empty())) {
+ if (!VALIDATE(module_ != nullptr && !module_->tables.empty())) {
error("function table has to exist to execute call_indirect");
return false;
}
@@ -940,8 +943,9 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc,
Simd8x16ShuffleImmediate<validate>& imm) {
uint8_t max_lane = 0;
- for (uint32_t i = 0; i < kSimd128Size; ++i)
+ for (uint32_t i = 0; i < kSimd128Size; ++i) {
max_lane = std::max(max_lane, imm.shuffle[i]);
+ }
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
if (!VALIDATE(max_lane <= 2 * kSimd128Size)) {
error(pc_ + 2, "invalid shuffle mask");
@@ -950,6 +954,24 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Complete(BlockTypeImmediate<validate>& imm) {
+ if (imm.type != kWasmVar) return true;
+ if (!VALIDATE((module_ && imm.sig_index < module_->signatures.size()))) {
+ return false;
+ }
+ imm.sig = module_->signatures[imm.sig_index];
+ return true;
+ }
+
+ inline bool Validate(BlockTypeImmediate<validate>& imm) {
+ if (!Complete(imm)) {
+ errorf(pc_, "block type index %u out of bounds (%zu signatures)",
+ imm.sig_index, module_ ? module_->signatures.size() : 0);
+ return false;
+ }
+ return true;
+ }
+
static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
@@ -1138,13 +1160,11 @@ class WasmDecoder : public Decoder {
case kSimdPrefix: {
opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
switch (opcode) {
- case kExprI32AtomicStore:
- case kExprI32AtomicStore8U:
- case kExprI32AtomicStore16U:
- case kExprS128StoreMem:
- return {2, 0};
- FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
+ FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMD_MASK_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+ return {2, 1};
default: {
sig = WasmOpcodes::Signature(opcode);
if (sig) {
@@ -1403,7 +1423,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#define TRACE_PART(...)
#endif
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ FunctionSig* sig = const_cast<FunctionSig*>(kSimpleOpcodeSigs[opcode]);
if (sig) {
BuildSimpleOperator(opcode, sig);
} else {
@@ -1413,7 +1433,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
case kExprBlock: {
BlockTypeImmediate<validate> imm(this, this->pc_);
- if (!LookupBlockType(&imm)) break;
+ if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* block = PushBlock();
SetBlockType(block, imm);
@@ -1442,7 +1462,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprTry: {
CHECK_PROTOTYPE_OPCODE(eh);
BlockTypeImmediate<validate> imm(this, this->pc_);
- if (!LookupBlockType(&imm)) break;
+ if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* try_block = PushTry();
SetBlockType(try_block, imm);
@@ -1495,7 +1515,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprLoop: {
BlockTypeImmediate<validate> imm(this, this->pc_);
- if (!LookupBlockType(&imm)) break;
+ if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* block = PushLoop();
SetBlockType(&control_.back(), imm);
@@ -1506,7 +1526,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprIf: {
BlockTypeImmediate<validate> imm(this, this->pc_);
- if (!LookupBlockType(&imm)) break;
+ if (!this->Validate(imm)) break;
auto cond = Pop(0, kWasmI32);
PopArgs(imm.sig);
if (!VALIDATE(this->ok())) break;
@@ -1830,7 +1850,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
DCHECK_NOT_NULL(this->module_);
- if (!VALIDATE(this->module_->is_wasm())) {
+ if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
this->error("grow_memory is not supported for asmjs modules");
break;
}
@@ -1908,9 +1928,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len += DecodeAtomicOpcode(opcode);
break;
}
+// Note that prototype opcodes are not handled in the fastpath
+// above this switch, to avoid checking a feature flag.
+#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
+ case kExpr##name: /* fallthrough */
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
+#undef SIMPLE_PROTOTYPE_CASE
+ BuildSimplePrototypeOperator(opcode);
+ break;
default: {
// Deal with special asmjs opcodes.
- if (this->module_ != nullptr && this->module_->is_asm_js()) {
+ if (this->module_ != nullptr &&
+ this->module_->origin == kAsmJsOrigin) {
sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -2000,22 +2029,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
current->reachability = kUnreachable;
}
- bool LookupBlockType(BlockTypeImmediate<validate>* imm) {
- if (imm->type == kWasmVar) {
- if (!VALIDATE(this->module_ &&
- imm->sig_index < this->module_->signatures.size())) {
- this->errorf(
- this->pc_, "block type index %u out of bounds (%d signatures)",
- imm->sig_index,
- static_cast<int>(this->module_ ? this->module_->signatures.size()
- : 0));
- return false;
- }
- imm->sig = this->module_->signatures[imm->sig_index];
- }
- return true;
- }
-
template<typename func>
void InitMerge(Merge<Value>* merge, uint32_t arity, func get_val) {
merge->arity = arity;
@@ -2422,14 +2435,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE(OnFirstError);
}
- inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ void BuildSimplePrototypeOperator(WasmOpcode opcode) {
if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
RET_ON_PROTOTYPE_OPCODE(se);
}
if (WasmOpcodes::IsAnyRefOpcode(opcode)) {
RET_ON_PROTOTYPE_OPCODE(anyref);
}
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ BuildSimpleOperator(opcode, sig);
+ }
+ inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 5ad7caebc7..41398eba25 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -846,21 +846,29 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
const wasm::WasmModule* module,
- FunctionBody& body, bool is_wasm,
+ FunctionBody& body, ModuleOrigin origin,
Counters* counters) {
CHECK_LE(0, body.end - body.start);
- auto time_counter = is_wasm ? counters->wasm_decode_wasm_function_time()
- : counters->wasm_decode_asm_function_time();
+ auto time_counter = origin == kWasmOrigin
+ ? counters->wasm_decode_wasm_function_time()
+ : counters->wasm_decode_asm_function_time();
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
return VerifyWasmCode(allocator, module, body);
}
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
- FunctionBody& body) {
+ FunctionBody& body,
+ compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
&zone, builder->module(), body, builder);
+ if (node_origins) {
+ builder->AddBytecodePositionDecorator(node_origins, &decoder);
+ }
decoder.Decode();
+ if (node_origins) {
+ builder->RemoveBytecodePositionDecorator();
+ }
return decoder.toResult(nullptr);
}
@@ -900,15 +908,23 @@ const char* RawOpcodeName(WasmOpcode opcode) {
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
const wasm::WasmModule* module,
PrintLocals print_locals) {
- OFStream os(stdout);
+ StdoutStream os;
+ return PrintRawWasmCode(allocator, body, module, print_locals, os);
+}
+
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+ const wasm::WasmModule* module, PrintLocals print_locals,
+ std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
body.end);
int line_nr = 0;
+ constexpr int kNoByteCode = -1;
// Print the function signature.
if (body.sig) {
os << "// signature: " << *body.sig << std::endl;
+ if (line_numbers) line_numbers->push_back(kNoByteCode);
++line_nr;
}
@@ -931,16 +947,19 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
}
os << std::endl;
+ if (line_numbers) line_numbers->push_back(kNoByteCode);
++line_nr;
for (const byte* locals = body.start; locals < i.pc(); locals++) {
os << (locals == body.start ? "0x" : " 0x") << AsHex(*locals, 2) << ",";
}
os << std::endl;
+ if (line_numbers) line_numbers->push_back(kNoByteCode);
++line_nr;
}
os << "// body: " << std::endl;
+ if (line_numbers) line_numbers->push_back(kNoByteCode);
++line_nr;
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
@@ -948,6 +967,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
WasmOpcode opcode = i.current();
+ if (line_numbers) line_numbers->push_back(i.position());
if (opcode == kExprElse) control_depth--;
int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
@@ -997,8 +1017,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << " // @" << i.pc_offset();
- for (unsigned i = 0; i < imm.out_arity(); i++) {
- os << " " << ValueTypes::TypeName(imm.out_type(i));
+ if (decoder.Complete(imm)) {
+ for (unsigned i = 0; i < imm.out_arity(); i++) {
+ os << " " << ValueTypes::TypeName(imm.out_type(i));
+ }
}
control_depth++;
break;
@@ -1044,6 +1066,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
os << std::endl;
++line_nr;
}
+ DCHECK(!line_numbers || line_numbers->size() == static_cast<size_t>(line_nr));
return decoder.ok();
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index a1470f4937..7dbb800399 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -20,6 +20,7 @@ class BitVector; // forward declaration
class Counters;
namespace compiler { // external declarations from compiler.
+class NodeOriginTable;
class WasmGraphBuilder;
}
@@ -27,6 +28,7 @@ namespace wasm {
typedef compiler::WasmGraphBuilder TFBuilder;
struct WasmModule; // forward declaration of module interface.
+enum ModuleOrigin : uint8_t;
// A wrapper around the signature and bytes of a function.
struct FunctionBody {
@@ -48,16 +50,23 @@ V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
// isolate::async_counters() to guarantee usability of counters argument.
DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
const wasm::WasmModule* module,
- FunctionBody& body, bool is_wasm,
+ FunctionBody& body, ModuleOrigin origin,
Counters* counters);
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
- FunctionBody& body);
+ FunctionBody& body,
+ compiler::NodeOriginTable* node_origins);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
const wasm::WasmModule* module, PrintLocals print_locals);
+V8_EXPORT_PRIVATE
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+ const wasm::WasmModule* module, PrintLocals print_locals,
+ std::ostream& out,
+ std::vector<int>* line_numbers = nullptr);
+
// A simplified form of AST printing, e.g. from a debugger.
void PrintRawWasmCode(const byte* start, const byte* end);
@@ -72,7 +81,7 @@ inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
TFBuilder* builder, FunctionSig* sig,
const byte* start, const byte* end) {
FunctionBody body(sig, 0, start, end);
- return BuildTFGraph(allocator, builder, body);
+ return BuildTFGraph(allocator, builder, body, nullptr);
}
struct BodyLocalDecls {
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index b017074bc7..2e47f82ca3 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,12 +4,11 @@
#include "src/wasm/function-compiler.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -38,15 +37,13 @@ WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
wasm::NativeModule* native_module,
wasm::FunctionBody body,
wasm::WasmName name, int index,
- Handle<Code> centry_stub,
CompilationMode mode,
Counters* counters, bool lower_simd)
- : isolate_(isolate),
- env_(env),
+ : env_(env),
+ wasm_engine_(isolate->wasm_engine()),
func_body_(body),
func_name_(name),
counters_(counters ? counters : isolate->counters()),
- centry_stub_(centry_stub),
func_index_(index),
native_module_(native_module),
lower_simd_(lower_simd),
@@ -56,7 +53,11 @@ WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
// Always disable Liftoff for asm.js, for two reasons:
// 1) asm-specific opcodes are not implemented, and
// 2) tier-up does not work with lazy compilation.
- if (env->module->is_asm_js()) mode = CompilationMode::kTurbofan;
+ if (env->module->origin == kAsmJsOrigin) mode = CompilationMode::kTurbofan;
+ if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
+ (FLAG_wasm_tier_mask_for_testing & (1 << index))) {
+ mode = CompilationMode::kTurbofan;
+ }
SwitchMode(mode);
}
@@ -65,14 +66,12 @@ WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
WasmCompilationUnit::~WasmCompilationUnit() {}
void WasmCompilationUnit::ExecuteCompilation() {
- auto size_histogram = env_->module->is_wasm()
- ? counters_->wasm_wasm_function_size_bytes()
- : counters_->wasm_asm_function_size_bytes();
+ auto size_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
+ wasm, function_size_bytes);
size_histogram->AddSample(
static_cast<int>(func_body_.end - func_body_.start));
- auto timed_histogram = env_->module->is_wasm()
- ? counters_->wasm_compile_wasm_function_time()
- : counters_->wasm_compile_asm_function_time();
+ auto timed_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
+ wasm_compile, function_time);
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
if (FLAG_trace_wasm_compiler) {
@@ -124,7 +123,6 @@ void WasmCompilationUnit::SwitchMode(CompilationMode new_mode) {
return;
case CompilationMode::kTurbofan:
DCHECK(!turbofan_unit_);
- if (liftoff_unit_ != nullptr) liftoff_unit_->AbortCompilation();
liftoff_unit_.reset();
turbofan_unit_.reset(new compiler::TurbofanWasmCompilationUnit(this));
return;
@@ -135,8 +133,9 @@ void WasmCompilationUnit::SwitchMode(CompilationMode new_mode) {
// static
wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
- Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function, CompilationMode mode) {
+ Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
+ CompilationMode mode) {
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
wasm::FunctionBody function_body{
function->sig, function->code.offset(),
wire_bytes.start() + function->code.offset(),
@@ -144,8 +143,7 @@ wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
WasmCompilationUnit unit(isolate, env, native_module, function_body,
wire_bytes.GetNameOrNull(function, env->module),
- function->func_index,
- CodeFactory::CEntry(isolate, 1), mode);
+ function->func_index, mode);
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index ab71760998..a270d36f78 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -20,6 +20,7 @@ class LiftoffCompilationUnit;
struct ModuleWireBytes;
class NativeModule;
class WasmCode;
+class WasmEngine;
struct WasmFunction;
enum RuntimeExceptionSupport : bool {
@@ -63,7 +64,6 @@ class WasmCompilationUnit final {
// only allowed to happen on the foreground thread.
WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::NativeModule*,
wasm::FunctionBody, wasm::WasmName, int index,
- Handle<Code> centry_stub,
CompilationMode = GetDefaultCompilationMode(),
Counters* = nullptr, bool lower_simd = false);
@@ -74,11 +74,9 @@ class WasmCompilationUnit final {
static wasm::WasmCode* CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
- Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
- const wasm::WasmFunction* function,
+ Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
CompilationMode = GetDefaultCompilationMode());
- size_t memory_cost() const { return memory_cost_; }
wasm::NativeModule* native_module() const { return native_module_; }
CompilationMode mode() const { return mode_; }
@@ -86,14 +84,12 @@ class WasmCompilationUnit final {
friend class LiftoffCompilationUnit;
friend class compiler::TurbofanWasmCompilationUnit;
- Isolate* isolate_;
ModuleEnv* env_;
+ WasmEngine* wasm_engine_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
Counters* counters_;
- Handle<Code> centry_stub_;
int func_index_;
- size_t memory_cost_ = 0;
wasm::NativeModule* native_module_;
// TODO(wasm): Put {lower_simd_} inside the {ModuleEnv}.
bool lower_simd_;
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
new file mode 100644
index 0000000000..3ac9d13e89
--- /dev/null
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -0,0 +1,210 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/jump-table-assembler.h"
+
+#include "src/assembler-inl.h"
+#include "src/macro-assembler-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The implementation is compact enough to implement it inline here. If it gets
+// much bigger, we might want to split it in a separate file per architecture.
+#if V8_TARGET_ARCH_X64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ // TODO(clemensh): Try more efficient sequences.
+ // Alternative 1:
+ // [header]: mov r10, [lazy_compile_target]
+ // jmp r10
+ // [slot 0]: push [0]
+ // jmp [header] // pc-relative --> slot size: 10 bytes
+ //
+ // Alternative 2:
+ // [header]: lea r10, [rip - [header]]
+ // shr r10, 3 // compute index from offset
+ // push r10
+ // mov r10, [lazy_compile_target]
+ // jmp r10
+ // [slot 0]: call [header]
+ // ret // -> slot size: 5 bytes
+
+ // Use a push, because mov to an extended register takes 6 bytes.
+ pushq(Immediate(func_index)); // max 5 bytes
+ movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes
+ jmp(kScratchRegister); // 3 bytes
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ movq(kScratchRegister, static_cast<uint64_t>(target));
+ jmp(kScratchRegister);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ Nop(bytes);
+}
+
+#elif V8_TARGET_ARCH_IA32
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ mov(edi, func_index); // 5 bytes
+ jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ jmp(target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ Nop(bytes);
+}
+
+#elif V8_TARGET_ARCH_ARM
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ // Load function index to r4.
+ // This generates [movw, movt] on ARMv7 and later, [ldr, constant pool marker,
+ // constant] on ARMv6.
+ Move32BitImmediate(r4, Operand(func_index));
+ // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
+ // constant].
+ // In total, this is <=5 instructions on all architectures.
+ // TODO(arm): Optimize this for code size; lazy compile is not performance
+ // critical, as it's only executed once per function.
+ EmitJumpSlot(lazy_compile_target);
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ int offset =
+ target - reinterpret_cast<Address>(pc_) - Instruction::kPcLoadDelta;
+ DCHECK_EQ(0, offset % kInstrSize);
+ // If the offset is within 64 MB, emit a direct jump. Otherwise jump
+ // indirectly.
+ if (is_int26(offset)) {
+ b(offset); // 1 instr
+ } else {
+ // {Move32BitImmediate} emits either [movw, movt, mov] or [ldr, constant].
+ Move32BitImmediate(pc, Operand(target));
+ }
+
+ CheckConstPool(true, false); // force emit of const pool
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
+#elif V8_TARGET_ARCH_ARM64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ Mov(w8, func_index); // max. 2 instr
+ Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ Jump(target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstructionSize);
+ for (; bytes > 0; bytes -= kInstructionSize) {
+ nop();
+ }
+}
+
+#elif V8_TARGET_ARCH_S390
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ // Load function index to r7. 6 bytes
+ lgfi(r7, Operand(func_index));
+ // Jump to {lazy_compile_target}. 6 bytes or 12 bytes
+ mov(r1, Operand(lazy_compile_target));
+ b(r1); // 2 bytes
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ mov(r1, Operand(target));
+ b(r1);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % 2);
+ for (; bytes > 0; bytes -= 2) {
+ nop(0);
+ }
+}
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ li(t0, func_index); // max. 2 instr
+ // Jump produces max. 4 instructions for 32-bit platform
+ // and max. 6 instructions for 64-bit platform.
+ Jump(lazy_compile_target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ Jump(target, RelocInfo::NONE);
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
+#elif V8_TARGET_ARCH_PPC
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ // Load function index to r8. max 5 instrs
+ mov(r15, Operand(func_index));
+ // Jump to {lazy_compile_target}. max 5 instrs
+ mov(r0, Operand(lazy_compile_target));
+ mtctr(r0);
+ bctr();
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) {
+ mov(r0, Operand(target));
+ mtctr(r0);
+ bctr();
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % 4);
+ for (; bytes > 0; bytes -= 4) {
+ nop(0);
+ }
+}
+
+#else
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ UNIMPLEMENTED();
+}
+
+void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ UNIMPLEMENTED();
+}
+#endif
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
new file mode 100644
index 0000000000..1ef1a82f41
--- /dev/null
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -0,0 +1,79 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JUMP_TABLE_ASSEMBLER_H_
+#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
+
+#include "src/macro-assembler.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class JumpTableAssembler : public TurboAssembler {
+ public:
+ // Instantiate a {JumpTableAssembler} for patching.
+ explicit JumpTableAssembler(Address slot_addr, int size = 256)
+ : TurboAssembler(nullptr, JumpTableAssemblerOptions(),
+ reinterpret_cast<void*>(slot_addr), size,
+ CodeObjectRequired::kNo) {}
+
+#if V8_TARGET_ARCH_X64
+ static constexpr int kJumpTableSlotSize = 18;
+#elif V8_TARGET_ARCH_IA32
+ static constexpr int kJumpTableSlotSize = 10;
+#elif V8_TARGET_ARCH_ARM
+ static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64
+ static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
+#elif V8_TARGET_ARCH_S390X
+ static constexpr int kJumpTableSlotSize = 20;
+#elif V8_TARGET_ARCH_S390
+ static constexpr int kJumpTableSlotSize = 14;
+#elif V8_TARGET_ARCH_PPC64
+ static constexpr int kJumpTableSlotSize = 48;
+#elif V8_TARGET_ARCH_PPC
+ static constexpr int kJumpTableSlotSize = 24;
+#elif V8_TARGET_ARCH_MIPS
+ static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+#elif V8_TARGET_ARCH_MIPS64
+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+#else
+ static constexpr int kJumpTableSlotSize = 1;
+#endif
+
+ // {JumpTableAssembler} is never used during snapshot generation, and its code
+ // must be independent of the code range of any isolate anyway. Just ensure
+ // that no relocation information is recorded, there is no buffer to store it
+ // since it is instantiated in patching mode in existing code directly.
+ static AssemblerOptions JumpTableAssemblerOptions() {
+ AssemblerOptions options;
+ options.disable_reloc_info_for_patching = true;
+ return options;
+ }
+
+ void EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target);
+
+ void EmitJumpSlot(Address target);
+
+ void NopBytes(int bytes);
+
+ static void PatchJumpTableSlot(Address slot, Address new_target,
+ WasmCode::FlushICache flush_i_cache) {
+ JumpTableAssembler jsasm(slot);
+ jsasm.EmitJumpSlot(new_target);
+ jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
+ if (flush_i_cache) {
+ Assembler::FlushICache(slot, kJumpTableSlotSize);
+ }
+ }
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_JUMP_TABLE_ASSEMBLER_H_
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 999af23db9..59c7bedbc1 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -6,23 +6,21 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
-#include "src/assembler-inl.h"
#include "src/base/optional.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/identity-map.h"
#include "src/property-descriptor.h"
+#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
@@ -32,11 +30,6 @@
if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
} while (false)
-#define TRACE_CHAIN(instance) \
- do { \
- instance->PrintInstancesChain(); \
- } while (false)
-
#define TRACE_COMPILE(...) \
do { \
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
@@ -71,7 +64,7 @@ enum class CompileMode : uint8_t { kRegular, kTiering };
// compilation of functions.
class CompilationState {
public:
- CompilationState(internal::Isolate* isolate, ModuleEnv& env);
+ CompilationState(internal::Isolate*, const ModuleEnv&);
~CompilationState();
// Needs to be set before {AddCompilationUnits} is run, which triggers
@@ -102,8 +95,6 @@ class CompilationState {
bool SetFinisherIsRunning(bool value);
void ScheduleFinisherTask();
- bool StopBackgroundCompilationTaskForThrottling();
-
void Abort();
Isolate* isolate() const { return isolate_; }
@@ -118,20 +109,8 @@ class CompilationState {
}
CompileMode compile_mode() const { return compile_mode_; }
-
ModuleEnv* module_env() { return &module_env_; }
- const ModuleWireBytes& wire_bytes() const { return wire_bytes_; }
-
- void SetWireBytes(const ModuleWireBytes& wire_bytes) {
- DCHECK_NULL(bytes_copy_);
- DCHECK_EQ(0, wire_bytes_.length());
- bytes_copy_ = std::unique_ptr<byte[]>(new byte[wire_bytes.length()]);
- memcpy(bytes_copy_.get(), wire_bytes.start(), wire_bytes.length());
- wire_bytes_ = ModuleWireBytes(bytes_copy_.get(),
- bytes_copy_.get() + wire_bytes.length());
- }
-
private:
void NotifyOnEvent(CompilationEvent event, ErrorThrower* thrower);
@@ -140,17 +119,15 @@ class CompilationState {
: baseline_finish_units_;
}
+ // TODO(7423): Get rid of the Isolate field to make sure the CompilationState
+ // can be shared across multiple Isolates.
Isolate* const isolate_;
+ WasmEngine* const wasm_engine_;
+ // TODO(clemensh): Remove ModuleEnv, generate it when needed.
ModuleEnv module_env_;
- const size_t max_memory_;
const CompileMode compile_mode_;
bool baseline_compilation_finished_ = false;
- // TODO(wasm): eventually we want to get rid of this
- // additional copy (see AsyncCompileJob).
- std::unique_ptr<byte[]> bytes_copy_;
- ModuleWireBytes wire_bytes_;
-
// This mutex protects all information of this CompilationState which is being
// accessed concurrently.
mutable base::Mutex mutex_;
@@ -168,8 +145,6 @@ class CompilationState {
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_finish_units_;
- size_t allocated_memory_ = 0;
-
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -194,49 +169,33 @@ namespace {
class JSToWasmWrapperCache {
public:
- Handle<Code> CloneOrCompileJSToWasmWrapper(
- Isolate* isolate, wasm::WasmModule* module, Address call_target,
- uint32_t index, wasm::UseTrapHandler use_trap_handler) {
- const bool is_import = index < module->num_imported_functions;
- DCHECK_EQ(is_import, call_target == kNullAddress);
- const wasm::WasmFunction* func = &module->functions[index];
- // We cannot cache js-to-wasm wrappers for imports, as they hard-code the
- // function index.
- if (!is_import) {
- int cached_idx = sig_map_.Find(func->sig);
- if (cached_idx >= 0) {
- Handle<Code> code =
- isolate->factory()->CopyCode(code_cache_[cached_idx]);
- // Now patch the call to wasm code.
- RelocIterator it(*code,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- // If there is no reloc info, then it's an incompatible signature or
- // calls an import.
- if (!it.done()) it.rinfo()->set_js_to_wasm_address(call_target);
- return code;
- }
- }
-
- Handle<Code> code = compiler::CompileJSToWasmWrapper(
- isolate, module, call_target, index, use_trap_handler);
- if (!is_import) {
- uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
- DCHECK_EQ(code_cache_.size(), new_cache_idx);
- USE(new_cache_idx);
- code_cache_.push_back(code);
- }
+ Handle<Code> GetOrCompileJSToWasmWrapper(
+ Isolate* isolate, const wasm::NativeModule* native_module,
+ uint32_t func_index, wasm::UseTrapHandler use_trap_handler) {
+ const wasm::WasmModule* module = native_module->module();
+ const wasm::WasmFunction* func = &module->functions[func_index];
+ bool is_import = func_index < module->num_imported_functions;
+ std::pair<bool, wasm::FunctionSig> key(is_import, *func->sig);
+ Handle<Code>& cached = cache_[key];
+ if (!cached.is_null()) return cached;
+
+ Handle<Code> code =
+ compiler::CompileJSToWasmWrapper(isolate, native_module, func->sig,
+ is_import, use_trap_handler)
+ .ToHandleChecked();
+ cached = code;
return code;
}
private:
- // sig_map_ maps signatures to an index in code_cache_.
- wasm::SignatureMap sig_map_;
- std::vector<Handle<Code>> code_cache_;
+ // We generate different code for calling imports than calling wasm functions
+ // in this module. Both are cached separately.
+ using CacheKey = std::pair<bool, wasm::FunctionSig>;
+ std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
};
-// A helper class to simplify instantiating a module from a compiled module.
-// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
-// etc.
+// A helper class to simplify instantiating a module from a module object.
+// It closes over the {Isolate}, the {ErrorThrower}, etc.
class InstanceBuilder {
public:
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
@@ -265,30 +224,21 @@ class InstanceBuilder {
};
Isolate* isolate_;
- WasmModule* const module_;
- const std::shared_ptr<Counters> async_counters_;
+ const WasmModule* const module_;
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
MaybeHandle<JSReceiver> ffi_;
MaybeHandle<JSArrayBuffer> memory_;
Handle<JSArrayBuffer> globals_;
- Handle<WasmCompiledModule> compiled_module_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
std::vector<SanitizedImport> sanitized_imports_;
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
- }
-
- Counters* counters() const { return async_counters().get(); }
-
wasm::UseTrapHandler use_trap_handler() const {
- return compiled_module_->GetNativeModule()->use_trap_handler()
- ? kUseTrapHandler
- : kNoTrapHandler;
+ return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
+ : kNoTrapHandler;
}
// Helper routines to print out errors with imports.
@@ -328,18 +278,23 @@ class InstanceBuilder {
// Load data segments into the memory.
void LoadDataSegments(Handle<WasmInstanceObject> instance);
- void WriteGlobalValue(WasmGlobal& global, double value);
- void WriteGlobalValue(WasmGlobal& global, Handle<WasmGlobalObject> value);
+ void WriteGlobalValue(const WasmGlobal& global, double value);
+ void WriteGlobalValue(const WasmGlobal& global,
+ Handle<WasmGlobalObject> value);
void SanitizeImports();
+ // Find the imported memory buffer if there is one. This is used to see if we
+ // need to recompile with bounds checks before creating the instance.
+ MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
+
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
int ProcessImports(Handle<WasmInstanceObject> instance);
template <typename T>
- T* GetRawGlobalPtr(WasmGlobal& global);
+ T* GetRawGlobalPtr(const WasmGlobal& global);
// Process initialization of globals.
void InitGlobals();
@@ -372,167 +327,38 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
return {};
}
-// A helper class to prevent pathological patching behavior for indirect
-// references to code which must be updated after lazy compiles.
-// Utilizes a reverse mapping to prevent O(n^2) behavior.
-class IndirectPatcher {
- public:
- void Patch(Handle<WasmInstanceObject> caller_instance,
- Handle<WasmInstanceObject> target_instance, int func_index,
- Address old_target, Address new_target) {
- TRACE_LAZY(
- "IndirectPatcher::Patch(caller=%p, target=%p, func_index=%i, "
- "old_target=%" PRIuPTR ", new_target=%" PRIuPTR ")\n",
- *caller_instance, *target_instance, func_index, old_target, new_target);
- if (mapping_.size() == 0 || misses_ >= kMaxMisses) {
- BuildMapping(caller_instance);
- }
- // Patch entries for the given function index.
- WasmCodeManager* code_manager =
- caller_instance->GetIsolate()->wasm_engine()->code_manager();
- USE(code_manager);
- auto& entries = mapping_[func_index];
- int patched = 0;
- for (auto index : entries) {
- if (index < 0) {
- // Imported function entry.
- int i = -1 - index;
- ImportedFunctionEntry entry(caller_instance, i);
- if (entry.target() == old_target) {
- DCHECK_EQ(
- func_index,
- code_manager->GetCodeFromStartAddress(entry.target())->index());
- entry.set_wasm_to_wasm(*target_instance, new_target);
- patched++;
- }
- } else {
- // Indirect function table entry.
- int i = index;
- IndirectFunctionTableEntry entry(caller_instance, i);
- if (entry.target() == old_target) {
- DCHECK_EQ(
- func_index,
- code_manager->GetCodeFromStartAddress(entry.target())->index());
- entry.set(entry.sig_id(), *target_instance, new_target);
- patched++;
- }
- }
- }
- if (patched == 0) misses_++;
- }
-
- private:
- void BuildMapping(Handle<WasmInstanceObject> caller_instance) {
- mapping_.clear();
- misses_ = 0;
- TRACE_LAZY("BuildMapping for (caller=%p)...\n", *caller_instance);
- Isolate* isolate = caller_instance->GetIsolate();
- WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
- uint32_t num_imported_functions =
- caller_instance->module()->num_imported_functions;
- // Process the imported function entries.
- for (unsigned i = 0; i < num_imported_functions; i++) {
- ImportedFunctionEntry entry(caller_instance, i);
- WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
- if (code->kind() != WasmCode::kLazyStub) continue;
- TRACE_LAZY(" +import[%u] -> #%d (%p)\n", i, code->index(),
- code->instructions().start());
- DCHECK(!entry.is_js_receiver_entry());
- WasmInstanceObject* target_instance = entry.instance();
- WasmCode* new_code =
- target_instance->compiled_module()->GetNativeModule()->code(
- code->index());
- if (new_code->kind() != WasmCode::kLazyStub) {
- // Patch an imported function entry which is already compiled.
- entry.set_wasm_to_wasm(target_instance, new_code->instruction_start());
- } else {
- int key = code->index();
- int index = -1 - i;
- mapping_[key].push_back(index);
- }
- }
- // Process the indirect function table entries.
- size_t ift_size = caller_instance->indirect_function_table_size();
- for (unsigned i = 0; i < ift_size; i++) {
- IndirectFunctionTableEntry entry(caller_instance, i);
- if (entry.target() == kNullAddress) continue; // null IFT entry
- WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
- if (code->kind() != WasmCode::kLazyStub) continue;
- TRACE_LAZY(" +indirect[%u] -> #%d (lazy:%p)\n", i, code->index(),
- code->instructions().start());
- WasmInstanceObject* target_instance = entry.instance();
- WasmCode* new_code =
- target_instance->compiled_module()->GetNativeModule()->code(
- code->index());
- if (new_code->kind() != WasmCode::kLazyStub) {
- // Patch an indirect function table entry which is already compiled.
- entry.set(entry.sig_id(), target_instance,
- new_code->instruction_start());
- } else {
- int key = code->index();
- int index = i;
- mapping_[key].push_back(index);
- }
- }
- }
-
- static constexpr int kMaxMisses = 5; // maximum misses before rebuilding
- std::unordered_map<int, std::vector<int>> mapping_;
- int misses_ = 0;
-};
-
-ModuleEnv CreateModuleEnvFromModuleObject(
- Isolate* isolate, Handle<WasmModuleObject> module_object) {
- WasmModule* module = module_object->shared()->module();
- wasm::UseTrapHandler use_trap_handler =
- module_object->compiled_module()->GetNativeModule()->use_trap_handler()
- ? kUseTrapHandler
- : kNoTrapHandler;
- return ModuleEnv(module, use_trap_handler, wasm::kRuntimeExceptionSupport);
-}
-
-const wasm::WasmCode* LazyCompileFunction(
- Isolate* isolate, Handle<WasmModuleObject> module_object, int func_index) {
+wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
+ NativeModule* native_module,
+ int func_index) {
base::ElapsedTimer compilation_timer;
- NativeModule* native_module =
- module_object->compiled_module()->GetNativeModule();
- wasm::WasmCode* existing_code =
- native_module->code(static_cast<uint32_t>(func_index));
- if (existing_code != nullptr &&
- existing_code->kind() == wasm::WasmCode::kFunction) {
- TRACE_LAZY("Function %d already compiled.\n", func_index);
- return existing_code;
- }
+ DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
compilation_timer.Start();
+
+ ModuleEnv* module_env = native_module->compilation_state()->module_env();
// TODO(wasm): Refactor this to only get the name if it is really needed for
// tracing / debugging.
- std::string func_name;
+ WasmName func_name;
{
- WasmName name = Vector<const char>::cast(
- module_object->shared()->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ WireBytesRef name_ref =
+ module_env->module->LookupFunctionName(wire_bytes, func_index);
+ func_name = wire_bytes.GetName(name_ref);
}
- TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
+ TRACE_LAZY("Compiling function '%.*s' (#%d).\n", func_name.length(),
+ func_name.start(), func_index);
- ModuleEnv module_env =
- CreateModuleEnvFromModuleObject(isolate, module_object);
+ const uint8_t* module_start = native_module->wire_bytes().start();
- const uint8_t* module_start =
- module_object->shared()->module_bytes()->GetChars();
-
- const WasmFunction* func = &module_env.module->functions[func_index];
+ const WasmFunction* func = &module_env->module->functions[func_index];
FunctionBody body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
ErrorThrower thrower(isolate, "WasmLazyCompile");
- WasmCompilationUnit unit(isolate, &module_env, native_module, body,
- CStrVector(func_name.c_str()), func_index,
- CodeFactory::CEntry(isolate));
+ WasmCompilationUnit unit(isolate, module_env, native_module, body, func_name,
+ func_index);
unit.ExecuteCompilation();
wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
@@ -545,10 +371,6 @@ const wasm::WasmCode* LazyCompileFunction(
// module creation time, and return a function that always traps here.
CHECK(!thrower.error());
- // Now specialize the generated code for this instance.
- CodeSpecialization code_specialization;
- code_specialization.RelocateDirectCalls(native_module);
- code_specialization.ApplyToWasmCode(wasm_code, SKIP_ICACHE_FLUSH);
int64_t func_size =
static_cast<int64_t>(func->code.end_offset() - func->code.offset());
int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();
@@ -556,8 +378,6 @@ const wasm::WasmCode* LazyCompileFunction(
auto counters = isolate->counters();
counters->wasm_lazily_compiled_functions()->Increment();
- Assembler::FlushICache(wasm_code->instructions().start(),
- wasm_code->instructions().size());
counters->wasm_generated_code_size()->Increment(
static_cast<int>(wasm_code->instructions().size()));
counters->wasm_reloc_size()->Increment(
@@ -567,299 +387,22 @@ const wasm::WasmCode* LazyCompileFunction(
compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
: 0);
- if (trap_handler::IsTrapHandlerEnabled()) {
- wasm_code->RegisterTrapHandlerData();
- }
return wasm_code;
}
-namespace {
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
- int offset) {
- DCHECK(!iterator.done());
- int byte_pos;
- do {
- byte_pos = iterator.source_position().ScriptOffset();
- iterator.Advance();
- } while (!iterator.done() && iterator.code_offset() <= offset);
- return byte_pos;
-}
-
-const wasm::WasmCode* LazyCompileFromJsToWasm(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<Code> js_to_wasm_caller, uint32_t callee_func_index) {
- Decoder decoder(nullptr, nullptr);
- Handle<WasmModuleObject> module_object(instance->module_object());
- NativeModule* native_module = instance->compiled_module()->GetNativeModule();
-
- TRACE_LAZY(
- "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: "
- "true). \n",
- callee_func_index);
- LazyCompileFunction(isolate, module_object, callee_func_index);
- {
- DisallowHeapAllocation no_gc;
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- RelocIterator it(*js_to_wasm_caller,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- DCHECK(!it.done());
- const wasm::WasmCode* callee_compiled =
- native_module->code(callee_func_index);
- DCHECK_NOT_NULL(callee_compiled);
- DCHECK_EQ(WasmCode::kLazyStub,
- isolate->wasm_engine()
- ->code_manager()
- ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
- ->kind());
- it.rinfo()->set_js_to_wasm_address(callee_compiled->instruction_start());
- TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller);
-
-#ifdef DEBUG
- it.next();
- DCHECK(it.done());
-#endif
- }
-
- wasm::WasmCode* ret = native_module->code(callee_func_index);
- DCHECK_NOT_NULL(ret);
- DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
- return ret;
-}
-
-const wasm::WasmCode* LazyCompileIndirectCall(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t func_index) {
- TRACE_LAZY(
- "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: "
- "false). \n",
- func_index);
- Handle<WasmModuleObject> module_object(instance->module_object());
- return LazyCompileFunction(isolate, module_object, func_index);
-}
-
-const wasm::WasmCode* LazyCompileDirectCall(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- const wasm::WasmCode* wasm_caller,
- int32_t caller_ret_offset) {
- DCHECK_LE(0, caller_ret_offset);
-
- Decoder decoder(nullptr, nullptr);
-
- // Gather all the targets of direct calls inside the code of {wasm_caller}
- // and place their function indexes in {direct_callees}.
- std::vector<int32_t> direct_callees;
- // The last one before {caller_ret_offset} must be the call that triggered
- // this lazy compilation.
- int callee_pos = -1;
- uint32_t num_non_compiled_callees = 0; // For stats.
- {
- DisallowHeapAllocation no_gc;
- Handle<WasmSharedModuleData> shared(
- wasm_caller->native_module()->shared_module_data(), isolate);
- SeqOneByteString* module_bytes = shared->module_bytes();
- uint32_t caller_func_index = wasm_caller->index();
- SourcePositionTableIterator source_pos_iterator(
- wasm_caller->source_positions());
-
- const byte* func_bytes =
- module_bytes->GetChars() +
- shared->module()->functions[caller_func_index].code.offset();
- for (RelocIterator it(wasm_caller->instructions(),
- wasm_caller->reloc_info(),
- wasm_caller->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
- // (depending on the bool) against limits of T and then static_casts.
- size_t offset_l = it.rinfo()->pc() - wasm_caller->instruction_start();
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- int byte_pos =
- AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
-
- WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
- it.rinfo()->target_address());
- if (callee->kind() == WasmCode::kLazyStub) {
- // The callee has not been compiled.
- ++num_non_compiled_callees;
- int32_t callee_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- DCHECK_LT(callee_func_index,
- wasm_caller->native_module()->function_count());
- // {caller_ret_offset} points to one instruction after the call.
- // Remember the last called function before that offset.
- if (offset < caller_ret_offset) {
- callee_pos = static_cast<int>(direct_callees.size());
- }
- direct_callees.push_back(callee_func_index);
- } else {
- // If the callee is not the lazy compile stub, assume this callee
- // has already been compiled.
- direct_callees.push_back(-1);
- continue;
- }
- }
-
- TRACE_LAZY("Found %d non-compiled callees in function=%p.\n",
- num_non_compiled_callees, wasm_caller);
- USE(num_non_compiled_callees);
- }
- CHECK_LE(0, callee_pos);
-
- // TODO(wasm): compile all functions in non_compiled_callees in
- // background, wait for direct_callees[callee_pos].
- auto callee_func_index = direct_callees[callee_pos];
- TRACE_LAZY(
- "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] "
- "-> %d).\n",
- wasm_caller, caller_ret_offset, callee_pos, callee_func_index);
-
- Handle<WasmModuleObject> module_object(instance->module_object());
- NativeModule* native_module = instance->compiled_module()->GetNativeModule();
- const WasmCode* ret =
- LazyCompileFunction(isolate, module_object, callee_func_index);
- DCHECK_NOT_NULL(ret);
-
- int patched = 0;
- {
- // Now patch the code in {wasm_caller} with all functions which are now
- // compiled. This will pick up any other compiled functions, not only {ret}.
- size_t pos = 0;
- for (RelocIterator
- it(wasm_caller->instructions(), wasm_caller->reloc_info(),
- wasm_caller->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next(), ++pos) {
- auto callee_index = direct_callees[pos];
- if (callee_index < 0) continue; // callee already compiled.
- const WasmCode* callee_compiled = native_module->code(callee_index);
- if (callee_compiled->kind() != WasmCode::kFunction) continue;
- DCHECK_EQ(WasmCode::kLazyStub,
- isolate->wasm_engine()
- ->code_manager()
- ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
- ->kind());
- it.rinfo()->set_wasm_call_address(callee_compiled->instruction_start());
- ++patched;
- }
- DCHECK_EQ(direct_callees.size(), pos);
- }
-
- DCHECK_LT(0, patched);
- TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller);
- USE(patched);
-
- return ret;
-}
-
-} // namespace
-
-Address CompileLazy(Isolate* isolate,
- Handle<WasmInstanceObject> target_instance) {
+Address CompileLazy(Isolate* isolate, NativeModule* native_module,
+ uint32_t func_index) {
HistogramTimerScope lazy_time_scope(
isolate->counters()->wasm_lazy_compilation_time());
- //==========================================================================
- // Begin stack walk.
- //==========================================================================
- StackFrameIterator it(isolate);
-
- //==========================================================================
- // First frame: C entry stub.
- //==========================================================================
- DCHECK(!it.done());
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
-
- //==========================================================================
- // Second frame: WasmCompileLazy builtin.
- //==========================================================================
- DCHECK(!it.done());
- int target_func_index = -1;
- bool indirectly_called = false;
- const wasm::WasmCode* lazy_stub =
- isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
- CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind());
- if (!lazy_stub->IsAnonymous()) {
- // If the lazy stub is not "anonymous", then its copy encodes the target
- // function index. Used for import and indirect calls.
- target_func_index = lazy_stub->index();
- indirectly_called = true;
- }
- it.Advance();
-
- //==========================================================================
- // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
- // wrapper.
- //==========================================================================
- DCHECK(!it.done());
- DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
- Handle<Code> js_to_wasm_caller_code;
- Handle<WasmInstanceObject> caller_instance;
- const WasmCode* wasm_caller_code = nullptr;
- int32_t caller_ret_offset = -1;
- if (it.frame()->is_js_to_wasm()) {
- js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
- // This wasn't actually an indirect call, but a JS->wasm call.
- indirectly_called = false;
- } else {
- caller_instance =
- handle(WasmCompiledFrame::cast(it.frame())->wasm_instance(), isolate);
- wasm_caller_code =
- isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
- auto offset = it.frame()->pc() - wasm_caller_code->instruction_start();
- caller_ret_offset = static_cast<int32_t>(offset);
- DCHECK_EQ(offset, caller_ret_offset);
- }
-
- //==========================================================================
- // Begin compilation.
- //==========================================================================
- Handle<WasmCompiledModule> compiled_module(
- target_instance->compiled_module());
-
- NativeModule* native_module = compiled_module->GetNativeModule();
DCHECK(!native_module->lazy_compile_frozen());
NativeModuleModificationScope native_module_modification_scope(native_module);
- const wasm::WasmCode* result = nullptr;
-
- if (!js_to_wasm_caller_code.is_null()) {
- result = LazyCompileFromJsToWasm(isolate, target_instance,
- js_to_wasm_caller_code, target_func_index);
- DCHECK_NOT_NULL(result);
- DCHECK_EQ(target_func_index, result->index());
- } else {
- DCHECK_NOT_NULL(wasm_caller_code);
- if (target_func_index < 0) {
- result = LazyCompileDirectCall(isolate, target_instance, wasm_caller_code,
- caller_ret_offset);
- DCHECK_NOT_NULL(result);
- } else {
- result =
- LazyCompileIndirectCall(isolate, target_instance, target_func_index);
- DCHECK_NOT_NULL(result);
- }
- }
-
- //==========================================================================
- // Update import and indirect function tables in the caller.
- //==========================================================================
- if (indirectly_called) {
- DCHECK(!caller_instance.is_null());
- if (!caller_instance->has_managed_indirect_patcher()) {
- auto patcher = Managed<IndirectPatcher>::Allocate(isolate);
- caller_instance->set_managed_indirect_patcher(*patcher);
- }
- IndirectPatcher* patcher = Managed<IndirectPatcher>::cast(
- caller_instance->managed_indirect_patcher())
- ->raw();
- Address old_target = lazy_stub->instruction_start();
- patcher->Patch(caller_instance, target_instance, target_func_index,
- old_target, result->instruction_start());
- }
+ wasm::WasmCode* result =
+ LazyCompileFunction(isolate, native_module, func_index);
+ DCHECK_NOT_NULL(result);
+ DCHECK_EQ(func_index, result->index());
return result->instruction_start();
}
@@ -867,27 +410,7 @@ Address CompileLazy(Isolate* isolate,
namespace {
bool compile_lazy(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
-}
-
-void FlushICache(const wasm::NativeModule* native_module) {
- for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->function_count();
- i < e; ++i) {
- const wasm::WasmCode* code = native_module->code(i);
- if (code == nullptr) continue;
- Assembler::FlushICache(code->instructions().start(),
- code->instructions().size());
- }
-}
-
-void FlushICache(Handle<FixedArray> functions) {
- for (int i = 0, e = functions->length(); i < e; ++i) {
- if (!functions->get(i)->IsCode()) continue;
- Code* code = Code::cast(functions->get(i));
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
- }
+ (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
}
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
@@ -908,7 +431,7 @@ void RecordStats(const wasm::WasmCode* code, Counters* counters) {
void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->function_count();
+ e = native_module->num_functions();
i < e; ++i) {
const wasm::WasmCode* code = native_module->code(i);
if (code != nullptr) RecordStats(code, counters);
@@ -927,37 +450,23 @@ double MonotonicallyIncreasingTimeInMs() {
base::Time::kMillisecondsPerSecond;
}
-ModuleEnv CreateDefaultModuleEnv(WasmModule* module) {
- // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
+ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
+ bool allow_trap_handler = true) {
UseTrapHandler use_trap_handler =
- trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler;
+ trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
+ ? kUseTrapHandler
+ : kNoTrapHandler;
return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
}
-Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
- WasmModule* module,
- ModuleEnv& env) {
- Handle<WasmCompiledModule> compiled_module =
- WasmCompiledModule::New(isolate, module, env);
- return compiled_module;
-}
-
-size_t GetMaxUsableMemorySize(Isolate* isolate) {
- return isolate->heap()->memory_allocator()->code_range()->valid()
- ? isolate->heap()->memory_allocator()->code_range()->size()
- : isolate->heap()->code_space()->Capacity();
-}
-
// The CompilationUnitBuilder builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// CompilationState when {Commit} is called.
class CompilationUnitBuilder {
public:
- explicit CompilationUnitBuilder(NativeModule* native_module,
- Handle<Code> centry_stub)
+ explicit CompilationUnitBuilder(NativeModule* native_module)
: native_module_(native_module),
- compilation_state_(native_module->compilation_state()),
- centry_stub_(centry_stub) {}
+ compilation_state_(native_module->compilation_state()) {}
void AddUnit(const WasmFunction* function, uint32_t buffer_offset,
Vector<const uint8_t> bytes, WasmName name) {
@@ -1001,13 +510,12 @@ class CompilationUnitBuilder {
native_module_,
wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
bytes.end()},
- name, function->func_index, centry_stub_, mode,
+ name, function->func_index, mode,
compilation_state_->isolate()->async_counters().get());
}
NativeModule* native_module_;
CompilationState* compilation_state_;
- Handle<Code> centry_stub_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
@@ -1038,33 +546,19 @@ bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) {
return true;
}
-size_t GetNumFunctionsToCompile(const WasmModule* wasm_module) {
- // TODO(kimanh): Remove, FLAG_skip_compiling_wasm_funcs: previously used for
- // debugging, and now not necessarily working anymore.
- uint32_t start =
- wasm_module->num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- uint32_t num_funcs = static_cast<uint32_t>(wasm_module->functions.size());
- uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start;
- return funcs_to_compile;
-}
-
-void InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
- const ModuleWireBytes& wire_bytes,
- const WasmModule* wasm_module,
- Handle<Code> centry_stub,
- NativeModule* native_module) {
- uint32_t start =
- wasm_module->num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- uint32_t num_funcs = static_cast<uint32_t>(functions.size());
-
- CompilationUnitBuilder builder(native_module, centry_stub);
- for (uint32_t i = start; i < num_funcs; ++i) {
- const WasmFunction* func = &functions[i];
+void InitializeCompilationUnits(NativeModule* native_module) {
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ const WasmModule* module = native_module->module();
+ CompilationUnitBuilder builder(native_module);
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ for (uint32_t i = start; i < end; ++i) {
+ const WasmFunction* func = &module->functions[i];
uint32_t buffer_offset = func->code.offset();
Vector<const uint8_t> bytes(wire_bytes.start() + func->code.offset(),
func->code.end_offset() - func->code.offset());
- WasmName name = wire_bytes.GetName(func, wasm_module);
+ WasmName name = wire_bytes.GetName(func, module);
DCHECK_NOT_NULL(native_module);
builder.AddUnit(func, buffer_offset, bytes, name);
}
@@ -1073,6 +567,7 @@ void InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
void FinishCompilationUnits(CompilationState* compilation_state,
ErrorThrower* thrower) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FinishCompilationUnits");
while (true) {
if (compilation_state->failed()) break;
std::unique_ptr<WasmCompilationUnit> unit =
@@ -1095,29 +590,9 @@ void FinishCompilationUnits(CompilationState* compilation_state,
}
}
-void UpdateAllCompiledModulesWithTopTierCode(
- Handle<WasmModuleObject> module_object) {
- WasmModule* module = module_object->shared()->module();
- DCHECK_GT(module->functions.size() - module->num_imported_functions, 0);
- USE(module);
-
- CodeSpaceMemoryModificationScope modification_scope(
- module_object->GetIsolate()->heap());
-
- NativeModule* native_module =
- module_object->compiled_module()->GetNativeModule();
-
- // Link.
- CodeSpecialization code_specialization;
- code_specialization.RelocateDirectCalls(native_module);
- code_specialization.ApplyToWholeModule(native_module, module_object);
-}
-
void CompileInParallel(Isolate* isolate, NativeModule* native_module,
- const ModuleWireBytes& wire_bytes, ModuleEnv* module_env,
Handle<WasmModuleObject> module_object,
- Handle<Code> centry_stub, ErrorThrower* thrower) {
- const WasmModule* module = module_env->module;
+ ErrorThrower* thrower) {
// Data structures for the parallel compilation.
//-----------------------------------------------------------------------
@@ -1149,66 +624,16 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// the compilation units. This foreground thread will be
// responsible for finishing compilation.
compilation_state->SetFinisherIsRunning(true);
- size_t functions_count = GetNumFunctionsToCompile(module);
- compilation_state->SetNumberOfFunctionsToCompile(functions_count);
- compilation_state->SetWireBytes(wire_bytes);
-
- DeferredHandles* deferred_handles = nullptr;
- Handle<Code> centry_deferred = centry_stub;
- Handle<WasmModuleObject> module_object_deferred;
- if (compilation_state->compile_mode() == CompileMode::kTiering) {
- // Open a deferred handle scope for the centry_stub, in order to allow
- // for background tiering compilation.
- DeferredHandleScope deferred(isolate);
- centry_deferred = Handle<Code>(*centry_stub, isolate);
- module_object_deferred = handle(*module_object, isolate);
- deferred_handles = deferred.Detach();
- }
- compilation_state->AddCallback(
- [module_object_deferred, deferred_handles](
- // Callback is called from a foreground thread.
- CompilationEvent event, ErrorThrower* thrower) mutable {
- switch (event) {
- case CompilationEvent::kFinishedBaselineCompilation:
- // Nothing to do, since we are finishing baseline compilation
- // in this foreground thread.
- return;
- case CompilationEvent::kFinishedTopTierCompilation:
- UpdateAllCompiledModulesWithTopTierCode(module_object_deferred);
- // TODO(wasm): Currently compilation has to finish before the
- // {deferred_handles} can be removed. We need to make sure that
- // we can clean it up at a time when the native module
- // should die (but currently cannot, since it's kept alive
- // through the {deferred_handles} themselves).
- delete deferred_handles;
- deferred_handles = nullptr;
- return;
- case CompilationEvent::kFailedCompilation:
- // If baseline compilation failed, we will reflect this without
- // a callback, in this thread through {thrower}.
- // Tier-up compilation should not fail if baseline compilation
- // did not fail.
- DCHECK(!module_object_deferred->compiled_module()
- ->GetNativeModule()
- ->compilation_state()
- ->baseline_compilation_finished());
- delete deferred_handles;
- deferred_handles = nullptr;
- return;
- case CompilationEvent::kDestroyed:
- if (deferred_handles) delete deferred_handles;
- return;
- }
- UNREACHABLE();
- });
+ uint32_t num_wasm_functions =
+ native_module->num_functions() - native_module->num_imported_functions();
+ compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
// 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units} within the
// {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTask} instances are spawned which run on
// background threads.
- InitializeCompilationUnits(module->functions, compilation_state->wire_bytes(),
- module, centry_deferred, native_module);
+ InitializeCompilationUnits(native_module);
// 2.a) The background threads and the main thread pick one compilation
// unit at a time and execute the parallel phase of the compilation
@@ -1249,19 +674,18 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
}
void CompileSequentially(Isolate* isolate, NativeModule* native_module,
- const ModuleWireBytes& wire_bytes,
ModuleEnv* module_env, ErrorThrower* thrower) {
DCHECK(!thrower->error());
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = module_env->module;
- for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
- i < module->functions.size(); ++i) {
+ for (uint32_t i = 0; i < module->functions.size(); ++i) {
const WasmFunction& func = module->functions[i];
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
wasm::WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
- native_module, thrower, isolate, wire_bytes, module_env, &func);
+ native_module, thrower, isolate, module_env, &func);
if (code == nullptr) {
TruncatedUserString<> name(wire_bytes.GetName(&func, module));
thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
@@ -1271,20 +695,22 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
}
}
-void ValidateSequentially(Isolate* isolate, const ModuleWireBytes& wire_bytes,
- ModuleEnv* module_env, ErrorThrower* thrower) {
+void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
+ ErrorThrower* thrower) {
DCHECK(!thrower->error());
- const WasmModule* module = module_env->module;
- for (uint32_t i = 0; i < module->functions.size(); ++i) {
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ const WasmModule* module = native_module->module();
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ for (uint32_t i = start; i < end; ++i) {
const WasmFunction& func = module->functions[i];
- if (func.imported) continue;
const byte* base = wire_bytes.start();
FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
base + func.code.end_offset()};
DecodeResult result = VerifyWasmCodeWithStats(
- isolate->allocator(), module, body, module->is_wasm(),
+ isolate->allocator(), module, body, module->origin,
isolate->async_counters().get());
if (result.failed()) {
TruncatedUserString<> name(wire_bytes.GetName(&func, module));
@@ -1296,91 +722,22 @@ void ValidateSequentially(Isolate* isolate, const ModuleWireBytes& wire_bytes,
}
}
-MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
- Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- WasmModule* wasm_module = module.get();
- Handle<Code> centry_stub = CodeFactory::CEntry(isolate);
- TimedHistogramScope wasm_compile_module_time_scope(
- wasm_module->is_wasm()
- ? isolate->async_counters()->wasm_compile_wasm_module_time()
- : isolate->async_counters()->wasm_compile_asm_module_time());
- // TODO(6792): No longer needed once WebAssembly code is off heap. Use
- // base::Optional to be able to close the scope before notifying the debugger.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate->heap());
+void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ const WasmModule* wasm_module, ModuleEnv* env) {
+ NativeModule* const native_module = module_object->native_module();
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
- // Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(wasm_module);
-
- Factory* factory = isolate->factory();
- // Create heap objects for script, module bytes and asm.js offset table to
- // be stored in the shared module data.
- Handle<Script> script;
- Handle<ByteArray> asm_js_offset_table;
- if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate, wire_bytes);
- } else {
- script = asm_js_script;
- asm_js_offset_table =
- isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
- asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
- asm_js_offset_table_bytes.length());
- }
- // TODO(wasm): only save the sections necessary to deserialize a
- // {WasmModule}. E.g. function bodies could be omitted.
- Handle<String> module_bytes =
- factory
- ->NewStringFromOneByte({wire_bytes.start(), wire_bytes.length()},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
-
- // The {managed_module} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<Managed<WasmModule>> managed_module =
- Managed<WasmModule>::FromUniquePtr(isolate, std::move(module));
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
-
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate, managed_module, Handle<SeqOneByteString>::cast(module_bytes),
- script, asm_js_offset_table);
-
- int export_wrappers_size =
- static_cast<int>(wasm_module->num_exported_functions);
- Handle<FixedArray> export_wrappers =
- factory->NewFixedArray(static_cast<int>(export_wrappers_size), TENURED);
- Handle<Code> init_builtin = BUILTIN_CODE(isolate, Illegal);
- for (int i = 0, e = export_wrappers->length(); i < e; ++i) {
- export_wrappers->set(i, *init_builtin);
- }
- ModuleEnv env = CreateDefaultModuleEnv(wasm_module);
-
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of this
- // object.
- Handle<WasmCompiledModule> compiled_module =
- NewCompiledModule(isolate, shared->module(), env);
- NativeModule* native_module = compiled_module->GetNativeModule();
- compiled_module->GetNativeModule()->SetSharedModuleData(shared);
- Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate, compiled_module, export_wrappers, shared);
- if (lazy_compile) {
- if (wasm_module->is_wasm()) {
+ if (compile_lazy(wasm_module)) {
+ if (wasm_module->origin == kWasmOrigin) {
// Validate wasm modules for lazy compilation. Don't validate asm.js
// modules, they are valid by construction (otherwise a CHECK will fail
// during lazy compilation).
// TODO(clemensh): According to the spec, we can actually skip validation
// at module creation time, and return a function that always traps at
// (lazy) compilation time.
- ValidateSequentially(isolate, wire_bytes, &env, thrower);
- if (thrower->error()) return {};
+ ValidateSequentially(isolate, native_module, thrower);
+ if (thrower->error()) return;
}
native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
@@ -1393,30 +750,14 @@ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
if (compile_parallel) {
- CompileInParallel(isolate, native_module, wire_bytes, &env, module_object,
- centry_stub, thrower);
+ CompileInParallel(isolate, native_module, module_object, thrower);
} else {
- CompileSequentially(isolate, native_module, wire_bytes, &env, thrower);
+ CompileSequentially(isolate, native_module, env, thrower);
}
- if (thrower->error()) return {};
+ if (thrower->error()) return;
- RecordStats(native_module, isolate->async_counters().get());
+ RecordStats(native_module, isolate->counters());
}
-
- // Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(isolate, module_object,
- isolate->async_counters().get());
-
- // If we created a wasm script, finish it now and make it public to the
- // debugger.
- if (asm_js_script.is_null()) {
- // Close the CodeSpaceMemoryModificationScope before calling into the
- // debugger.
- modification_scope.reset();
- isolate->debug()->OnAfterCompile(script);
- }
-
- return module_object;
}
// The runnable task that finishes compilation in foreground (e.g. updating
@@ -1462,7 +803,6 @@ class FinishCompileTask : public CancelableTask {
ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile");
wasm::WasmCode* result = unit->FinishCompilation(&thrower);
- NativeModule* native_module = unit->native_module();
if (thrower.error()) {
DCHECK_NULL(result);
compilation_state_->OnError(&thrower);
@@ -1474,13 +814,9 @@ class FinishCompileTask : public CancelableTask {
if (compilation_state_->baseline_compilation_finished()) {
// If Liftoff compilation finishes it will directly start executing.
// As soon as we have Turbofan-compiled code available, it will
- // directly be used by Liftoff-compiled code. Therefore we need
- // to patch the compiled Turbofan function directly after finishing it.
+ // directly be used by Liftoff-compiled code via the jump table.
DCHECK_EQ(CompileMode::kTiering, compilation_state_->compile_mode());
DCHECK(!result->is_liftoff());
- CodeSpecialization code_specialization;
- code_specialization.RelocateDirectCalls(native_module);
- code_specialization.ApplyToWasmCode(result);
if (wasm::WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
@@ -1516,16 +852,14 @@ class BackgroundCompileTask : public CancelableTask {
void RunInternal() override {
TRACE_COMPILE("(3b) Compiling...\n");
- // The number of currently running background tasks is reduced either in
- // {StopBackgroundCompilationTaskForThrottling} or in
+ // The number of currently running background tasks is reduced in
// {OnBackgroundTaskStopped}.
- while (!compilation_state_->StopBackgroundCompilationTaskForThrottling()) {
- if (compilation_state_->failed() ||
- !FetchAndExecuteCompilationUnit(compilation_state_)) {
- compilation_state_->OnBackgroundTaskStopped();
+ while (!compilation_state_->failed()) {
+ if (!FetchAndExecuteCompilationUnit(compilation_state_)) {
break;
}
}
+ compilation_state_->OnBackgroundTaskStopped();
}
private:
@@ -1534,12 +868,69 @@ class BackgroundCompileTask : public CancelableTask {
} // namespace
MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Isolate* isolate, ErrorThrower* thrower,
+ std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
- return CompileToModuleObjectInternal(isolate, thrower, std::move(module),
- wire_bytes, asm_js_script,
- asm_js_offset_table_bytes);
+ const WasmModule* wasm_module = module.get();
+ TimedHistogramScope wasm_compile_module_time_scope(SELECT_WASM_COUNTER(
+ isolate->counters(), wasm_module->origin, wasm_compile, module_time));
+ // TODO(6792): No longer needed once WebAssembly code is off heap. Use
+ // base::Optional to be able to close the scope before notifying the debugger.
+ base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
+ base::in_place_t(), isolate->heap());
+
+ // Create heap objects for script, module bytes and asm.js offset table to
+ // be stored in the module object.
+ Handle<Script> script;
+ Handle<ByteArray> asm_js_offset_table;
+ if (asm_js_script.is_null()) {
+ script = CreateWasmScript(isolate, wire_bytes);
+ } else {
+ script = asm_js_script;
+ asm_js_offset_table =
+ isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table_bytes.length());
+ }
+ // TODO(wasm): only save the sections necessary to deserialize a
+ // {WasmModule}. E.g. function bodies could be omitted.
+ OwnedVector<uint8_t> wire_bytes_copy =
+ OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
+
+ // Create the module object.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmModuleObject. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ ModuleEnv env = CreateDefaultModuleEnv(wasm_module);
+
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+ isolate, std::move(module), env, std::move(wire_bytes_copy), script,
+ asm_js_offset_table);
+ CompileNativeModule(isolate, thrower, module_object, wasm_module, &env);
+ if (thrower->error()) return {};
+
+ // Compile JS->wasm wrappers for exported functions.
+ CompileJsToWasmWrappers(isolate, module_object);
+
+ // If we created a wasm script, finish it now and make it public to the
+ // debugger.
+ if (asm_js_script.is_null()) {
+ // Close the CodeSpaceMemoryModificationScope before calling into the
+ // debugger.
+ modification_scope.reset();
+ isolate->debug()->OnAfterCompile(script);
+ }
+
+ // Log the code within the generated module for profiling.
+ module_object->native_module()->LogWasmCodes(isolate);
+
+ return module_object;
}
InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
@@ -1547,8 +938,7 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
MaybeHandle<JSReceiver> ffi,
MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
- module_(module_object->shared()->module()),
- async_counters_(isolate->async_counters()),
+ module_(module_object->module()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
@@ -1558,6 +948,7 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
// Check that an imports argument was provided, if the module requires it.
// No point in continuing otherwise.
if (!module_->import_table.empty() && ffi_.is_null()) {
@@ -1574,59 +965,92 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// From here on, we expect the build pipeline to run without exiting to JS.
DisallowJavascriptExecution no_js(isolate_);
// Record build time into correct bucket, then build instance.
- TimedHistogramScope wasm_instantiate_module_time_scope(
- module_->is_wasm() ? counters()->wasm_instantiate_wasm_module_time()
- : counters()->wasm_instantiate_asm_module_time());
- Factory* factory = isolate_->factory();
+ TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm_instantiate, module_time));
//--------------------------------------------------------------------------
- // Reuse the compiled module (if no owner), otherwise clone.
+ // Allocate the memory array buffer.
//--------------------------------------------------------------------------
- wasm::NativeModule* native_module = nullptr;
- // Root the old instance, if any, in case later allocation causes GC,
- // to prevent the finalizer running for the old instance.
- MaybeHandle<WasmInstanceObject> old_instance;
+ // We allocate the memory buffer before cloning or reusing the compiled module
+ // so we will know whether we need to recompile with bounds checks.
+ uint32_t initial_pages = module_->initial_pages;
+ auto initial_pages_counter = SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
+ initial_pages_counter->AddSample(initial_pages);
+ // Asm.js has memory_ already set at this point, so we don't want to
+ // overwrite it.
+ if (memory_.is_null()) {
+ memory_ = FindImportedMemoryBuffer();
+ }
+ if (!memory_.is_null()) {
+ // Set externally passed ArrayBuffer non neuterable.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ memory->set_is_neuterable(false);
- TRACE("Starting new module instantiation\n");
- {
- Handle<WasmCompiledModule> original =
- handle(module_object_->compiled_module());
- if (original->has_instance()) {
- old_instance = handle(original->owning_instance());
- // Clone, but don't insert yet the clone in the instances chain.
- // We do that last. Since we are holding on to the old instance,
- // the owner + original state used for cloning and patching
- // won't be mutated by possible finalizer runs.
- TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
- compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- native_module = compiled_module_->GetNativeModule();
- RecordStats(native_module, counters());
- } else {
- // No instance owned the original compiled module.
- compiled_module_ = original;
- native_module = compiled_module_->GetNativeModule();
- TRACE("Reusing existing instance %zu\n",
- compiled_module_->GetNativeModule()->instance_id);
+ DCHECK_IMPLIES(use_trap_handler(),
+ module_->origin == kAsmJsOrigin ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr ||
+ // TODO(836800) Remove once is_wasm_memory transfers over
+ // post-message.
+ (FLAG_experimental_wasm_threads && memory->is_shared()));
+ } else if (initial_pages > 0 || use_trap_handler()) {
+ // We need to unconditionally create a guard region if using trap handlers,
+ // even when the size is zero to prevent null-dereference issues
+ // (e.g. https://crbug.com/769637).
+ // Allocate memory if the initial size is more than 0 pages.
+ memory_ = AllocateMemory(initial_pages);
+ if (memory_.is_null()) return {}; // failed to allocate memory
+ }
+
+ //--------------------------------------------------------------------------
+ // Recompile module if using trap handlers but could not get guarded memory
+ //--------------------------------------------------------------------------
+ if (module_->origin == kWasmOrigin && use_trap_handler()) {
+ // Make sure the memory has suitable guard regions.
+ WasmMemoryTracker* const memory_tracker =
+ isolate_->wasm_engine()->memory_tracker();
+
+ if (!memory_tracker->HasFullGuardRegions(
+ memory_.ToHandleChecked()->backing_store())) {
+ if (!FLAG_wasm_trap_handler_fallback) {
+ return {};
+ }
+
+ TRACE("Recompiling module without bounds checks\n");
+ constexpr bool allow_trap_handler = false;
+ ModuleEnv env = CreateDefaultModuleEnv(module_, allow_trap_handler);
+ // Disable trap handlers on this native module.
+ NativeModule* native_module = module_object_->native_module();
+ native_module->DisableTrapHandler();
+
+ // Recompile all functions in this native module.
+ ErrorThrower thrower(isolate_, "recompile");
+ CompileNativeModule(isolate_, &thrower, module_object_, module_, &env);
+ if (thrower.error()) {
+ return {};
+ }
+ DCHECK(!native_module->use_trap_handler());
}
}
- DCHECK_NOT_NULL(native_module);
- wasm::NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
+ wasm::NativeModule* native_module = module_object_->native_module();
+ TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
- WasmInstanceObject::New(isolate_, module_object_, compiled_module_);
- Handle<WeakCell> weak_instance = factory->NewWeakCell(instance);
+ WasmInstanceObject::New(isolate_, module_object_);
+ wasm::NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
MaybeHandle<JSArrayBuffer> old_globals;
- uint32_t globals_size = module_->globals_size;
- if (globals_size > 0) {
+ uint32_t globals_buffer_size = module_->globals_buffer_size;
+ if (globals_buffer_size > 0) {
void* backing_store =
- isolate_->array_buffer_allocator()->Allocate(globals_size);
+ isolate_->array_buffer_allocator()->Allocate(globals_buffer_size);
if (backing_store == nullptr) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
@@ -1636,7 +1060,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
constexpr bool is_external = false;
constexpr bool is_wasm_memory = false;
JSArrayBuffer::Setup(globals_, isolate_, is_external, backing_store,
- globals_size, SharedFlag::kNotShared, is_wasm_memory);
+ globals_buffer_size, SharedFlag::kNotShared,
+ is_wasm_memory);
if (globals_.is_null()) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
@@ -1662,11 +1087,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reserve the metadata for indirect function tables.
//--------------------------------------------------------------------------
- int function_table_count = static_cast<int>(module_->function_tables.size());
- table_instances_.reserve(module_->function_tables.size());
- for (int index = 0; index < function_table_count; ++index) {
- table_instances_.emplace_back();
- }
+ int table_count = static_cast<int>(module_->tables.size());
+ table_instances_.resize(table_count);
//--------------------------------------------------------------------------
// Process the imports for the module.
@@ -1682,39 +1104,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Initialize the indirect tables.
//--------------------------------------------------------------------------
- if (function_table_count > 0) {
+ if (table_count > 0) {
InitializeTables(instance);
}
//--------------------------------------------------------------------------
- // Allocate the memory array buffer.
- //--------------------------------------------------------------------------
- uint32_t initial_pages = module_->initial_pages;
- (module_->is_wasm() ? counters()->wasm_wasm_min_mem_pages_count()
- : counters()->wasm_asm_min_mem_pages_count())
- ->AddSample(initial_pages);
-
- if (!memory_.is_null()) {
- // Set externally passed ArrayBuffer non neuterable.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- memory->set_is_neuterable(false);
-
- DCHECK_IMPLIES(use_trap_handler(),
- module_->is_asm_js() || memory->is_wasm_memory() ||
- memory->backing_store() == nullptr ||
- // TODO(836800) Remove once is_wasm_memory transfers over
- // post-message.
- (FLAG_experimental_wasm_threads && memory->is_shared()));
- } else if (initial_pages > 0 || use_trap_handler()) {
- // We need to unconditionally create a guard region if using trap handlers,
- // even when the size is zero to prevent null-dereference issues
- // (e.g. https://crbug.com/769637).
- // Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages);
- if (memory_.is_null()) return {}; // failed to allocate memory
- }
-
- //--------------------------------------------------------------------------
// Create the WebAssembly.Memory object.
//--------------------------------------------------------------------------
if (module_->has_memory) {
@@ -1743,7 +1137,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Check that indirect function table segments are within bounds.
//--------------------------------------------------------------------------
- for (WasmTableInit& table_init : module_->table_inits) {
+ for (const WasmTableInit& table_init : module_->table_inits) {
DCHECK(table_init.table_index < table_instances_.size());
uint32_t base = EvalUint32InitExpr(table_init.offset);
size_t table_size = table_instances_[table_init.table_index].table_size;
@@ -1756,7 +1150,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Check that memory segments are within bounds.
//--------------------------------------------------------------------------
- for (WasmDataSegment& seg : module_->data_segments) {
+ for (const WasmDataSegment& seg : module_->data_segments) {
uint32_t base = EvalUint32InitExpr(seg.dest_addr);
if (!in_bounds(base, seg.source.length(), instance->memory_size())) {
thrower_->LinkError("data segment is out of bounds");
@@ -1773,7 +1167,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Initialize the indirect function tables.
//--------------------------------------------------------------------------
- if (function_table_count > 0) {
+ if (table_count > 0) {
LoadTableSegments(instance);
}
@@ -1785,44 +1179,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Patch all code with the relocations registered in code_specialization.
+ // Install a finalizer on the new instance object.
//--------------------------------------------------------------------------
- CodeSpecialization code_specialization;
- code_specialization.RelocateDirectCalls(native_module);
- code_specialization.ApplyToWholeModule(native_module, module_object_,
- SKIP_ICACHE_FLUSH);
- FlushICache(native_module);
- FlushICache(handle(module_object_->export_wrappers()));
-
- //--------------------------------------------------------------------------
- // Unpack and notify signal handler of protected instructions.
- //--------------------------------------------------------------------------
- if (use_trap_handler()) {
- native_module->UnpackAndRegisterProtectedInstructions();
- }
-
- //--------------------------------------------------------------------------
- // Insert the compiled module into the weak list of compiled modules.
- //--------------------------------------------------------------------------
- {
- if (!old_instance.is_null()) {
- // Publish the new instance to the instances chain.
- DisallowHeapAllocation no_gc;
- compiled_module_->InsertInChain(*module_object_);
- }
- module_object_->set_compiled_module(*compiled_module_);
- compiled_module_->set_weak_owning_instance(*weak_instance);
- WasmInstanceObject::InstallFinalizer(isolate_, instance);
- }
+ WasmInstanceObject::InstallFinalizer(isolate_, instance);
//--------------------------------------------------------------------------
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
- WasmSharedModuleData::SetBreakpointsOnNewInstance(
- handle(module_object_->shared(), isolate_), instance);
+ WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
- if (FLAG_wasm_interpret_all && module_->is_wasm()) {
+ if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
std::vector<int> func_indexes;
@@ -1841,29 +1208,25 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
- Handle<WasmInstanceObject> start_function_instance = instance;
- Address start_call_address =
- static_cast<uint32_t>(start_index) < module_->num_imported_functions
- ? kNullAddress
- : native_module->GetCallTargetForFunction(start_index);
FunctionSig* sig = module_->functions[start_index].sig;
- Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, start_call_address, start_index, use_trap_handler());
+ Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, native_module, start_index, use_trap_handler());
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
- isolate_, start_function_instance, MaybeHandle<String>(), start_index,
+ isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
}
DCHECK(!isolate_->has_pending_exception());
- TRACE("Successfully built instance %zu\n",
- compiled_module_->GetNativeModule()->instance_id);
- TRACE_CHAIN(module_object_->compiled_module());
+ TRACE("Successfully built instance for module %p\n",
+ module_object_->native_module());
return instance;
}
bool InstanceBuilder::ExecuteStartFunction() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "InstanceBuilder::ExecuteStartFunction");
if (start_function_.is_null()) return true; // No start function.
HandleScope scope(isolate_);
@@ -1889,8 +1252,8 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
DCHECK(!ffi_.is_null());
// Look up the module first.
- MaybeHandle<Object> result =
- Object::GetPropertyOrElement(ffi_.ToHandleChecked(), module_name);
+ MaybeHandle<Object> result = Object::GetPropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), module_name);
if (result.is_null()) {
return ReportTypeError("module not found", index, module_name);
}
@@ -1903,7 +1266,7 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
module_name);
}
- result = Object::GetPropertyOrElement(module, import_name);
+ result = Object::GetPropertyOrElement(isolate_, module, import_name);
if (result.is_null()) {
ReportLinkError("import not found", index, module_name, import_name);
return MaybeHandle<JSFunction>();
@@ -1965,8 +1328,8 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
// Load data segments into the memory.
void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
- Handle<SeqOneByteString> module_bytes(
- module_object_->shared()->module_bytes(), isolate_);
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t source_size = segment.source.length();
// Segments of size == 0 are just nops.
@@ -1974,13 +1337,12 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
DCHECK(in_bounds(dest_offset, source_size, instance->memory_size()));
byte* dest = instance->memory_start() + dest_offset;
- const byte* src = reinterpret_cast<const byte*>(
- module_bytes->GetCharsAddress() + segment.source.offset());
+ const byte* src = wire_bytes.start() + segment.source.offset();
memcpy(dest, src, source_size);
}
}
-void InstanceBuilder::WriteGlobalValue(WasmGlobal& global, double num) {
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset,
num, ValueTypes::TypeName(global.type));
@@ -2003,7 +1365,7 @@ void InstanceBuilder::WriteGlobalValue(WasmGlobal& global, double num) {
}
}
-void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
Handle<WasmGlobalObject> value) {
TRACE("init [globals_start=%p + %u] = ",
reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset);
@@ -2040,15 +1402,15 @@ void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
}
void InstanceBuilder::SanitizeImports() {
- Handle<SeqOneByteString> module_bytes(
- module_object_->shared()->module_bytes());
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
for (size_t index = 0; index < module_->import_table.size(); ++index) {
- WasmImport& import = module_->import_table[index];
+ const WasmImport& import = module_->import_table[index];
Handle<String> module_name;
MaybeHandle<String> maybe_module_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate_, module_bytes, import.module_name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.module_name);
if (!maybe_module_name.ToHandle(&module_name)) {
thrower_->LinkError("Could not resolve module name for import %zu",
index);
@@ -2057,8 +1419,8 @@ void InstanceBuilder::SanitizeImports() {
Handle<String> import_name;
MaybeHandle<String> maybe_import_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate_, module_bytes, import.field_name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.field_name);
if (!maybe_import_name.ToHandle(&import_name)) {
thrower_->LinkError("Could not resolve import name for import %zu",
index);
@@ -2067,7 +1429,7 @@ void InstanceBuilder::SanitizeImports() {
int int_index = static_cast<int>(index);
MaybeHandle<Object> result =
- module_->is_asm_js()
+ module_->origin == kAsmJsOrigin
? LookupImportAsm(int_index, import_name)
: LookupImport(int_index, module_name, import_name);
if (thrower_->error()) {
@@ -2079,6 +1441,24 @@ void InstanceBuilder::SanitizeImports() {
}
}
+MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
+ DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+ for (size_t index = 0; index < module_->import_table.size(); index++) {
+ const WasmImport& import = module_->import_table[index];
+
+ if (import.kind == kExternalMemory) {
+ const auto& value = sanitized_imports_[index].value;
+ if (!value->IsWasmMemoryObject()) {
+ return {};
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ return buffer;
+ }
+ }
+ return {};
+}
+
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
@@ -2088,15 +1468,14 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
int num_imported_mutable_globals = 0;
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
- for (int index = 0; index < static_cast<int>(module_->import_table.size());
- ++index) {
- WasmImport& import = module_->import_table[index];
+ int num_imports = static_cast<int>(module_->import_table.size());
+ NativeModule* native_module = instance->module_object()->native_module();
+ for (int index = 0; index < num_imports; ++index) {
+ const WasmImport& import = module_->import_table[index];
Handle<String> module_name = sanitized_imports_[index].module_name;
Handle<String> import_name = sanitized_imports_[index].import_name;
Handle<Object> value = sanitized_imports_[index].value;
- NativeModule* native_module =
- instance->compiled_module()->GetNativeModule();
switch (import.kind) {
case kExternalFunction: {
@@ -2119,23 +1498,25 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
imported_instance->module()
->functions[imported_function->function_index()]
.sig;
- if (!imported_sig->Equals(expected_sig)) {
+ if (*imported_sig != *expected_sig) {
ReportLinkError(
"imported function does not match the expected type", index,
module_name, import_name);
return -1;
}
// The import reference is the instance object itself.
- ImportedFunctionEntry entry(instance, func_index);
Address imported_target = imported_function->GetWasmCallTarget();
+ ImportedFunctionEntry entry(instance, func_index);
entry.set_wasm_to_wasm(*imported_instance, imported_target);
} else {
// The imported function is a callable.
Handle<JSReceiver> js_receiver(JSReceiver::cast(*value), isolate_);
- Handle<Code> wrapper_code = compiler::CompileWasmToJSWrapper(
- isolate_, js_receiver, expected_sig, func_index,
- module_->origin(), use_trap_handler());
- RecordStats(*wrapper_code, counters());
+ Handle<Code> wrapper_code =
+ compiler::CompileWasmToJSWrapper(
+ isolate_, js_receiver, expected_sig, func_index,
+ module_->origin, use_trap_handler())
+ .ToHandleChecked();
+ RecordStats(*wrapper_code, isolate_->counters());
WasmCode* wasm_code = native_module->AddCodeCopy(
wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index);
@@ -2153,7 +1534,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
uint32_t table_num = import.index;
DCHECK_EQ(table_num, num_imported_tables);
- WasmIndirectFunctionTable& table = module_->function_tables[table_num];
+ const WasmTable& table = module_->tables[table_num];
TableInstance& table_instance = table_instances_[table_num];
table_instance.table_object = Handle<WasmTableObject>::cast(value);
instance->set_table_object(*table_instance.table_object);
@@ -2210,13 +1591,13 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
// so putting {-1} in the table will cause checks to always fail.
auto target = Handle<WasmExportedFunction>::cast(val);
Handle<WasmInstanceObject> imported_instance =
- handle(target->instance());
+ handle(target->instance(), isolate_);
Address exported_call_target = target->GetWasmCallTarget();
FunctionSig* sig = imported_instance->module()
->functions[target->function_index()]
.sig;
IndirectFunctionTableEntry(instance, i)
- .set(module_->signature_map.Find(sig), *imported_instance,
+ .set(module_->signature_map.Find(*sig), *imported_instance,
exported_call_target);
}
num_imported_tables++;
@@ -2234,7 +1615,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
auto memory = Handle<WasmMemoryObject>::cast(value);
instance->set_memory_object(*memory);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- memory_ = buffer;
+ // memory_ should have already been assigned in Build().
+ DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
uint32_t imported_cur_pages = static_cast<uint32_t>(
buffer->byte_length()->Number() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
@@ -2276,7 +1658,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
// Mutable global imports instead have their backing array buffers
// referenced by this instance, and store the address of the imported
// global in the {imported_mutable_globals_} array.
- WasmGlobal& global = module_->globals[import.index];
+ const WasmGlobal& global = module_->globals[import.index];
// The mutable-global proposal allows importing i64 values, but only if
// they are passed as a WebAssembly.Global object.
@@ -2286,7 +1668,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
module_name, import_name);
return -1;
}
- if (module_->is_asm_js()) {
+ if (module_->origin == kAsmJsOrigin) {
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::kZero) here is what using the observable
@@ -2298,7 +1680,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
if (global.type == kWasmI32) {
value = Object::ToInt32(isolate_, value).ToHandleChecked();
} else {
- value = Object::ToNumber(value).ToHandleChecked();
+ value = Object::ToNumber(isolate_, value).ToHandleChecked();
}
}
}
@@ -2369,7 +1751,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
template <typename T>
-T* InstanceBuilder::GetRawGlobalPtr(WasmGlobal& global) {
+T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
}
@@ -2440,7 +1822,7 @@ bool InstanceBuilder::NeedsWrappers() const {
for (auto& table_instance : table_instances_) {
if (!table_instance.js_wrappers.is_null()) return true;
}
- for (auto& table : module_->function_tables) {
+ for (auto& table : module_->tables) {
if (table.exported) return true;
}
return false;
@@ -2461,7 +1843,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
// imported WebAssembly functions into the js_wrappers_ list.
for (int index = 0, end = static_cast<int>(module_->import_table.size());
index < end; ++index) {
- WasmImport& import = module_->import_table[index];
+ const WasmImport& import = module_->import_table[index];
if (import.kind == kExternalFunction) {
Handle<Object> value = sanitized_imports_[index].value;
if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
@@ -2472,15 +1854,22 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
Handle<JSObject> exports_object;
- if (module_->is_wasm()) {
- // Create the "exports" object.
- exports_object = isolate_->factory()->NewJSObjectWithNullProto();
- } else if (module_->is_asm_js()) {
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate_->native_context()->object_function(), isolate_);
- exports_object = isolate_->factory()->NewJSObject(object_function);
- } else {
- UNREACHABLE();
+ bool is_asm_js = false;
+ switch (module_->origin) {
+ case kWasmOrigin: {
+ // Create the "exports" object.
+ exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+ break;
+ }
+ case kAsmJsOrigin: {
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate_->native_context()->object_function(), isolate_);
+ exports_object = isolate_->factory()->NewJSObject(object_function);
+ is_asm_js = true;
+ break;
+ }
+ default:
+ UNREACHABLE();
}
instance->set_exports_object(*exports_object);
@@ -2488,20 +1877,19 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
PropertyDescriptor desc;
- desc.set_writable(module_->is_asm_js());
+ desc.set_writable(is_asm_js);
desc.set_enumerable(true);
- desc.set_configurable(module_->is_asm_js());
+ desc.set_configurable(is_asm_js);
// Process each export in the export table.
int export_index = 0; // Index into {export_wrappers}.
- for (WasmExport& exp : module_->export_table) {
- Handle<String> name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate_, handle(module_object_->shared(), isolate_), exp.name)
- .ToHandleChecked();
+ for (const WasmExport& exp : module_->export_table) {
+ Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, exp.name)
+ .ToHandleChecked();
Handle<JSObject> export_to;
- if (module_->is_asm_js() && exp.kind == kExternalFunction &&
- String::Equals(name, single_function_name)) {
+ if (is_asm_js && exp.kind == kExternalFunction &&
+ String::Equals(isolate_, name, single_function_name)) {
export_to = instance;
} else {
export_to = exports_object;
@@ -2510,22 +1898,21 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
- WasmFunction& function = module_->functions[exp.index];
+ const WasmFunction& function = module_->functions[exp.index];
Handle<JSFunction> js_function = js_wrappers_[exp.index];
if (js_function.is_null()) {
// Wrap the exported code as a JSFunction.
Handle<Code> export_code =
export_wrappers->GetValueChecked<Code>(isolate_, export_index);
MaybeHandle<String> func_name;
- if (module_->is_asm_js()) {
+ if (is_asm_js) {
// For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupName(
- module_object_->shared()->module_bytes(), function.func_index);
- func_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate_, handle(module_object_->shared(), isolate_),
- func_name_ref)
- .ToHandleChecked();
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ module_object_->native_module()->wire_bytes(),
+ function.func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
}
js_function = WasmExportedFunction::New(
isolate_, instance, func_name, function.func_index,
@@ -2539,7 +1926,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
case kExternalTable: {
// Export a table as a WebAssembly.Table object.
TableInstance& table_instance = table_instances_[exp.index];
- WasmIndirectFunctionTable& table = module_->function_tables[exp.index];
+ const WasmTable& table = module_->tables[exp.index];
if (table_instance.table_object.is_null()) {
uint32_t maximum = table.has_maximum_size ? table.maximum_size
: FLAG_wasm_max_table_size;
@@ -2560,15 +1947,37 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
break;
}
case kExternalGlobal: {
- WasmGlobal& global = module_->globals[exp.index];
+ const WasmGlobal& global = module_->globals[exp.index];
if (FLAG_experimental_wasm_mut_global) {
- Handle<JSArrayBuffer> globals_buffer(instance->globals_buffer(),
- isolate_);
+ Handle<JSArrayBuffer> buffer;
+ uint32_t offset;
+
+ if (global.mutability && global.imported) {
+ Handle<FixedArray> buffers_array(
+ instance->imported_mutable_globals_buffers(), isolate_);
+ buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
+ isolate_, global.index);
+ Address global_addr =
+ instance->imported_mutable_globals()[global.index];
+
+ uint32_t buffer_size = 0;
+ CHECK(buffer->byte_length()->ToUint32(&buffer_size));
+
+ Address backing_store =
+ reinterpret_cast<Address>(buffer->backing_store());
+ CHECK(global_addr >= backing_store &&
+ global_addr < backing_store + buffer_size);
+ offset = static_cast<uint32_t>(global_addr - backing_store);
+ } else {
+ buffer = handle(instance->globals_buffer(), isolate_);
+ offset = global.offset;
+ }
+
// Since the global's array buffer is always provided, allocation
// should never fail.
Handle<WasmGlobalObject> global_obj =
- WasmGlobalObject::New(isolate_, globals_buffer, global.type,
- global.offset, global.mutability)
+ WasmGlobalObject::New(isolate_, buffer, global.type, offset,
+ global.mutability)
.ToHandleChecked();
desc.set_value(global_obj);
} else {
@@ -2611,7 +2020,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
DCHECK_EQ(export_index, export_wrappers->length());
- if (module_->is_wasm()) {
+ if (module_->origin == kWasmOrigin) {
v8::Maybe<bool> success =
JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
DCHECK(success.FromMaybe(false));
@@ -2620,12 +2029,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->function_tables.size();
+ size_t table_count = module_->tables.size();
for (size_t index = 0; index < table_count; ++index) {
- WasmIndirectFunctionTable& table = module_->function_tables[index];
+ const WasmTable& table = module_->tables[index];
TableInstance& table_instance = table_instances_[index];
- if (!instance->has_indirect_function_table()) {
+ if (!instance->has_indirect_function_table() &&
+ table.type == kWasmAnyFunc) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, table.initial_size);
table_instance.table_size = table.initial_size;
@@ -2634,105 +2044,98 @@ void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
}
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
- NativeModule* native_module = compiled_module_->GetNativeModule();
- int function_table_count = static_cast<int>(module_->function_tables.size());
- for (int index = 0; index < function_table_count; ++index) {
+ NativeModule* native_module = module_object_->native_module();
+ for (auto& table_init : module_->table_inits) {
+ uint32_t base = EvalUint32InitExpr(table_init.offset);
+ uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
+ uint32_t index = table_init.table_index;
TableInstance& table_instance = table_instances_[index];
-
- // TODO(titzer): this does redundant work if there are multiple tables,
- // since initializations are not sorted by table index.
- for (auto& table_init : module_->table_inits) {
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
- DCHECK(in_bounds(base, num_entries, table_instance.table_size));
- for (uint32_t i = 0; i < num_entries; ++i) {
- uint32_t func_index = table_init.entries[i];
- WasmFunction* function = &module_->functions[func_index];
- int table_index = static_cast<int>(i + base);
-
- // Update the local dispatch table first.
- uint32_t sig_id = module_->signature_ids[function->sig_index];
- WasmInstanceObject* target_instance = *instance;
- Address call_target;
- const bool is_import = func_index < module_->num_imported_functions;
- if (is_import) {
- // For imported calls, take target instance and address from the
- // import table.
- ImportedFunctionEntry entry(instance, func_index);
- target_instance = entry.instance();
- call_target = entry.target();
- } else {
- call_target = native_module->GetCallTargetForFunction(func_index);
- }
- IndirectFunctionTableEntry(instance, table_index)
- .set(sig_id, target_instance, call_target);
-
- if (!table_instance.table_object.is_null()) {
- // Update the table object's other dispatch tables.
- if (js_wrappers_[func_index].is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
- // TODO(titzer): We compile JS->wasm wrappers for functions are
- // not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, is_import ? kNullAddress : call_target,
- func_index, use_trap_handler());
- MaybeHandle<String> func_name;
- if (module_->is_asm_js()) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupName(
- module_object_->shared()->module_bytes(), func_index);
- func_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate_, handle(module_object_->shared(), isolate_),
- func_name_ref)
- .ToHandleChecked();
- }
- Handle<WasmExportedFunction> js_function =
- WasmExportedFunction::New(
- isolate_, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()),
- wrapper_code);
- js_wrappers_[func_index] = js_function;
+ DCHECK(in_bounds(base, num_entries, table_instance.table_size));
+ for (uint32_t i = 0; i < num_entries; ++i) {
+ uint32_t func_index = table_init.entries[i];
+ const WasmFunction* function = &module_->functions[func_index];
+ int table_index = static_cast<int>(i + base);
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
+ Handle<WasmInstanceObject> target_instance = instance;
+ Address call_target;
+ const bool is_import = func_index < module_->num_imported_functions;
+ if (is_import) {
+ // For imported calls, take target instance and address from the
+ // import table.
+ ImportedFunctionEntry entry(instance, func_index);
+ target_instance = handle(entry.instance(), isolate_);
+ call_target = entry.target();
+ } else {
+ call_target = native_module->GetCallTargetForFunction(func_index);
+ }
+ IndirectFunctionTableEntry(instance, table_index)
+ .set(sig_id, *target_instance, call_target);
+
+ if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
+ if (js_wrappers_[func_index].is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+ // TODO(titzer): We compile JS->wasm wrappers for functions are
+ // not exported but are in an exported table. This should be done
+ // at module compile time and cached instead.
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, native_module, func_index, use_trap_handler());
+ MaybeHandle<String> func_name;
+ if (module_->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ native_module->wire_bytes(), func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
}
- table_instance.js_wrappers->set(table_index,
- *js_wrappers_[func_index]);
- // UpdateDispatchTables() should update this instance as well.
- WasmTableObject::UpdateDispatchTables(
- isolate_, table_instance.table_object, table_index, function->sig,
- instance, call_target);
+ Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, func_index,
+ static_cast<int>(function->sig->parameter_count()), wrapper_code);
+ js_wrappers_[func_index] = js_function;
}
+ table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ target_instance, call_target);
}
}
+ }
- // TODO(titzer): we add the new dispatch table at the end to avoid
- // redundant work and also because the new instance is not yet fully
- // initialized.
+ int table_count = static_cast<int>(module_->tables.size());
+ for (int index = 0; index < table_count; ++index) {
+ TableInstance& table_instance = table_instances_[index];
+
+ // Add the new dispatch table at the end to avoid redundant lookups.
if (!table_instance.table_object.is_null()) {
- // Add the new dispatch table to the WebAssembly.Table object.
WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
instance, index);
}
}
}
-AsyncCompileJob::AsyncCompileJob(Isolate* isolate,
- std::unique_ptr<byte[]> bytes_copy,
- size_t length, Handle<Context> context,
- Handle<JSPromise> promise)
+AsyncCompileJob::AsyncCompileJob(
+ Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context,
+ std::unique_ptr<CompilationResultResolver> resolver)
: isolate_(isolate),
async_counters_(isolate->async_counters()),
bytes_copy_(std::move(bytes_copy)),
- wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length) {
+ wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
+ resolver_(std::move(resolver)) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
- // The handles for the context and promise must be deferred.
+ // The handle for the context must be deferred.
DeferredHandleScope deferred(isolate);
- context_ = Handle<Context>(*context);
- module_promise_ = Handle<JSPromise>(*promise);
+ native_context_ = Handle<Context>(context->native_context(), isolate);
+ DCHECK(native_context_->IsNativeContext());
deferred_handles_.push_back(deferred.Detach());
}
@@ -2742,9 +2145,7 @@ void AsyncCompileJob::Start() {
void AsyncCompileJob::Abort() {
background_task_manager_.CancelAndWait();
- if (!compiled_module_.is_null()) {
- compiled_module_->GetNativeModule()->compilation_state()->Abort();
- }
+ if (native_module_) native_module_->compilation_state()->Abort();
if (num_pending_foreground_tasks_ == 0) {
// No task is pending, we can just remove the AsyncCompileJob.
isolate_->wasm_engine()->RemoveCompileJob(this);
@@ -2774,8 +2175,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
void OnFinishedChunk() override;
- void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
- size_t length) override;
+ void OnFinishedStream(OwnedVector<uint8_t> bytes) override;
void OnError(DecodeResult result) override;
@@ -2806,53 +2206,15 @@ AsyncCompileJob::~AsyncCompileJob() {
}
void AsyncCompileJob::FinishCompile() {
- RecordStats(compiled_module_->GetNativeModule(), counters());
-
- // Create heap objects for script and module bytes to be stored in the
- // shared module data. Asm.js is not compiled asynchronously.
- Handle<Script> script = CreateWasmScript(isolate_, wire_bytes_);
- Handle<ByteArray> asm_js_offset_table;
- // TODO(wasm): Improve efficiency of storing module wire bytes.
- // 1. Only store relevant sections, not function bodies
- // 2. Don't make a second copy of the bytes here; reuse the copy made
- // for asynchronous compilation and store it as an external one
- // byte string for serialization/deserialization.
- Handle<String> module_bytes =
- isolate_->factory()
- ->NewStringFromOneByte({wire_bytes_.start(), wire_bytes_.length()},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
- int export_wrapper_size = static_cast<int>(module_->num_exported_functions);
- Handle<FixedArray> export_wrappers =
- isolate_->factory()->NewFixedArray(export_wrapper_size, TENURED);
-
- // The {managed_module} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<Managed<WasmModule>> managed_module =
- Managed<WasmModule>::FromUniquePtr(isolate_, std::move(module_));
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate_, managed_module, Handle<SeqOneByteString>::cast(module_bytes),
- script, asm_js_offset_table);
- compiled_module_->GetNativeModule()->SetSharedModuleData(shared);
-
- // Create the module object.
- module_object_ = WasmModuleObject::New(isolate_, compiled_module_,
- export_wrappers, shared);
- {
- DeferredHandleScope deferred(isolate_);
- module_object_ = handle(*module_object_, isolate_);
- deferred_handles_.push_back(deferred.Detach());
- }
+ RecordStats(native_module_, counters());
// Finish the wasm script now and make it public to the debugger.
+ Handle<Script> script(module_object_->script(), isolate_);
isolate_->debug()->OnAfterCompile(script);
+ // Log the code within the generated module for profiling.
+ native_module_->LogWasmCodes(isolate_);
+
// TODO(wasm): compiling wrappers should be made async as well.
DoSync<CompileWrappers>();
}
@@ -2862,15 +2224,11 @@ void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->RemoveCompileJob(this);
- MaybeHandle<Object> promise_result =
- JSPromise::Reject(module_promise_, error_reason);
- CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
+ resolver_->OnCompilationFailed(error_reason);
}
-void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
- MaybeHandle<Object> promise_result =
- JSPromise::Resolve(module_promise_, result);
- CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
+void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
+ resolver_->OnCompilationSucceeded(result);
}
// A closure to run a compilation step (either as foreground or background
@@ -2888,7 +2246,7 @@ class AsyncCompileJob::CompileStep {
--job_->num_pending_foreground_tasks_;
DCHECK_EQ(0, job_->num_pending_foreground_tasks_);
SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->context_);
+ job_->isolate_->set_context(*job_->native_context_);
RunInForeground();
} else {
RunInBackground();
@@ -2987,7 +2345,7 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
} else {
// Decode passed.
job_->module_ = std::move(result.val);
- job_->DoSync<PrepareAndStartCompile>(job_->module_.get(), true);
+ job_->DoSync<PrepareAndStartCompile>(true);
}
}
};
@@ -3015,11 +2373,10 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
//==========================================================================
class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
- explicit PrepareAndStartCompile(WasmModule* module, bool start_compilation)
- : module_(module), start_compilation_(start_compilation) {}
+ explicit PrepareAndStartCompile(bool start_compilation)
+ : start_compilation_(start_compilation) {}
private:
- WasmModule* module_;
bool start_compilation_;
void RunInForeground() override {
@@ -3029,25 +2386,35 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// is done.
job_->background_task_manager_.CancelAndWait();
- Isolate* isolate = job_->isolate_;
-
- job_->centry_stub_ = CodeFactory::CEntry(isolate);
-
- DCHECK_LE(module_->num_imported_functions, module_->functions.size());
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of
- // this object.
- ModuleEnv env = CreateDefaultModuleEnv(module_);
- job_->compiled_module_ = NewCompiledModule(job_->isolate_, module_, env);
+ // Create heap objects for script and module bytes to be stored in the
+ // module object. Asm.js is not compiled asynchronously.
+ Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_);
+ Handle<ByteArray> asm_js_offset_table;
+
+ const WasmModule* module = job_->module_.get();
+ ModuleEnv env = CreateDefaultModuleEnv(module);
+ // TODO(wasm): Improve efficiency of storing module wire bytes. Only store
+ // relevant sections, not function bodies
+
+ // Create the module object and populate with compiled functions and
+ // information needed at instantiation time.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one {WasmModuleObject}. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+ // Create the module object.
+ job_->module_object_ = WasmModuleObject::New(
+ job_->isolate_, job_->module_, env,
+ {std::move(job_->bytes_copy_), job_->wire_bytes_.length()}, script,
+ asm_js_offset_table);
+ job_->native_module_ = job_->module_object_->native_module();
{
DeferredHandleScope deferred(job_->isolate_);
- job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
+ job_->module_object_ = handle(*job_->module_object_, job_->isolate_);
job_->deferred_handles_.push_back(deferred.Detach());
}
size_t num_functions =
- module_->functions.size() - module_->num_imported_functions;
+ module->functions.size() - module->num_imported_functions;
if (num_functions == 0) {
// Tiering has nothing to do if module is empty.
@@ -3059,7 +2426,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
CompilationState* compilation_state =
- job_->compiled_module_->GetNativeModule()->compilation_state();
+ job_->native_module_->compilation_state();
{
// Instance field {job_} cannot be captured by copy, therefore
// we need to add a local helper variable {job}. We want to
@@ -3073,34 +2440,31 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
case CompilationEvent::kFinishedBaselineCompilation:
if (job->DecrementAndCheckFinisherCount()) {
SaveContext saved_context(job->isolate());
- // TODO(mstarzinger): Make {AsyncCompileJob::context} point
- // to the native context and also rename to {native_context}.
- job->isolate()->set_context(job->context_->native_context());
+ job->isolate()->set_context(*job->native_context_);
job->FinishCompile();
}
return;
case CompilationEvent::kFinishedTopTierCompilation:
- // It is only safe to schedule the UpdateToTopTierCompiledCode
- // step if no foreground task is currently pending, and no
- // finisher is outstanding (streaming compilation).
+ // It is only safe to remove the AsyncCompileJob if no
+ // foreground task is currently pending, and no finisher is
+ // outstanding (streaming compilation).
if (job->num_pending_foreground_tasks_ == 0 &&
- job->outstanding_finishers_.Value() == 0) {
- job->DoSync<UpdateToTopTierCompiledCode>();
+ job->outstanding_finishers_.load() == 0) {
+ job->isolate_->wasm_engine()->RemoveCompileJob(job);
+ } else {
+ // If a foreground task was pending or a finsher was pending,
+ // we will rely on FinishModule to remove the job.
+ job->tiering_completed_ = true;
}
- // If a foreground task was pending or a finsher was pending,
- // we will rely on FinishModule to switch the step to
- // UpdateToTopTierCompiledCode.
- job->tiering_completed_ = true;
return;
case CompilationEvent::kFailedCompilation: {
// Tier-up compilation should not fail if baseline compilation
// did not fail.
- DCHECK(!job->compiled_module_->GetNativeModule()
- ->compilation_state()
+ DCHECK(!job->native_module_->compilation_state()
->baseline_compilation_finished());
SaveContext saved_context(job->isolate());
- job->isolate()->set_context(job->context_->native_context());
+ job->isolate()->set_context(*job->native_context_);
Handle<Object> error = thrower->Reify();
DeferredHandleScope deferred(job->isolate());
@@ -3122,12 +2486,10 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already.
- size_t functions_count = GetNumFunctionsToCompile(env.module);
- compilation_state->SetNumberOfFunctionsToCompile(functions_count);
+ compilation_state->SetNumberOfFunctionsToCompile(
+ module->num_declared_functions);
// Add compilation units and kick off compilation.
- InitializeCompilationUnits(module_->functions, job_->wire_bytes_,
- env.module, job_->centry_stub_,
- job_->compiled_module_->GetNativeModule());
+ InitializeCompilationUnits(job_->native_module_);
}
}
};
@@ -3160,8 +2522,7 @@ class AsyncCompileJob::CompileWrappers : public CompileStep {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(job_->isolate_->heap());
// Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(job_->isolate_, job_->module_object_,
- job_->counters());
+ CompileJsToWasmWrappers(job_->isolate_, job_->module_object_);
job_->DoSync<FinishModule>();
}
};
@@ -3174,12 +2535,10 @@ class AsyncCompileJob::FinishModule : public CompileStep {
TRACE_COMPILE("(6) Finish module...\n");
job_->AsyncCompileSucceeded(job_->module_object_);
- WasmModule* module = job_->module_object_->shared()->module();
size_t num_functions =
- module->functions.size() - module->num_imported_functions;
- if (job_->compiled_module_->GetNativeModule()
- ->compilation_state()
- ->compile_mode() == CompileMode::kRegular ||
+ job_->module_->functions.size() - job_->module_->num_imported_functions;
+ if (job_->native_module_->compilation_state()->compile_mode() ==
+ CompileMode::kRegular ||
num_functions == 0) {
// If we do not tier up, the async compile job is done here and
// can be deleted.
@@ -3189,27 +2548,14 @@ class AsyncCompileJob::FinishModule : public CompileStep {
// If background tiering compilation finished before we resolved the
// promise, switch to patching now. Otherwise, patching will be scheduled
// by a callback.
- DCHECK_EQ(CompileMode::kTiering, job_->compiled_module_->GetNativeModule()
- ->compilation_state()
- ->compile_mode());
+ DCHECK_EQ(CompileMode::kTiering,
+ job_->native_module_->compilation_state()->compile_mode());
if (job_->tiering_completed_) {
- job_->DoSync<UpdateToTopTierCompiledCode>();
+ job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
}
}
};
-//==========================================================================
-// Step 7 (sync): Update with top tier code.
-//==========================================================================
-class AsyncCompileJob::UpdateToTopTierCompiledCode : public CompileStep {
- void RunInForeground() override {
- TRACE_COMPILE("(7) Update native module to use optimized code...\n");
-
- UpdateAllCompiledModulesWithTopTierCode(job_->module_object_);
- job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
- }
-};
-
class AsyncCompileJob::AbortCompilation : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("Abort asynchronous compilation ...\n");
@@ -3232,8 +2578,8 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationState as well.
- if (!job_->compiled_module_.is_null()) {
- job_->compiled_module_->GetNativeModule()->compilation_state()->Abort();
+ if (job_->native_module_) {
+ job_->native_module_->compilation_state()->Abort();
if (job_->num_pending_foreground_tasks_ == 0) {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
@@ -3255,6 +2601,7 @@ bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process module header...\n");
decoder_.StartDecoding(job_->isolate());
+ job_->module_ = decoder_.shared_module();
decoder_.DecodeModuleHeader(bytes, offset);
if (!decoder_.ok()) {
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
@@ -3305,8 +2652,7 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
return false;
}
- job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(decoder_.module(),
- false);
+ job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(false);
// Execute the PrepareAndStartCompile step immediately and not in a separate
// task. The step expects to be run on a separate foreground thread though, so
// we to increment {num_pending_foreground_tasks_} to look like one.
@@ -3315,15 +2661,14 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
constexpr bool on_foreground = true;
job_->step_->Run(on_foreground);
- NativeModule* native_module = job_->compiled_module_->GetNativeModule();
- native_module->compilation_state()->SetNumberOfFunctionsToCompile(
+ job_->native_module_->compilation_state()->SetNumberOfFunctionsToCompile(
functions_count);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
- job_->outstanding_finishers_.SetValue(2);
+ job_->outstanding_finishers_.store(2);
compilation_unit_builder_.reset(
- new CompilationUnitBuilder(native_module, job_->centry_stub_));
+ new CompilationUnitBuilder(job_->native_module_));
return true;
}
@@ -3332,7 +2677,6 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process function body %d ...\n", next_function_);
- if (next_function_ >= FLAG_skip_compiling_wasm_funcs) {
decoder_.DecodeFunctionBody(
next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
@@ -3340,7 +2684,6 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
const WasmFunction* func = &decoder_.module()->functions[index];
WasmName name = {nullptr, 0};
compilation_unit_builder_->AddUnit(func, offset, bytes, name);
- }
++next_function_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
@@ -3358,23 +2701,22 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
}
// Finish the processing of the stream.
-void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
- size_t length) {
+void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
- job_->bytes_copy_ = std::move(bytes);
- job_->wire_bytes_ = ModuleWireBytes(job_->bytes_copy_.get(),
- job_->bytes_copy_.get() + length);
+ if (job_->native_module_) {
+ job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
+ job_->native_module_->set_wire_bytes(std::move(bytes));
+ }
ModuleResult result = decoder_.FinishDecoding(false);
DCHECK(result.ok());
- job_->module_ = std::move(result.val);
+ DCHECK_EQ(job_->module_, result.val);
if (job_->DecrementAndCheckFinisherCount()) {
- if (job_->compiled_module_.is_null()) {
+ if (job_->native_module_ == nullptr) {
// We are processing a WebAssembly module without code section. We need to
// prepare compilation first before we can finish it.
// {PrepareAndStartCompile} will call {FinishCompile} by itself if there
// is no code section.
- job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(job_->module_.get(),
- true);
+ job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(true);
} else {
job_->FinishCompile();
}
@@ -3398,7 +2740,7 @@ void CompilationStateDeleter::operator()(
}
std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
- Isolate* isolate, ModuleEnv& env) {
+ Isolate* isolate, const ModuleEnv& env) {
return std::unique_ptr<CompilationState, CompilationStateDeleter>(
new CompilationState(isolate, env));
}
@@ -3407,32 +2749,30 @@ ModuleEnv* GetModuleEnv(CompilationState* compilation_state) {
return compilation_state->module_env();
}
-CompilationState::CompilationState(internal::Isolate* isolate, ModuleEnv& env)
+CompilationState::CompilationState(internal::Isolate* isolate,
+ const ModuleEnv& env)
: isolate_(isolate),
+ wasm_engine_(isolate->wasm_engine()),
module_env_(env),
- max_memory_(GetMaxUsableMemorySize(isolate) / 2),
- // TODO(clemensh): Fix fuzzers such that {env.module} is always non-null.
- compile_mode_(FLAG_wasm_tier_up && (!env.module || env.module->is_wasm())
+ compile_mode_(FLAG_wasm_tier_up && env.module->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
- wire_bytes_(ModuleWireBytes(nullptr, nullptr)),
max_background_tasks_(std::max(
1, std::min(FLAG_wasm_num_compilation_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {
- DCHECK_LT(0, max_memory_);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
- // Register task manager for clean shutdown in case of an isolate shutdown.
- isolate_->wasm_engine()->Register(&background_task_manager_);
- isolate_->wasm_engine()->Register(&foreground_task_manager_);
+ // Register task manager for clean shutdown in case of an engine shutdown.
+ wasm_engine_->Register(&background_task_manager_);
+ wasm_engine_->Register(&foreground_task_manager_);
}
CompilationState::~CompilationState() {
CancelAndWait();
foreground_task_manager_.CancelAndWait();
- isolate_->wasm_engine()->Unregister(&foreground_task_manager_);
+ wasm_engine_->Unregister(&foreground_task_manager_);
NotifyOnEvent(CompilationEvent::kDestroyed, nullptr);
}
@@ -3501,7 +2841,6 @@ std::unique_ptr<WasmCompilationUnit> CompilationState::GetNextExecutedUnit() {
if (units.empty()) return {};
std::unique_ptr<WasmCompilationUnit> ret = std::move(units.back());
units.pop_back();
- allocated_memory_ -= ret->memory_cost();
return ret;
}
@@ -3550,7 +2889,6 @@ void CompilationState::OnFinishedUnit() {
void CompilationState::ScheduleUnitForFinishing(
std::unique_ptr<WasmCompilationUnit> unit,
WasmCompilationUnit::CompilationMode mode) {
- size_t cost = unit->memory_cost();
base::LockGuard<base::Mutex> guard(&mutex_);
if (compile_mode_ == CompileMode::kTiering &&
mode == WasmCompilationUnit::CompilationMode::kTurbofan) {
@@ -3558,7 +2896,6 @@ void CompilationState::ScheduleUnitForFinishing(
} else {
baseline_finish_units_.push_back(std::move(unit));
}
- allocated_memory_ += cost;
if (!finisher_is_running_ && !failed_) {
ScheduleFinisherTask();
@@ -3569,7 +2906,7 @@ void CompilationState::ScheduleUnitForFinishing(
void CompilationState::CancelAndWait() {
background_task_manager_.CancelAndWait();
- isolate_->wasm_engine()->Unregister(&background_task_manager_);
+ wasm_engine_->Unregister(&background_task_manager_);
}
void CompilationState::OnBackgroundTaskStopped() {
@@ -3585,8 +2922,6 @@ void CompilationState::RestartBackgroundTasks(size_t max) {
// No need to restart tasks if compilation already failed.
if (failed_) return;
- bool should_increase_workload = allocated_memory_ <= max_memory_ / 2;
- if (!should_increase_workload) return;
DCHECK_LE(num_background_tasks_, max_background_tasks_);
if (num_background_tasks_ == max_background_tasks_) return;
size_t num_compilation_units =
@@ -3622,15 +2957,6 @@ void CompilationState::ScheduleFinisherTask() {
base::make_unique<FinishCompileTask>(this, &foreground_task_manager_));
}
-bool CompilationState::StopBackgroundCompilationTaskForThrottling() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- DCHECK_LE(1, num_background_tasks_);
- bool can_accept_work = allocated_memory_ < max_memory_;
- if (can_accept_work) return false;
- --num_background_tasks_;
- return true;
-}
-
void CompilationState::Abort() {
{
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -3647,26 +2973,20 @@ void CompilationState::NotifyOnEvent(CompilationEvent event,
}
void CompileJsToWasmWrappers(Isolate* isolate,
- Handle<WasmModuleObject> module_object,
- Counters* counters) {
+ Handle<WasmModuleObject> module_object) {
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate);
- NativeModule* native_module =
- module_object->compiled_module()->GetNativeModule();
+ NativeModule* native_module = module_object->native_module();
wasm::UseTrapHandler use_trap_handler =
native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler;
- WasmModule* module = native_module->shared_module_data()->module();
+ const WasmModule* module = native_module->module();
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
- Address call_target =
- exp.index < module->num_imported_functions
- ? kNullAddress
- : native_module->GetCallTargetForFunction(exp.index);
- Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate, module, call_target, exp.index, use_trap_handler);
+ Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
+ isolate, native_module, exp.index, use_trap_handler);
export_wrappers->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, counters);
+ RecordStats(*wrapper_code, isolate->counters());
++wrapper_index;
}
}
@@ -3706,7 +3026,6 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace v8
#undef TRACE
-#undef TRACE_CHAIN
#undef TRACE_COMPILE
#undef TRACE_STREAMING
#undef TRACE_LAZY
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 59d5566898..eb9f271543 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -5,10 +5,10 @@
#ifndef V8_WASM_MODULE_COMPILER_H_
#define V8_WASM_MODULE_COMPILER_H_
+#include <atomic>
#include <functional>
#include <memory>
-#include "src/base/atomic-utils.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
#include "src/wasm/wasm-module.h"
@@ -27,9 +27,11 @@ class Vector;
namespace wasm {
+class CompilationResultResolver;
class CompilationState;
class ErrorThrower;
class ModuleCompiler;
+class NativeModule;
class WasmCode;
struct ModuleEnv;
struct WasmModule;
@@ -39,16 +41,16 @@ struct CompilationStateDeleter {
};
// Wrapper to create a CompilationState exists in order to avoid having
-// the the CompilationState in the header file.
+// the CompilationState in the header file.
std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
- Isolate* isolate, ModuleEnv& env);
+ Isolate* isolate, const ModuleEnv& env);
ModuleEnv* GetModuleEnv(CompilationState* compilation_state);
MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
+ Isolate* isolate, ErrorThrower* thrower,
+ std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
@@ -57,22 +59,14 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate,
- Handle<WasmModuleObject> module_object,
- Counters* counters);
+ Handle<WasmModuleObject> module_object);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes);
// Triggered by the WasmCompileLazy builtin.
-// Walks the stack (top three frames) to determine the wasm instance involved
-// and which function to compile.
-// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
-// patching the call site or indirect function tables.
-// Returns either the Code object that has been lazily compiled, or Illegal if
-// an error occurred. In the latter case, a pending exception has been set,
-// which will be triggered when returning from the runtime function, i.e. the
-// Illegal builtin will never be called.
-Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
+// Returns the instruction start of the compiled code object.
+Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
@@ -85,7 +79,8 @@ class AsyncCompileJob {
public:
explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
size_t length, Handle<Context> context,
- Handle<JSPromise> promise);
+ std::unique_ptr<CompilationResultResolver> resolver);
+ ~AsyncCompileJob();
void Start();
@@ -93,7 +88,7 @@ class AsyncCompileJob {
void Abort();
- ~AsyncCompileJob();
+ Isolate* isolate() const { return isolate_; }
private:
class CompileTask;
@@ -118,7 +113,7 @@ class AsyncCompileJob {
void AsyncCompileFailed(Handle<Object> error_reason);
- void AsyncCompileSucceeded(Handle<Object> result);
+ void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
void StartForegroundTask();
@@ -139,38 +134,39 @@ class AsyncCompileJob {
template <typename Step, typename... Args>
void NextStep(Args&&... args);
- Isolate* isolate() { return isolate_; }
-
friend class AsyncStreamingProcessor;
Isolate* isolate_;
const std::shared_ptr<Counters> async_counters_;
+ // Copy of the module wire bytes, moved into the {native_module_} on it's
+ // creation.
std::unique_ptr<byte[]> bytes_copy_;
+ // Reference to the wire bytes (hold in {bytes_copy_} or as part of
+ // {native_module_}).
ModuleWireBytes wire_bytes_;
- Handle<Context> context_;
- Handle<JSPromise> module_promise_;
- std::unique_ptr<WasmModule> module_;
+ Handle<Context> native_context_;
+ std::unique_ptr<CompilationResultResolver> resolver_;
+ std::shared_ptr<const WasmModule> module_;
std::vector<DeferredHandles*> deferred_handles_;
- Handle<WasmCompiledModule> compiled_module_;
Handle<WasmModuleObject> module_object_;
+ NativeModule* native_module_ = nullptr;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
- Handle<Code> centry_stub_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
// For async compilation the AsyncCompileJob is the only finisher. For
// streaming compilation also the AsyncStreamingProcessor has to finish before
// compilation can be finished.
- base::AtomicNumber<int32_t> outstanding_finishers_{1};
+ std::atomic<int32_t> outstanding_finishers_{1};
// Decrements the number of outstanding finishers. The last caller of this
// function should finish the asynchronous compilation, see the comment on
// {outstanding_finishers_}.
V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
- return outstanding_finishers_.Decrement(1) == 0;
+ return outstanding_finishers_.fetch_sub(1) == 1;
}
// Counts the number of pending foreground tasks.
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 8f67cd81c7..bae8e4baf8 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -291,12 +291,12 @@ class ModuleDecoderImpl : public Decoder {
void StartDecoding(Isolate* isolate) {
CHECK_NULL(module_);
SetCounters(isolate->counters());
- module_.reset(new WasmModule(
- base::make_unique<Zone>(isolate->allocator(), "signatures")));
+ module_.reset(new WasmModule(base::make_unique<Zone>(
+ isolate->wasm_engine()->allocator(), "signatures")));
module_->initial_pages = 0;
module_->maximum_pages = 0;
module_->mem_export = false;
- module_->set_origin(origin_);
+ module_->origin = origin_;
}
void DecodeModuleHeader(Vector<const uint8_t> bytes, uint8_t offset) {
@@ -428,7 +428,7 @@ class ModuleDecoderImpl : public Decoder {
static_cast<int>(pc_ - start_));
FunctionSig* s = consume_sig(module_->signature_zone.get());
module_->signatures.push_back(s);
- uint32_t id = s ? module_->signature_map.FindOrInsert(s) : 0;
+ uint32_t id = s ? module_->signature_map.FindOrInsert(*s) : 0;
module_->signature_ids.push_back(id);
}
module_->signature_map.Freeze();
@@ -473,26 +473,33 @@ class ModuleDecoderImpl : public Decoder {
case kExternalTable: {
// ===== Imported table ==========================================
if (!AddTable(module_.get())) break;
- import->index =
- static_cast<uint32_t>(module_->function_tables.size());
- module_->function_tables.emplace_back();
- WasmIndirectFunctionTable* table = &module_->function_tables.back();
+ import->index = static_cast<uint32_t>(module_->tables.size());
+ module_->tables.emplace_back();
+ WasmTable* table = &module_->tables.back();
table->imported = true;
- expect_u8("element type", kWasmAnyFunctionTypeCode);
+ ValueType type = consume_reference_type();
+ if (!FLAG_experimental_wasm_anyref) {
+ if (type != kWasmAnyFunc) {
+ error(pc_ - 1, "invalid table type");
+ break;
+ }
+ }
+ table->type = type;
+ uint8_t flags = validate_table_flags("element count");
consume_resizable_limits(
"element count", "elements", FLAG_wasm_max_table_size,
&table->initial_size, &table->has_maximum_size,
- FLAG_wasm_max_table_size, &table->maximum_size);
+ FLAG_wasm_max_table_size, &table->maximum_size, flags);
break;
}
case kExternalMemory: {
// ===== Imported memory =========================================
if (!AddMemory(module_.get())) break;
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages,
&module_->initial_pages, &module_->has_maximum_pages,
- kSpecMaxWasmMemoryPages, &module_->maximum_pages,
- &module_->has_shared_memory);
+ kSpecMaxWasmMemoryPages, &module_->maximum_pages, flags);
break;
}
case kExternalGlobal: {
@@ -503,8 +510,12 @@ class ModuleDecoderImpl : public Decoder {
WasmGlobal* global = &module_->globals.back();
global->type = consume_value_type();
global->mutability = consume_mutability();
- if (!FLAG_experimental_wasm_mut_global && global->mutability) {
- error("mutable globals cannot be imported");
+ if (global->mutability) {
+ if (FLAG_experimental_wasm_mut_global) {
+ module_->num_imported_mutable_globals++;
+ } else {
+ error("mutable globals cannot be imported");
+ }
}
break;
}
@@ -518,12 +529,15 @@ class ModuleDecoderImpl : public Decoder {
void DecodeFunctionSection() {
uint32_t functions_count =
consume_count("functions count", kV8MaxWasmFunctions);
- (IsWasm() ? GetCounters()->wasm_functions_per_wasm_module()
- : GetCounters()->wasm_functions_per_asm_module())
- ->AddSample(static_cast<int>(functions_count));
- module_->functions.reserve(functions_count);
+ auto counter =
+ SELECT_WASM_COUNTER(GetCounters(), origin_, wasm_functions_per, module);
+ counter->AddSample(static_cast<int>(functions_count));
+ DCHECK_EQ(module_->functions.size(), module_->num_imported_functions);
+ uint32_t total_function_count =
+ module_->num_imported_functions + functions_count;
+ module_->functions.reserve(total_function_count);
module_->num_declared_functions = functions_count;
- for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ for (uint32_t i = 0; i < functions_count; ++i) {
uint32_t func_index = static_cast<uint32_t>(module_->functions.size());
module_->functions.push_back({nullptr, // sig
func_index, // func_index
@@ -533,21 +547,27 @@ class ModuleDecoderImpl : public Decoder {
false}); // exported
WasmFunction* function = &module_->functions.back();
function->sig_index = consume_sig_index(module_.get(), &function->sig);
+ if (!ok()) return;
}
+ DCHECK_EQ(module_->functions.size(), total_function_count);
}
void DecodeTableSection() {
- uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
+ // TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
+ // implementation of AnyRef landed.
+ uint32_t max_count = FLAG_experimental_wasm_anyref ? 10 : kV8MaxWasmTables;
+ uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
if (!AddTable(module_.get())) break;
- module_->function_tables.emplace_back();
- WasmIndirectFunctionTable* table = &module_->function_tables.back();
- expect_u8("table type", kWasmAnyFunctionTypeCode);
- consume_resizable_limits("table elements", "elements",
- FLAG_wasm_max_table_size, &table->initial_size,
- &table->has_maximum_size,
- FLAG_wasm_max_table_size, &table->maximum_size);
+ module_->tables.emplace_back();
+ WasmTable* table = &module_->tables.back();
+ table->type = consume_reference_type();
+ uint8_t flags = validate_table_flags("table elements");
+ consume_resizable_limits(
+ "table elements", "elements", FLAG_wasm_max_table_size,
+ &table->initial_size, &table->has_maximum_size,
+ FLAG_wasm_max_table_size, &table->maximum_size, flags);
}
}
@@ -556,10 +576,11 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
if (!AddMemory(module_.get())) break;
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages, &module_->initial_pages,
&module_->has_maximum_pages, kSpecMaxWasmMemoryPages,
- &module_->maximum_pages, &module_->has_shared_memory);
+ &module_->maximum_pages, flags);
}
}
@@ -606,7 +627,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExternalTable: {
- WasmIndirectFunctionTable* table = nullptr;
+ WasmTable* table = nullptr;
exp->index = consume_table_index(module_.get(), &table);
if (table) table->exported = true;
break;
@@ -682,19 +703,24 @@ class ModuleDecoderImpl : public Decoder {
uint32_t element_count =
consume_count("element count", FLAG_wasm_max_table_size);
- if (element_count > 0 && module_->function_tables.size() == 0) {
+ if (element_count > 0 && module_->tables.size() == 0) {
error(pc_, "The element section requires a table");
}
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
- if (table_index != 0) {
+ if (!FLAG_experimental_wasm_anyref && table_index != 0) {
errorf(pos, "illegal table index %u != 0", table_index);
}
- if (table_index >= module_->function_tables.size()) {
+ if (table_index >= module_->tables.size()) {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
+ if (module_->tables[table_index].type != kWasmAnyFunc) {
+ errorf(pos, "Invalid element segment. Table %u is not of type AnyFunc",
+ table_index);
+ break;
+ }
WasmInitExpr offset = consume_init_expr(module_.get(), kWasmI32);
uint32_t num_elem =
consume_count("number of elements", kV8MaxWasmTableEntries);
@@ -889,11 +915,9 @@ class ModuleDecoderImpl : public Decoder {
return consume_init_expr(nullptr, kWasmStmt);
}
- WasmModule* module() { return module_.get(); }
+ const std::shared_ptr<WasmModule>& shared_module() const { return module_; }
- bool IsWasm() { return origin_ == kWasmOrigin; }
-
- Counters* GetCounters() {
+ Counters* GetCounters() const {
DCHECK_NOT_NULL(counters_);
return counters_;
}
@@ -904,7 +928,7 @@ class ModuleDecoderImpl : public Decoder {
}
private:
- std::unique_ptr<WasmModule> module_;
+ std::shared_ptr<WasmModule> module_;
Counters* counters_ = nullptr;
// The type section is the first section in a module.
uint8_t next_section_ = kFirstSectionInModule;
@@ -922,7 +946,8 @@ class ModuleDecoderImpl : public Decoder {
}
bool AddTable(WasmModule* module) {
- if (module->function_tables.size() > 0) {
+ if (FLAG_experimental_wasm_anyref) return true;
+ if (module->tables.size() > 0) {
error("At most one table is supported");
return false;
} else {
@@ -986,23 +1011,23 @@ class ModuleDecoderImpl : public Decoder {
// Calculate individual global offsets and total size of globals table.
void CalculateGlobalOffsets(WasmModule* module) {
uint32_t offset = 0;
+ uint32_t num_imported_mutable_globals = 0;
if (module->globals.size() == 0) {
- module->globals_size = 0;
- module->num_imported_mutable_globals = 0;
+ module->globals_buffer_size = 0;
return;
}
for (WasmGlobal& global : module->globals) {
byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
if (global.mutability && global.imported) {
DCHECK(FLAG_experimental_wasm_mut_global);
- global.index = module->num_imported_mutable_globals++;
+ global.index = num_imported_mutable_globals++;
} else {
offset = (offset + size - 1) & ~(size - 1); // align
global.offset = offset;
offset += size;
}
}
- module->globals_size = offset;
+ module->globals_buffer_size = offset;
}
// Verifies the body (code) of a given function.
@@ -1012,7 +1037,7 @@ class ModuleDecoderImpl : public Decoder {
WasmFunctionName func_name(function,
wire_bytes.GetNameOrNull(function, module));
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
- OFStream os(stdout);
+ StdoutStream os;
os << "Verifying wasm function " << func_name << std::endl;
}
FunctionBody body = {
@@ -1020,7 +1045,7 @@ class ModuleDecoderImpl : public Decoder {
start_ + GetBufferRelativeOffset(function->code.offset()),
start_ + GetBufferRelativeOffset(function->code.end_offset())};
DecodeResult result = VerifyWasmCodeWithStats(allocator, module, body,
- IsWasm(), GetCounters());
+ origin_, GetCounters());
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream wrapped;
@@ -1069,9 +1094,8 @@ class ModuleDecoderImpl : public Decoder {
return consume_index("global index", module->globals, global);
}
- uint32_t consume_table_index(WasmModule* module,
- WasmIndirectFunctionTable** table) {
- return consume_index("table index", module->function_tables, table);
+ uint32_t consume_table_index(WasmModule* module, WasmTable** table) {
+ return consume_index("table index", module->tables, table);
}
template <typename T>
@@ -1088,33 +1112,43 @@ class ModuleDecoderImpl : public Decoder {
return index;
}
- void consume_resizable_limits(const char* name, const char* units,
- uint32_t max_initial, uint32_t* initial,
- bool* has_max, uint32_t max_maximum,
- uint32_t* maximum,
- bool* has_shared_memory = nullptr) {
+ uint8_t validate_table_flags(const char* name) {
uint8_t flags = consume_u8("resizable limits flags");
const byte* pos = pc();
+ if (flags & 0xFE) {
+ errorf(pos - 1, "invalid %s limits flags", name);
+ }
+ return flags;
+ }
+ uint8_t validate_memory_flags(bool* has_shared_memory) {
+ uint8_t flags = consume_u8("resizable limits flags");
+ const byte* pos = pc();
+ *has_shared_memory = false;
if (FLAG_experimental_wasm_threads) {
- bool is_memory = (strcmp(name, "memory") == 0);
- if (flags & 0xFC || (!is_memory && (flags & 0xFE))) {
- errorf(pos - 1, "invalid %s limits flags", name);
- }
- if (flags == 3) {
+ if (flags & 0xFC) {
+ errorf(pos - 1, "invalid memory limits flags");
+ } else if (flags == 3) {
DCHECK_NOT_NULL(has_shared_memory);
*has_shared_memory = true;
} else if (flags == 2) {
errorf(pos - 1,
- "%s limits flags should have maximum defined if shared is true",
- name);
+ "memory limits flags should have maximum defined if shared is "
+ "true");
}
} else {
if (flags & 0xFE) {
- errorf(pos - 1, "invalid %s limits flags", name);
+ errorf(pos - 1, "invalid memory limits flags");
}
}
+ return flags;
+ }
+ void consume_resizable_limits(const char* name, const char* units,
+ uint32_t max_initial, uint32_t* initial,
+ bool* has_max, uint32_t max_maximum,
+ uint32_t* maximum, uint8_t flags) {
+ const byte* pos = pc();
*initial = consume_u32v("initial size");
*has_max = false;
if (*initial > max_initial) {
@@ -1255,11 +1289,14 @@ class ModuleDecoderImpl : public Decoder {
case kLocalF64:
return kWasmF64;
default:
- if (IsWasm()) {
+ if (origin_ == kWasmOrigin) {
switch (t) {
case kLocalS128:
if (FLAG_experimental_wasm_simd) return kWasmS128;
break;
+ case kLocalAnyFunc:
+ if (FLAG_experimental_wasm_anyref) return kWasmAnyFunc;
+ break;
case kLocalAnyRef:
if (FLAG_experimental_wasm_anyref) return kWasmAnyRef;
break;
@@ -1272,6 +1309,26 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ // Reads a single 8-bit integer, interpreting it as a reference type.
+ ValueType consume_reference_type() {
+ byte val = consume_u8("reference type");
+ ValueTypeCode t = static_cast<ValueTypeCode>(val);
+ switch (t) {
+ case kLocalAnyFunc:
+ return kWasmAnyFunc;
+ case kLocalAnyRef:
+ if (!FLAG_experimental_wasm_anyref) {
+ error(pc_ - 1,
+ "Invalid type. Set --experimental-wasm-anyref to use 'AnyRef'");
+ }
+ return kWasmAnyRef;
+ default:
+ break;
+ }
+ error(pc_ - 1, "invalid reference type");
+ return kWasmStmt;
+ }
+
FunctionSig* consume_sig(Zone* zone) {
constexpr bool has_return_values = true;
return consume_sig_internal(zone, has_return_values);
@@ -1325,18 +1382,16 @@ class ModuleDecoderImpl : public Decoder {
ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool verify_functions,
ModuleOrigin origin, Counters* counters) {
- auto counter = origin == kWasmOrigin
- ? counters->wasm_decode_wasm_module_time()
- : counters->wasm_decode_asm_module_time();
+ auto counter =
+ SELECT_WASM_COUNTER(counters, origin, wasm_decode, module_time);
TimedHistogramScope wasm_decode_module_time_scope(counter);
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleResult::Error("start > end");
if (size >= kV8MaxWasmModuleSize)
return ModuleResult::Error("size > maximum module size: %zu", size);
// TODO(bradnelson): Improve histogram handling of size_t.
- auto size_counter = origin == kWasmOrigin
- ? counters->wasm_wasm_module_size_bytes()
- : counters->wasm_asm_module_size_bytes();
+ auto size_counter =
+ SELECT_WASM_COUNTER(counters, origin, wasm, module_size_bytes);
size_counter->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
@@ -1347,10 +1402,8 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
// allocated on the C++ heap.
// https://bugs.chromium.org/p/chromium/issues/detail?id=657320
if (result.ok()) {
- auto peak_counter =
- origin == kWasmOrigin
- ? counters->wasm_decode_wasm_module_peak_memory_bytes()
- : counters->wasm_decode_asm_module_peak_memory_bytes();
+ auto peak_counter = SELECT_WASM_COUNTER(counters, origin, wasm_decode,
+ module_peak_memory_bytes);
peak_counter->AddSample(
static_cast<int>(result.val->signature_zone->allocation_size()));
}
@@ -1360,7 +1413,9 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
ModuleDecoder::ModuleDecoder() = default;
ModuleDecoder::~ModuleDecoder() = default;
-WasmModule* ModuleDecoder::module() const { return impl_->module(); }
+const std::shared_ptr<WasmModule>& ModuleDecoder::shared_module() const {
+ return impl_->shared_module();
+}
void ModuleDecoder::StartDecoding(Isolate* isolate, ModuleOrigin origin) {
DCHECK_NULL(impl_);
@@ -1454,9 +1509,8 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
size_t size = function_end - function_start;
if (function_start > function_end)
return FunctionResult::Error("start > end");
- auto size_histogram = module->is_wasm()
- ? counters->wasm_wasm_function_size_bytes()
- : counters->wasm_asm_function_size_bytes();
+ auto size_histogram =
+ SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes);
// TODO(bradnelson): Improve histogram handling of ptrdiff_t.
size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize)
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 6ca2fc1f4f..dc6d4c4ae0 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -23,7 +23,7 @@ inline bool IsValidSectionCode(uint8_t byte) {
const char* SectionName(SectionCode code);
-typedef Result<std::unique_ptr<WasmModule>> ModuleResult;
+typedef Result<std::shared_ptr<WasmModule>> ModuleResult;
typedef Result<std::unique_ptr<WasmFunction>> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
typedef Result<FunctionOffsets> FunctionOffsetsResult;
@@ -138,7 +138,8 @@ class ModuleDecoder {
ModuleResult FinishDecoding(bool verify_functions = true);
- WasmModule* module() const;
+ const std::shared_ptr<WasmModule>& shared_module() const;
+ WasmModule* module() const { return shared_module().get(); }
bool ok();
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
index b4054596f8..dca3b1ef89 100644
--- a/deps/v8/src/wasm/signature-map.cc
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
+uint32_t SignatureMap::FindOrInsert(const FunctionSig& sig) {
CHECK(!frozen_);
auto pos = map_.find(sig);
if (pos != map_.end()) {
@@ -22,7 +22,7 @@ uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
}
}
-int32_t SignatureMap::Find(FunctionSig* sig) const {
+int32_t SignatureMap::Find(const FunctionSig& sig) const {
auto pos = map_.find(sig);
if (pos != map_.end()) {
return static_cast<int32_t>(pos->second);
@@ -31,24 +31,6 @@ int32_t SignatureMap::Find(FunctionSig* sig) const {
}
}
-bool SignatureMap::CompareFunctionSigs::operator()(FunctionSig* a,
- FunctionSig* b) const {
- if (a == b) return false;
- if (a->return_count() < b->return_count()) return true;
- if (a->return_count() > b->return_count()) return false;
- if (a->parameter_count() < b->parameter_count()) return true;
- if (a->parameter_count() > b->parameter_count()) return false;
- for (size_t r = 0; r < a->return_count(); r++) {
- if (a->GetReturn(r) < b->GetReturn(r)) return true;
- if (a->GetReturn(r) > b->GetReturn(r)) return false;
- }
- for (size_t p = 0; p < a->parameter_count(); p++) {
- if (a->GetParam(p) < b->GetParam(p)) return true;
- if (a->GetParam(p) > b->GetParam(p)) return false;
- }
- return false;
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index 3fab0fde16..5ed66976d5 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -5,16 +5,14 @@
#ifndef V8_WASM_SIGNATURE_MAP_H_
#define V8_WASM_SIGNATURE_MAP_H_
-#include <map>
+#include <unordered_map>
+#include "src/signature.h"
#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
-template <typename T>
-class Signature;
-
namespace wasm {
using FunctionSig = Signature<ValueType>;
@@ -30,22 +28,18 @@ class V8_EXPORT_PRIVATE SignatureMap {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(SignatureMap);
// Gets the index for a signature, assigning a new index if necessary.
- uint32_t FindOrInsert(FunctionSig* sig);
+ uint32_t FindOrInsert(const FunctionSig& sig);
// Gets the index for a signature, returning {-1} if not found.
- int32_t Find(FunctionSig* sig) const;
+ int32_t Find(const FunctionSig& sig) const;
// Disallows further insertions to this signature map.
void Freeze() { frozen_ = true; }
private:
- // TODO(wasm): use a hashmap instead of an ordered map?
- struct CompareFunctionSigs {
- bool operator()(FunctionSig* a, FunctionSig* b) const;
- };
uint32_t next_ = 0;
bool frozen_ = false;
- std::map<FunctionSig*, uint32_t, CompareFunctionSigs> map_;
+ std::unordered_map<FunctionSig, uint32_t, base::hash<FunctionSig>> map_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index afaa28a7de..07b425aad0 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -64,8 +64,8 @@ void StreamingDecoder::Finish() {
return;
}
- std::unique_ptr<uint8_t[]> bytes(new uint8_t[total_size_]);
- uint8_t* cursor = bytes.get();
+ OwnedVector<uint8_t> bytes = OwnedVector<uint8_t>::New(total_size_);
+ uint8_t* cursor = bytes.start();
{
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
@@ -74,11 +74,11 @@ void StreamingDecoder::Finish() {
cursor += arraysize(module_header);
}
for (auto&& buffer : section_buffers_) {
- DCHECK_LE(cursor - bytes.get() + buffer->length(), total_size_);
+ DCHECK_LE(cursor - bytes.start() + buffer->length(), total_size_);
memcpy(cursor, buffer->bytes(), buffer->length());
cursor += buffer->length();
}
- processor_->OnFinishedStream(std::move(bytes), total_size_);
+ processor_->OnFinishedStream(std::move(bytes));
}
void StreamingDecoder::Abort() {
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 571179c64d..7b986bc28b 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -44,8 +44,7 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// Report the end of the stream. If the stream was successful, all
// received bytes are passed by parameter. If there has been an error, an
// empty array is passed.
- virtual void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
- size_t length) = 0;
+ virtual void OnFinishedStream(OwnedVector<uint8_t> bytes) = 0;
// Report an error detected in the StreamingDecoder.
virtual void OnError(DecodeResult result) = 0;
// Report the abortion of the stream.
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 42b078aeac..8522b3a500 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -20,9 +20,12 @@ enum ValueType : uint8_t {
kWasmF64,
kWasmS128,
kWasmAnyRef,
+ kWasmAnyFunc,
kWasmVar,
};
+inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); }
+
// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
// constexpr support.
#define FOREACH_LOAD_TYPE(V) \
@@ -234,6 +237,7 @@ class V8_EXPORT_PRIVATE ValueTypes {
return MachineType::Float32();
case kWasmF64:
return MachineType::Float64();
+ case kWasmAnyFunc:
case kWasmAnyRef:
return MachineType::TaggedPointer();
case kWasmS128:
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index f997ef2a8c..676d14e1f7 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -7,10 +7,8 @@
#include <iomanip>
#include "src/assembler-inl.h"
-#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
@@ -18,6 +16,7 @@
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -43,140 +42,59 @@ struct WasmCodeUniquePtrComparator {
}
};
-#if V8_TARGET_ARCH_X64
-#define __ masm->
-constexpr bool kModuleCanAllocateMoreMemory = false;
-
-void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
- __ movq(kScratchRegister, static_cast<uint64_t>(target));
- __ jmp(kScratchRegister);
-}
-#undef __
-#elif V8_TARGET_ARCH_S390X
-#define __ masm->
-constexpr bool kModuleCanAllocateMoreMemory = false;
-
-void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
- __ mov(ip, Operand(bit_cast<intptr_t, Address>(target)));
- __ b(ip);
-}
-#undef __
-#elif V8_TARGET_ARCH_ARM64
-#define __ masm->
-constexpr bool kModuleCanAllocateMoreMemory = false;
-
-void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
- __ Mov(scratch, reinterpret_cast<uint64_t>(target));
- __ Br(scratch);
-}
-#undef __
-#else
-const bool kModuleCanAllocateMoreMemory = true;
-#endif
-
-void RelocateCode(WasmCode* code, const WasmCode* orig,
- WasmCode::FlushICache flush_icache) {
- intptr_t delta = code->instruction_start() - orig->instruction_start();
- for (RelocIterator it(code->instructions(), code->reloc_info(),
- code->constant_pool(), RelocInfo::kApplyMask);
- !it.done(); it.next()) {
- it.rinfo()->apply(delta);
- }
- if (flush_icache) {
- Assembler::FlushICache(code->instructions().start(),
- code->instructions().size());
- }
-}
-
} // namespace
-DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
- ranges_.push_back({start, end});
-}
-
-void DisjointAllocationPool::Merge(DisjointAllocationPool&& other) {
+void DisjointAllocationPool::Merge(AddressRange range) {
auto dest_it = ranges_.begin();
auto dest_end = ranges_.end();
- for (auto src_it = other.ranges_.begin(), src_end = other.ranges_.end();
- src_it != src_end;) {
- if (dest_it == dest_end) {
- // everything else coming from src will be inserted
- // at the back of ranges_ from now on.
- ranges_.push_back(*src_it);
- ++src_it;
- continue;
- }
- // Before or adjacent to dest. Insert or merge, and advance
- // just src.
- if (dest_it->first >= src_it->second) {
- if (dest_it->first == src_it->second) {
- dest_it->first = src_it->first;
- } else {
- ranges_.insert(dest_it, {src_it->first, src_it->second});
- }
- ++src_it;
- continue;
- }
- // Src is strictly after dest. Skip over this dest.
- if (dest_it->second < src_it->first) {
- ++dest_it;
- continue;
- }
- // Src is adjacent from above. Merge and advance
- // just src, because the next src, if any, is bound to be
- // strictly above the newly-formed range.
- DCHECK_EQ(dest_it->second, src_it->first);
- dest_it->second = src_it->second;
- ++src_it;
- // Now that we merged, maybe this new range is adjacent to
- // the next. Since we assume src to have come from the
- // same original memory pool, it follows that the next src
- // must be above or adjacent to the new bubble.
- auto next_dest = dest_it;
- ++next_dest;
- if (next_dest != dest_end && dest_it->second == next_dest->first) {
- dest_it->second = next_dest->second;
- ranges_.erase(next_dest);
- }
+ // Skip over dest ranges strictly before {range}.
+ while (dest_it != dest_end && dest_it->end < range.start) ++dest_it;
+
+ // After last dest range: insert and done.
+ if (dest_it == dest_end) {
+ ranges_.push_back(range);
+ return;
+ }
+
+ // Adjacent (from below) to dest: merge and done.
+ if (dest_it->start == range.end) {
+ dest_it->start = range.start;
+ return;
+ }
+
+ // Before dest: insert and done.
+ if (dest_it->start > range.end) {
+ ranges_.insert(dest_it, range);
+ return;
+ }
- // src_it points now at the next, if any, src
- DCHECK_IMPLIES(src_it != src_end, src_it->first >= dest_it->second);
+ // Src is adjacent from above. Merge and check whether the merged range is now
+ // adjacent to the next range.
+ DCHECK_EQ(dest_it->end, range.start);
+ dest_it->end = range.end;
+ auto next_dest = dest_it;
+ ++next_dest;
+ if (next_dest != dest_end && dest_it->end == next_dest->start) {
+ dest_it->end = next_dest->end;
+ ranges_.erase(next_dest);
}
}
-DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
- ExtractionMode mode) {
- DisjointAllocationPool ret;
- for (auto it = ranges_.begin(), end = ranges_.end(); it != end;) {
- auto current = it;
- ++it;
- DCHECK_LT(current->first, current->second);
- size_t current_size = static_cast<size_t>(current->second - current->first);
- if (size == current_size) {
- ret.ranges_.push_back(*current);
- ranges_.erase(current);
- return ret;
- }
- if (size < current_size) {
- ret.ranges_.push_back({current->first, current->first + size});
- current->first += size;
- DCHECK(current->first < current->second);
- return ret;
- }
- if (mode != kContiguous) {
- size -= current_size;
- ret.ranges_.push_back(*current);
- ranges_.erase(current);
+AddressRange DisjointAllocationPool::Allocate(size_t size) {
+ for (auto it = ranges_.begin(), end = ranges_.end(); it != end; ++it) {
+ size_t range_size = it->size();
+ if (size > range_size) continue;
+ AddressRange ret{it->start, it->start + size};
+ if (size == range_size) {
+ ranges_.erase(it);
+ } else {
+ it->start += size;
+ DCHECK_LT(it->start, it->end);
}
+ return ret;
}
- if (size > 0) {
- Merge(std::move(ret));
- return {};
- }
- return ret;
+ return {};
}
Address WasmCode::constant_pool() const {
@@ -198,15 +116,15 @@ void WasmCode::set_trap_handler_index(size_t value) {
}
void WasmCode::RegisterTrapHandlerData() {
+ DCHECK(!HasTrapHandlerIndex());
if (kind() != wasm::WasmCode::kFunction) return;
- if (HasTrapHandlerIndex()) return;
Address base = instruction_start();
size_t size = instructions().size();
const int index =
RegisterHandlerData(base, size, protected_instructions().size(),
- protected_instructions().data());
+ protected_instructions().start());
// TODO(eholk): if index is negative, fail.
CHECK_LE(0, index);
@@ -215,63 +133,102 @@ void WasmCode::RegisterTrapHandlerData() {
bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
-void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
-
bool WasmCode::ShouldBeLogged(Isolate* isolate) {
return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling() || FLAG_print_wasm_code || FLAG_print_code;
+ isolate->is_profiling();
}
void WasmCode::LogCode(Isolate* isolate) const {
DCHECK(ShouldBeLogged(isolate));
- if (native_module()->shared_module_data() && index_.IsJust()) {
- uint32_t index = this->index();
- Handle<WasmSharedModuleData> shared_handle(
- native_module()->shared_module_data(), isolate);
- int name_length;
- Handle<String> name(
- WasmSharedModuleData::GetFunctionName(isolate, shared_handle, index));
- auto cname =
- name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
- RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
- PROFILE(isolate,
- CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
- {cname.get(), static_cast<size_t>(name_length)}));
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- OFStream os(stdout);
- os << "--- Wasm " << (is_liftoff() ? "liftoff" : "turbofan")
- << " code ---\n";
- this->Disassemble(cname.get(), isolate, os);
- os << "--- End code ---\n";
- }
-#endif
+ if (IsAnonymous()) return;
+ ModuleWireBytes wire_bytes(native_module()->wire_bytes());
+ // TODO(herhut): Allow to log code without on-heap round-trip of the name.
+ ModuleEnv* module_env = GetModuleEnv(native_module()->compilation_state());
+ WireBytesRef name_ref =
+ module_env->module->LookupFunctionName(wire_bytes, index());
+ WasmName name_vec = wire_bytes.GetName(name_ref);
+ MaybeHandle<String> maybe_name =
+ isolate->factory()->NewStringFromUtf8(Vector<const char>::cast(name_vec));
+ Handle<String> name;
+ if (!maybe_name.ToHandle(&name)) {
+ name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
+ }
+ int name_length;
+ auto cname =
+ name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
+ PROFILE(isolate,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
+ {cname.get(), static_cast<size_t>(name_length)}));
+ if (!source_positions().is_empty()) {
+ LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
+ source_positions()));
+ }
+}
- if (!source_positions().is_empty()) {
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
- source_positions()));
+void WasmCode::Validate() const {
+#ifdef DEBUG
+ // We expect certain relocation info modes to never appear in {WasmCode}
+ // objects or to be restricted to a small set of valid values. Hence the
+ // iteration below does not use a mask, but visits all relocation data.
+ for (RelocIterator it(instructions(), reloc_info(), constant_pool());
+ !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::WASM_CALL: {
+ Address target = it.rinfo()->wasm_call_address();
+ WasmCode* code = native_module_->Lookup(target);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(WasmCode::kJumpTable, code->kind());
+ CHECK(code->contains(target));
+ break;
+ }
+ case RelocInfo::WASM_STUB_CALL: {
+ Address target = it.rinfo()->wasm_stub_call_address();
+ WasmCode* code = native_module_->Lookup(target);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
+ CHECK_EQ(target, code->instruction_start());
+ break;
+ }
+ case RelocInfo::INTERNAL_REFERENCE:
+ case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
+ Address target = it.rinfo()->target_internal_reference();
+ CHECK(contains(target));
+ break;
+ }
+ case RelocInfo::JS_TO_WASM_CALL:
+ case RelocInfo::EXTERNAL_REFERENCE:
+ case RelocInfo::OFF_HEAP_TARGET:
+ case RelocInfo::COMMENT:
+ case RelocInfo::CONST_POOL:
+ case RelocInfo::VENEER_POOL:
+ // These are OK to appear.
+ break;
+ default:
+ FATAL("Unexpected mode: %d", mode);
}
}
+#endif
}
-void WasmCode::Print(Isolate* isolate) const {
- OFStream os(stdout);
- Disassemble(nullptr, isolate, os);
+void WasmCode::Print(const char* name) const {
+ StdoutStream os;
+ os << "--- WebAssembly code ---\n";
+ Disassemble(name, os);
+ os << "--- End code ---\n";
}
-void WasmCode::Disassemble(const char* name, Isolate* isolate, std::ostream& os,
+void WasmCode::Disassemble(const char* name, std::ostream& os,
Address current_pc) const {
if (name) os << "name: " << name << "\n";
- if (index_.IsJust()) os << "index: " << index_.FromJust() << "\n";
+ if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
size_t body_size = instructions().size();
os << "Body (size = " << body_size << ")\n";
#ifdef ENABLE_DISASSEMBLER
-
size_t instruction_size = body_size;
if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
instruction_size = constant_pool_offset_;
@@ -281,9 +238,7 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate, std::ostream& os,
}
DCHECK_LT(0, instruction_size);
os << "Instructions (size = " << instruction_size << ")\n";
- // TODO(mtrofin): rework the dependency on isolate and code in
- // Disassembler::Decode.
- Disassembler::Decode(isolate, &os, instructions().start(),
+ Disassembler::Decode(nullptr, &os, instructions().start(),
instructions().start() + instruction_size,
CodeReference(this), current_pc);
os << "\n";
@@ -299,10 +254,10 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate, std::ostream& os,
os << "\n";
}
- os << "RelocInfo (size = " << reloc_size_ << ")\n";
+ os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
for (RelocIterator it(instructions(), reloc_info(), constant_pool());
!it.done(); it.next()) {
- it.rinfo()->Print(isolate, os);
+ it.rinfo()->Print(nullptr, os);
}
os << "\n";
#endif // ENABLE_DISASSEMBLER
@@ -316,20 +271,17 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
return "wasm-to-js";
case WasmCode::kLazyStub:
return "lazy-compile";
+ case WasmCode::kRuntimeStub:
+ return "runtime-stub";
case WasmCode::kInterpreterEntry:
return "interpreter entry";
- case WasmCode::kTrampoline:
- return "trampoline";
+ case WasmCode::kJumpTable:
+ return "jump table";
}
return "unknown kind";
}
WasmCode::~WasmCode() {
- // Depending on finalizer order, the WasmCompiledModule finalizer may be
- // called first, case in which we release here. If the InstanceFinalizer is
- // called first, the handlers will be cleared in Reset, as-if the NativeModule
- // may be later used again (which would be the case if the WasmCompiledModule
- // were still held by a WasmModuleObject)
if (HasTrapHandlerIndex()) {
CHECK_LT(trap_handler_index(),
static_cast<size_t>(std::numeric_limits<int>::max()));
@@ -337,308 +289,329 @@ WasmCode::~WasmCode() {
}
}
-base::AtomicNumber<size_t> NativeModule::next_id_;
-
-NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
- bool can_request_more, VirtualMemory* code_space,
- WasmCodeManager* code_manager, ModuleEnv& env)
- : instance_id(next_id_.Increment(1)),
- code_table_(num_functions),
- num_imported_functions_(num_imports),
- compilation_state_(NewCompilationState(
- reinterpret_cast<Isolate*>(code_manager->isolate_), env)),
- free_code_space_(code_space->address(), code_space->end()),
+NativeModule::NativeModule(Isolate* isolate, bool can_request_more,
+ VirtualMemory* code_space,
+ WasmCodeManager* code_manager,
+ std::shared_ptr<const WasmModule> module,
+ const ModuleEnv& env)
+ : module_(std::move(module)),
+ compilation_state_(NewCompilationState(isolate, env)),
+ free_code_space_({code_space->address(), code_space->end()}),
wasm_code_manager_(code_manager),
can_request_more_memory_(can_request_more),
use_trap_handler_(env.use_trap_handler) {
+ DCHECK_EQ(module_.get(), env.module);
+ DCHECK_NOT_NULL(module_);
VirtualMemory my_mem;
owned_code_space_.push_back(my_mem);
owned_code_space_.back().TakeControl(code_space);
- owned_code_.reserve(num_functions);
+ owned_code_.reserve(num_functions());
+
+ uint32_t num_wasm_functions = module_->num_declared_functions;
+ if (num_wasm_functions > 0) {
+ code_table_.reset(new WasmCode*[num_wasm_functions]);
+ memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
+
+ jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
+ }
}
-void NativeModule::ResizeCodeTableForTesting(size_t num_functions,
- size_t max_functions) {
- DCHECK_LE(num_functions, max_functions);
- if (num_imported_functions_ == num_functions) {
- // For some tests, the code table might have been initialized to store
- // a number of imported functions on creation. If that is the case,
- // we need to retroactively reserve the space.
- DCHECK_EQ(code_table_.capacity(), num_imported_functions_);
- DCHECK_EQ(code_table_.size(), num_imported_functions_);
- DCHECK_EQ(num_functions, 1);
- code_table_.reserve(max_functions);
- } else {
- DCHECK_GT(num_functions, function_count());
- if (code_table_.capacity() == 0) {
- code_table_.reserve(max_functions);
- }
- DCHECK_EQ(code_table_.capacity(), max_functions);
- code_table_.resize(num_functions);
+void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
+ DCHECK_LE(num_functions(), max_functions);
+ WasmCode** new_table = new WasmCode*[max_functions];
+ memset(new_table, 0, max_functions * sizeof(*new_table));
+ memcpy(new_table, code_table_.get(),
+ module_->num_declared_functions * sizeof(*new_table));
+ code_table_.reset(new_table);
+
+ // Re-allocate jump table.
+ jump_table_ = CreateEmptyJumpTable(max_functions);
+}
+
+void NativeModule::LogWasmCodes(Isolate* isolate) {
+ if (!wasm::WasmCode::ShouldBeLogged(isolate)) return;
+
+ // TODO(titzer): we skip the logging of the import wrappers
+ // here, but they should be included somehow.
+ for (wasm::WasmCode* code : code_table()) {
+ if (code != nullptr) code->LogCode(isolate);
}
}
WasmCode* NativeModule::AddOwnedCode(
- Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
- std::unique_ptr<const byte[]> source_pos, size_t source_pos_size,
- Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
+ Maybe<uint32_t> index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
- size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions> protected_instructions,
- WasmCode::Tier tier, WasmCode::FlushICache flush_icache) {
- // both allocation and insertion in owned_code_ happen in the same critical
- // section, thus ensuring owned_code_'s elements are rarely if ever moved.
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
- Address executable_buffer = AllocateForCode(orig_instructions.size());
- if (executable_buffer == kNullAddress) {
- V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
- UNREACHABLE();
- }
- memcpy(reinterpret_cast<void*>(executable_buffer), orig_instructions.start(),
- orig_instructions.size());
- std::unique_ptr<WasmCode> code(new WasmCode(
- {reinterpret_cast<byte*>(executable_buffer), orig_instructions.size()},
- std::move(reloc_info), reloc_size, std::move(source_pos), source_pos_size,
- this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, handler_table_offset,
- std::move(protected_instructions), tier));
- WasmCode* ret = code.get();
-
- // TODO(mtrofin): We allocate in increasing address order, and
- // even if we end up with segmented memory, we may end up only with a few
- // large moves - if, for example, a new segment is below the current ones.
- auto insert_before =
- std::upper_bound(owned_code_.begin(), owned_code_.end(),
- ret->instruction_start(), WasmCodeUniquePtrComparator());
- owned_code_.insert(insert_before, std::move(code));
- if (flush_icache) {
- Assembler::FlushICache(ret->instructions().start(),
- ret->instructions().size());
+ size_t handler_table_offset, size_t constant_pool_offset,
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
+ OwnedVector<const byte> reloc_info,
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ WasmCode::Tier tier) {
+ WasmCode* code;
+ {
+ // Both allocation and insertion in owned_code_ happen in the same critical
+ // section, thus ensuring owned_code_'s elements are rarely if ever moved.
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ Address executable_buffer = AllocateForCode(instructions.size());
+ if (executable_buffer == kNullAddress) {
+ V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
+ UNREACHABLE();
+ }
+ // Ownership will be transferred to {owned_code_} below.
+ code = new WasmCode(
+ this, index,
+ {reinterpret_cast<byte*>(executable_buffer), instructions.size()},
+ stack_slots, safepoint_table_offset, handler_table_offset,
+ constant_pool_offset, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_position_table), kind, tier);
+
+ if (owned_code_.empty() ||
+ code->instruction_start() > owned_code_.back()->instruction_start()) {
+ // Common case.
+ owned_code_.emplace_back(code);
+ } else {
+ // Slow but unlikely case.
+ // TODO(mtrofin): We allocate in increasing address order, and
+ // even if we end up with segmented memory, we may end up only with a few
+ // large moves - if, for example, a new segment is below the current ones.
+ auto insert_before = std::upper_bound(
+ owned_code_.begin(), owned_code_.end(), code->instruction_start(),
+ WasmCodeUniquePtrComparator{});
+ owned_code_.emplace(insert_before, code);
+ }
}
- return ret;
+ memcpy(reinterpret_cast<void*>(code->instruction_start()),
+ instructions.start(), instructions.size());
+
+ return code;
}
WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
uint32_t index) {
+ // TODO(wasm): Adding instance-specific wasm-to-js wrappers as owned code to
+ // this NativeModule is a memory leak until the whole NativeModule dies.
WasmCode* ret = AddAnonymousCode(code, kind);
- code_table_[index] = ret;
ret->index_ = Just(index);
+ if (index >= module_->num_imported_functions) set_code(index, ret);
return ret;
}
WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
ret->index_ = Just(index);
+ PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
return ret;
}
void NativeModule::SetLazyBuiltin(Handle<Code> code) {
+ uint32_t num_wasm_functions = module_->num_declared_functions;
+ if (num_wasm_functions == 0) return;
WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
- for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
- ++i) {
- code_table_[i] = lazy_builtin;
- }
-}
-
-WasmSharedModuleData* NativeModule::shared_module_data() const {
- DCHECK_NOT_NULL(shared_module_data_);
- return *shared_module_data_;
-}
-
-void NativeModule::SetSharedModuleData(Handle<WasmSharedModuleData> shared) {
- DCHECK_NULL(shared_module_data_);
- shared_module_data_ =
- shared->GetIsolate()->global_handles()->Create(*shared).location();
- GlobalHandles::MakeWeak(reinterpret_cast<Object***>(&shared_module_data_));
+ // Fill the jump table with jumps to the lazy compile stub.
+ Address lazy_compile_target = lazy_builtin->instruction_start();
+ JumpTableAssembler jtasm(
+ jump_table_->instruction_start(),
+ static_cast<int>(jump_table_->instructions().size()) + 256);
+ for (uint32_t i = 0; i < num_wasm_functions; ++i) {
+ // Check that the offset in the jump table increases as expected.
+ DCHECK_EQ(i * JumpTableAssembler::kJumpTableSlotSize, jtasm.pc_offset());
+ jtasm.EmitLazyCompileJumpSlot(i + module_->num_imported_functions,
+ lazy_compile_target);
+ jtasm.NopBytes((i + 1) * JumpTableAssembler::kJumpTableSlotSize -
+ jtasm.pc_offset());
+ }
+ Assembler::FlushICache(jump_table_->instructions().start(),
+ jump_table_->instructions().size());
+}
+
+void NativeModule::SetRuntimeStubs(Isolate* isolate) {
+ DCHECK_NULL(runtime_stub_table_[0]); // Only called once.
+#define COPY_BUILTIN(Name) \
+ runtime_stub_table_[WasmCode::k##Name] = \
+ AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
+ WasmCode::kRuntimeStub);
+#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
+ WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP);
+#undef COPY_BUILTIN_TRAP
+#undef COPY_BUILTIN
}
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode::Kind kind) {
- std::unique_ptr<byte[]> reloc_info;
- if (code->relocation_size() > 0) {
- reloc_info.reset(new byte[code->relocation_size()]);
- memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
- }
- std::unique_ptr<byte[]> source_pos;
- Handle<ByteArray> source_pos_table(code->SourcePositionTable());
- if (source_pos_table->length() > 0) {
- source_pos.reset(new byte[source_pos_table->length()]);
- source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
- }
- std::unique_ptr<ProtectedInstructions> protected_instructions(
- new ProtectedInstructions(0));
- Vector<const byte> orig_instructions(
+ OwnedVector<byte> reloc_info =
+ OwnedVector<byte>::New(code->relocation_size());
+ memcpy(reloc_info.start(), code->relocation_start(), code->relocation_size());
+ Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
+ code->GetIsolate());
+ OwnedVector<byte> source_pos =
+ OwnedVector<byte>::New(source_pos_table->length());
+ source_pos_table->copy_out(0, source_pos.start(), source_pos_table->length());
+ Vector<const byte> instructions(
reinterpret_cast<byte*>(code->InstructionStart()),
static_cast<size_t>(code->InstructionSize()));
int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
int safepoint_table_offset =
code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
WasmCode* ret =
- AddOwnedCode(orig_instructions, // instructions
- std::move(reloc_info), // reloc_info
- static_cast<size_t>(code->relocation_size()), // reloc_size
- std::move(source_pos), // source positions
- static_cast<size_t>(source_pos_table->length()),
- Nothing<uint32_t>(), // index
- kind, // kind
- code->constant_pool_offset(), // constant_pool_offset
- stack_slots, // stack_slots
- safepoint_table_offset, // safepoint_table_offset
- code->handler_table_offset(), // handler_table_offset
- std::move(protected_instructions), // protected_instructions
- WasmCode::kOther, // kind
- WasmCode::kNoFlushICache); // flush_icache
+ AddOwnedCode(Nothing<uint32_t>(), // index
+ instructions, // instructions
+ stack_slots, // stack_slots
+ safepoint_table_offset, // safepoint_table_offset
+ code->handler_table_offset(), // handler_table_offset
+ code->constant_pool_offset(), // constant_pool_offset
+ {}, // protected_instructions
+ std::move(reloc_info), // reloc_info
+ std::move(source_pos), // source positions
+ kind, // kind
+ WasmCode::kOther); // tier
+
+ // Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = ret->instruction_start() - code->InstructionStart();
- int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-
- RelocIterator orig_it(*code, mask);
+ int mode_mask = RelocInfo::kApplyMask |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
+ RelocIterator orig_it(*code, mode_mask);
for (RelocIterator it(ret->instructions(), ret->reloc_info(),
- ret->constant_pool(), mask);
+ ret->constant_pool(), mode_mask);
!it.done(); it.next(), orig_it.next()) {
- if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
- Code* call_target =
- Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
- it.rinfo()->set_target_address(GetLocalAddressFor(handle(call_target)),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmStubCall(mode)) {
+ uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
+ DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
+ WasmCode* code =
+ runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
+ SKIP_ICACHE_FLUSH);
} else {
- if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
- DCHECK(Heap::IsImmovable(it.rinfo()->target_object()));
- } else {
- it.rinfo()->apply(delta);
- }
+ it.rinfo()->apply(delta);
}
}
+
// Flush the i-cache here instead of in AddOwnedCode, to include the changes
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_wasm_code) {
- // TODO(mstarzinger): don't need the isolate here.
- ret->Print(code->GetIsolate());
- }
+ if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
+ ret->Validate();
return ret;
}
WasmCode* NativeModule::AddCode(
- const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
+ uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions> protected_instructions,
- Handle<ByteArray> source_pos_table, WasmCode::Tier tier) {
- std::unique_ptr<byte[]> reloc_info;
- if (desc.reloc_size) {
- reloc_info.reset(new byte[desc.reloc_size]);
- memcpy(reloc_info.get(), desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
- }
- std::unique_ptr<byte[]> source_pos;
- if (source_pos_table->length() > 0) {
- source_pos.reset(new byte[source_pos_table->length()]);
- source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
- }
- TurboAssembler* origin = reinterpret_cast<TurboAssembler*>(desc.origin);
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
+ OwnedVector<const byte> source_pos_table, WasmCode::Tier tier) {
+ OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
+ memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
+ desc.reloc_size);
WasmCode* ret = AddOwnedCode(
- {desc.buffer, static_cast<size_t>(desc.instr_size)},
- std::move(reloc_info), static_cast<size_t>(desc.reloc_size),
- std::move(source_pos), static_cast<size_t>(source_pos_table->length()),
- Just(index), WasmCode::kFunction,
- desc.instr_size - desc.constant_pool_size, frame_slots,
- safepoint_table_offset, handler_table_offset,
- std::move(protected_instructions), tier, WasmCode::kNoFlushICache);
-
- code_table_[index] = ret;
- // TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::kApplyMask;
- // Needed to find target_object and runtime_entry on X64
-
- AllowDeferredHandleDereference embedding_raw_address;
+ Just(index), {desc.buffer, static_cast<size_t>(desc.instr_size)},
+ stack_slots, safepoint_table_offset, handler_table_offset,
+ desc.instr_size - desc.constant_pool_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos_table), WasmCode::kFunction, tier);
+
+ // Apply the relocation delta by iterating over the RelocInfo.
+ intptr_t delta = ret->instructions().start() - desc.buffer;
+ int mode_mask = RelocInfo::kApplyMask |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
for (RelocIterator it(ret->instructions(), ret->reloc_info(),
ret->constant_pool(), mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
- DCHECK(p->IsUndefined(p->GetIsolate()) || p->IsNull(p->GetIsolate()));
- it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- // rewrite code handles to direct pointers to the first instruction in the
- // code object
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(GetLocalAddressFor(handle(code)),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ if (RelocInfo::IsWasmCall(mode)) {
+ uint32_t call_tag = it.rinfo()->wasm_call_tag();
+ Address target = GetCallTargetForFunction(call_tag);
+ it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
+ } else if (RelocInfo::IsWasmStubCall(mode)) {
+ uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
+ DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
+ WasmCode* code =
+ runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
+ SKIP_ICACHE_FLUSH);
} else {
- intptr_t delta = ret->instructions().start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
+
// Flush the i-cache here instead of in AddOwnedCode, to include the changes
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
+ if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
+ ret->Validate();
return ret;
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
-Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
- MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
- Address dest = code->raw_instruction_start();
- GenerateJumpTrampoline(&masm, dest);
- CodeDesc code_desc;
- masm.GetCode(nullptr, &code_desc);
- Vector<const byte> instructions(code_desc.buffer,
- static_cast<size_t>(code_desc.instr_size));
- WasmCode* wasm_code = AddOwnedCode(instructions, // instructions
- nullptr, // reloc_info
- 0, // reloc_size
- nullptr, // source_pos
- 0, // source_pos_size
- Nothing<uint32_t>(), // index
- WasmCode::kTrampoline, // kind
- 0, // constant_pool_offset
- 0, // stack_slots
- 0, // safepoint_table_offset
- 0, // handler_table_offset
- {}, // protected_instructions
- WasmCode::kOther, // tier
- WasmCode::kFlushICache); // flush_icache
- Address ret = wasm_code->instruction_start();
- trampolines_.emplace(std::make_pair(dest, ret));
- return ret;
-}
-#else
-Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
- Address ret = code->raw_instruction_start();
- trampolines_.insert(std::make_pair(ret, ret));
- return ret;
+WasmCode* NativeModule::AddDeserializedCode(
+ uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ size_t constant_pool_offset,
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
+ OwnedVector<const byte> reloc_info,
+ OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
+ WasmCode* code = AddOwnedCode(
+ Just(index), instructions, stack_slots, safepoint_table_offset,
+ handler_table_offset, constant_pool_offset,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_position_table), WasmCode::kFunction, tier);
+
+ if (!code->protected_instructions_.is_empty()) {
+ code->RegisterTrapHandlerData();
+ }
+ set_code(index, code);
+ PatchJumpTable(index, code->instruction_start(), WasmCode::kFlushICache);
+ // Note: we do not flush the i-cache here, since the code needs to be
+ // relocated anyway. The caller is responsible for flushing the i-cache later.
+ return code;
}
-#endif
-
-Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
- DCHECK(Heap::IsImmovable(*code));
- Address index = code->raw_instruction_start();
- auto trampoline_iter = trampolines_.find(index);
- if (trampoline_iter == trampolines_.end()) {
- return CreateTrampolineTo(code);
- } else {
- return trampoline_iter->second;
- }
+void NativeModule::PublishCode(WasmCode* code) {
+ // TODO(clemensh): Remove the need for locking here. Probably requires
+ // word-aligning the jump table slots.
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ if (!code->protected_instructions_.is_empty()) {
+ code->RegisterTrapHandlerData();
+ }
+ DCHECK(!code->IsAnonymous());
+ set_code(code->index(), code);
+ PatchJumpTable(code->index(), code->instruction_start(),
+ WasmCode::kFlushICache);
+}
+
+WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
+ // Only call this if we really need a jump table.
+ DCHECK_LT(0, num_wasm_functions);
+ OwnedVector<byte> instructions = OwnedVector<byte>::New(
+ num_wasm_functions * JumpTableAssembler::kJumpTableSlotSize);
+ memset(instructions.start(), 0, instructions.size());
+ return AddOwnedCode(Nothing<uint32_t>(), // index
+ instructions.as_vector(), // instructions
+ 0, // stack_slots
+ 0, // safepoint_table_offset
+ 0, // handler_table_offset
+ 0, // constant_pool_offset
+ {}, // protected_instructions
+ {}, // reloc_info
+ {}, // source_pos
+ WasmCode::kJumpTable, // kind
+ WasmCode::kOther); // tier
+}
+
+void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
+ WasmCode::FlushICache flush_icache) {
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ uint32_t slot_idx = func_index - module_->num_imported_functions;
+ Address jump_table_slot = jump_table_->instruction_start() +
+ slot_idx * JumpTableAssembler::kJumpTableSlotSize;
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, target, flush_icache);
}
Address NativeModule::AllocateForCode(size_t size) {
- // this happens under a lock assumed by the caller.
+ // This happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
- DisjointAllocationPool mem = free_code_space_.Allocate(size);
- if (mem.IsEmpty()) {
+ AddressRange mem = free_code_space_.Allocate(size);
+ if (mem.is_empty()) {
if (!can_request_more_memory_) return kNullAddress;
Address hint = owned_code_space_.empty() ? kNullAddress
@@ -649,18 +622,15 @@ Address NativeModule::AllocateForCode(size_t size) {
wasm_code_manager_->TryAllocate(size, &new_mem,
reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) return kNullAddress;
- DisjointAllocationPool mem_pool(new_mem.address(), new_mem.end());
wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
- free_code_space_.Merge(std::move(mem_pool));
+ free_code_space_.Merge({new_mem.address(), new_mem.end()});
mem = free_code_space_.Allocate(size);
- if (mem.IsEmpty()) return kNullAddress;
+ if (mem.is_empty()) return kNullAddress;
}
- Address ret = mem.ranges().front().first;
- Address end = ret + size;
- Address commit_start = RoundUp(ret, AllocatePageSize());
- Address commit_end = RoundUp(end, AllocatePageSize());
- // {commit_start} will be either ret or the start of the next page.
+ Address commit_start = RoundUp(mem.start, AllocatePageSize());
+ Address commit_end = RoundUp(mem.end, AllocatePageSize());
+ // {commit_start} will be either mem.start or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
// We start from an aligned start, and we know we allocated vmem in
@@ -697,14 +667,14 @@ Address NativeModule::AllocateForCode(size_t size) {
committed_code_space_ += commit_size;
#endif
}
- DCHECK(IsAligned(ret, kCodeAlignment));
+ DCHECK(IsAligned(mem.start, kCodeAlignment));
allocated_code_space_.Merge(std::move(mem));
- TRACE_HEAP("ID: %zu. Code alloc: %p,+%zu\n", instance_id,
- reinterpret_cast<void*>(ret), size);
- return ret;
+ TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.start, size);
+ return mem.start;
}
-WasmCode* NativeModule::Lookup(Address pc) {
+WasmCode* NativeModule::Lookup(Address pc) const {
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
if (owned_code_.empty()) return nullptr;
auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
WasmCodeUniquePtrComparator());
@@ -715,111 +685,51 @@ WasmCode* NativeModule::Lookup(Address pc) {
return candidate->contains(pc) ? candidate : nullptr;
}
-Address NativeModule::GetCallTargetForFunction(uint32_t func_index) {
- // TODO(clemensh): Introduce a jump table and return a slot of it here.
- WasmCode* wasm_code = code(func_index);
- if (!wasm_code) return kNullAddress;
- if (wasm_code->kind() != WasmCode::kLazyStub) {
- return wasm_code->instruction_start();
- }
+Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
+ // TODO(clemensh): Measure performance win of returning instruction start
+ // directly if we have turbofan code. Downside: Redirecting functions (e.g.
+ // for debugging) gets much harder.
-#if DEBUG
- auto num_imported_functions =
- shared_module_data()->module()->num_imported_functions;
- if (func_index < num_imported_functions) {
- DCHECK(!wasm_code->IsAnonymous());
- }
-#endif
- if (!wasm_code->IsAnonymous()) {
- // If the function wasn't imported, its index should match.
- DCHECK_IMPLIES(func_index >= num_imported_functions,
- func_index == wasm_code->index());
- return wasm_code->instruction_start();
- }
- if (!lazy_compile_stubs_.get()) {
- lazy_compile_stubs_ =
- base::make_unique<std::vector<WasmCode*>>(function_count());
- }
- WasmCode* cloned_code = lazy_compile_stubs_.get()->at(func_index);
- if (cloned_code == nullptr) {
- cloned_code = CloneCode(wasm_code, WasmCode::kNoFlushICache);
- RelocateCode(cloned_code, wasm_code, WasmCode::kFlushICache);
- cloned_code->index_ = Just(func_index);
- lazy_compile_stubs_.get()->at(func_index) = cloned_code;
- }
- DCHECK_EQ(func_index, cloned_code->index());
- return cloned_code->instruction_start();
+ // Return the jump table slot for that function index.
+ DCHECK_NOT_NULL(jump_table_);
+ uint32_t slot_idx = func_index - module_->num_imported_functions;
+ DCHECK_LT(slot_idx, jump_table_->instructions().size() /
+ JumpTableAssembler::kJumpTableSlotSize);
+ return jump_table_->instruction_start() +
+ slot_idx * JumpTableAssembler::kJumpTableSlotSize;
}
-WasmCode* NativeModule::CloneCode(const WasmCode* original_code,
- WasmCode::FlushICache flush_icache) {
- std::unique_ptr<byte[]> reloc_info;
- if (original_code->reloc_info().size() > 0) {
- reloc_info.reset(new byte[original_code->reloc_info().size()]);
- memcpy(reloc_info.get(), original_code->reloc_info().start(),
- original_code->reloc_info().size());
- }
- std::unique_ptr<byte[]> source_pos;
- if (original_code->source_positions().size() > 0) {
- source_pos.reset(new byte[original_code->source_positions().size()]);
- memcpy(source_pos.get(), original_code->source_positions().start(),
- original_code->source_positions().size());
- }
- DCHECK_EQ(0, original_code->protected_instructions().size());
- std::unique_ptr<ProtectedInstructions> protected_instructions(
- new ProtectedInstructions(0));
- WasmCode* ret = AddOwnedCode(
- original_code->instructions(), std::move(reloc_info),
- original_code->reloc_info().size(), std::move(source_pos),
- original_code->source_positions().size(), original_code->index_,
- original_code->kind(), original_code->constant_pool_offset_,
- original_code->stack_slots(), original_code->safepoint_table_offset_,
- original_code->handler_table_offset_, std::move(protected_instructions),
- original_code->tier(), flush_icache);
- if (!ret->IsAnonymous()) {
- code_table_[ret->index()] = ret;
- }
- return ret;
+uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
+ Address slot_address) const {
+ DCHECK(is_jump_table_slot(slot_address));
+ uint32_t offset =
+ static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
+ uint32_t slot_idx = offset / JumpTableAssembler::kJumpTableSlotSize;
+ DCHECK_LT(slot_idx, module_->num_declared_functions);
+ return module_->num_imported_functions + slot_idx;
}
-void NativeModule::UnpackAndRegisterProtectedInstructions() {
- for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
- ++i) {
- WasmCode* wasm_code = code(i);
- if (wasm_code == nullptr) continue;
- wasm_code->RegisterTrapHandlerData();
- }
-}
+void NativeModule::DisableTrapHandler() {
+ // Switch {use_trap_handler_} from true to false.
+ DCHECK(use_trap_handler_);
+ use_trap_handler_ = false;
-void NativeModule::ReleaseProtectedInstructions() {
- for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
- ++i) {
- WasmCode* wasm_code = code(i);
- if (wasm_code->HasTrapHandlerIndex()) {
- CHECK_LT(wasm_code->trap_handler_index(),
- static_cast<size_t>(std::numeric_limits<int>::max()));
- trap_handler::ReleaseHandlerData(
- static_cast<int>(wasm_code->trap_handler_index()));
- wasm_code->ResetTrapHandlerIndex();
- }
- }
+ // Clear the code table (just to increase the chances to hit an error if we
+ // forget to re-add all code).
+ uint32_t num_wasm_functions = module_->num_declared_functions;
+ memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
+
+ // TODO(clemensh): Actually free the owned code, such that the memory can be
+ // recycled.
}
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
- // Clear the handle at the beginning of destructor to make it robust against
- // potential GCs in the rest of the desctructor.
- if (shared_module_data_ != nullptr) {
- Isolate* isolate = shared_module_data()->GetIsolate();
- isolate->global_handles()->Destroy(
- reinterpret_cast<Object**>(shared_module_data_));
- shared_module_data_ = nullptr;
- }
+ compilation_state_.reset(); // Cancels tasks, needs to be done first.
wasm_code_manager_->FreeNativeModule(this);
}
-WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
- : isolate_(isolate) {
+WasmCodeManager::WasmCodeManager(size_t max_committed) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
remaining_uncommitted_code_space_.store(max_committed);
}
@@ -846,34 +756,15 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
TRACE_HEAP("Setting rw permissions for %p:%p\n",
reinterpret_cast<void*>(start),
reinterpret_cast<void*>(start + size));
+
if (!ret) {
// Highly unlikely.
remaining_uncommitted_code_space_.fetch_add(size);
return false;
}
- // This API assumes main thread
- isolate_->AdjustAmountOfExternalAllocatedMemory(size);
- if (WouldGCHelp()) {
- // This API does not assume main thread, and would schedule
- // a GC if called from a different thread, instead of synchronously
- // doing one.
- isolate_->MemoryPressureNotification(MemoryPressureLevel::kCritical);
- }
return ret;
}
-bool WasmCodeManager::WouldGCHelp() const {
- // If all we have is one module, or none, no GC would help.
- // GC would help if there's some remaining native modules that
- // would be collected.
- if (active_ <= 1) return false;
- // We have an expectation on the largest size a native function
- // may have.
- constexpr size_t kMaxNativeFunction = 32 * MB;
- size_t remaining = remaining_uncommitted_code_space_.load();
- return remaining < kMaxNativeFunction;
-}
-
void WasmCodeManager::AssignRanges(Address start, Address end,
NativeModule* native_module) {
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
@@ -893,57 +784,70 @@ void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
reinterpret_cast<void*>(ret->end()), ret->size());
}
-size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
- // TODO(mtrofin): this should pick up its 'maximal code range size'
- // from something embedder-provided
- if (kRequiresCodeRange) return kMaxWasmCodeMemory;
- DCHECK(kModuleCanAllocateMoreMemory);
- size_t ret = AllocatePageSize();
- // a ballpark guesstimate on native inflation factor.
- constexpr size_t kMultiplier = 4;
-
- for (auto& function : module.functions) {
- ret += kMultiplier * function.code.length();
+// static
+size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
+ constexpr size_t kCodeSizeMultiplier = 4;
+ constexpr size_t kImportSize = 32 * kPointerSize;
+
+ uint32_t num_wasm_functions = module->num_declared_functions;
+
+ size_t estimate =
+ AllocatePageSize() /* TODO(titzer): 1 page spot bonus */ +
+ sizeof(NativeModule) +
+ (sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
+ (sizeof(WasmCode) * num_wasm_functions /* code object size */) +
+ (kImportSize * module->num_imported_functions /* import size */) +
+ (JumpTableAssembler::kJumpTableSlotSize *
+ num_wasm_functions /* jump table size */);
+
+ for (auto& function : module->functions) {
+ estimate += kCodeSizeMultiplier * function.code.length();
}
- return ret;
-}
-std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- const WasmModule& module, ModuleEnv& env) {
- size_t code_size = GetAllocationChunk(module);
- return NewNativeModule(
- code_size, static_cast<uint32_t>(module.functions.size()),
- module.num_imported_functions, kModuleCanAllocateMoreMemory, env);
+ return estimate;
}
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- size_t size_estimate, uint32_t num_functions,
- uint32_t num_imported_functions, bool can_request_more, ModuleEnv& env) {
+ Isolate* isolate, size_t memory_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module, const ModuleEnv& env) {
+ // TODO(titzer): we force a critical memory pressure notification
+ // when the code space is almost exhausted, but only upon the next module
+ // creation. This is only for one isolate, and it should really do this for
+ // all isolates, at the point of commit.
+ constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
+ bool force_critical_notification =
+ (active_ > 1) &&
+ (remaining_uncommitted_code_space_.load() < kCriticalThreshold);
+
+ if (force_critical_notification) {
+ (reinterpret_cast<v8::Isolate*>(isolate))
+ ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
+ }
+
VirtualMemory mem;
- TryAllocate(size_estimate, &mem);
+ // If the code must be contiguous, reserve enough address space up front.
+ size_t vmem_size = kRequiresCodeRange ? kMaxWasmCodeMemory : memory_estimate;
+ TryAllocate(vmem_size, &mem);
if (mem.IsReserved()) {
Address start = mem.address();
size_t size = mem.size();
Address end = mem.end();
- std::unique_ptr<NativeModule> ret(
- new NativeModule(num_functions, num_imported_functions,
- can_request_more, &mem, this, env));
- TRACE_HEAP("New Module: ID:%zu. Mem: %p,+%zu\n", ret->instance_id,
- reinterpret_cast<void*>(start), size);
+ std::unique_ptr<NativeModule> ret(new NativeModule(
+ isolate, can_request_more, &mem, this, std::move(module), env));
+ TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start,
+ size);
AssignRanges(start, end, ret.get());
++active_;
return ret;
}
- V8::FatalProcessOutOfMemory(reinterpret_cast<Isolate*>(isolate_),
- "WasmCodeManager::NewNativeModule");
+ V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
return nullptr;
}
bool NativeModule::SetExecutable(bool executable) {
if (is_executable_ == executable) return true;
- TRACE_HEAP("Setting module %zu as executable: %d.\n", instance_id,
- executable);
+ TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
PageAllocator::Permission permission =
executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
@@ -972,14 +876,13 @@ bool NativeModule::SetExecutable(bool executable) {
for (auto& range : allocated_code_space_.ranges()) {
// allocated_code_space_ is fine-grained, so we need to
// page-align it.
- size_t range_size = RoundUp(
- static_cast<size_t>(range.second - range.first), AllocatePageSize());
- if (!SetPermissions(range.first, range_size, permission)) {
+ size_t range_size = RoundUp(range.size(), AllocatePageSize());
+ if (!SetPermissions(range.start, range_size, permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n",
- reinterpret_cast<void*>(range.first),
- reinterpret_cast<void*>(range.second), executable);
+ reinterpret_cast<void*>(range.start),
+ reinterpret_cast<void*>(range.end), executable);
}
}
is_executable_ = executable;
@@ -989,7 +892,7 @@ bool NativeModule::SetExecutable(bool executable) {
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
DCHECK_GE(active_, 1);
--active_;
- TRACE_HEAP("Freeing %zu\n", native_module->instance_id);
+ TRACE_HEAP("Freeing NativeModule %p\n", this);
for (auto& vmem : native_module->owned_code_space_) {
lookup_map_.erase(vmem.address());
Free(&vmem);
@@ -1004,12 +907,7 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
module_code_size_mb_->AddSample(static_cast<int>(code_size / MB));
}
- // No need to tell the GC anything if we're destroying the heap,
- // which we currently indicate by having the isolate_ as null
- if (isolate_ == nullptr) return;
remaining_uncommitted_code_space_.fetch_add(code_size);
- isolate_->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int64_t>(code_size));
}
// TODO(wasm): We can make this more efficient if needed. For
@@ -1025,7 +923,7 @@ WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
return code;
}
-WasmCode* WasmCodeManager::LookupCode(Address pc) const {
+NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
if (lookup_map_.empty()) return nullptr;
auto iter = lookup_map_.upper_bound(pc);
@@ -1036,8 +934,12 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
NativeModule* candidate = iter->second.second;
DCHECK_NOT_NULL(candidate);
- if (range_start <= pc && pc < range_end) return candidate->Lookup(pc);
- return nullptr;
+ return range_start <= pc && pc < range_end ? candidate : nullptr;
+}
+
+WasmCode* WasmCodeManager::LookupCode(Address pc) const {
+ NativeModule* candidate = LookupNativeModule(pc);
+ return candidate ? candidate->Lookup(pc) : nullptr;
}
void WasmCodeManager::Free(VirtualMemory* mem) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 273f1ed425..7d01aa513d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -17,13 +17,11 @@
#include "src/wasm/module-compiler.h"
namespace v8 {
-class Isolate;
namespace internal {
struct CodeDesc;
class Code;
class Histogram;
-class WasmCompiledModule;
namespace wasm {
@@ -31,63 +29,83 @@ class NativeModule;
class WasmCodeManager;
struct WasmModule;
+// Convenience macro listing all wasm runtime stubs. Note that the first few
+// elements of the list coincide with {compiler::TrapId}, order matters.
+#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
+ FOREACH_WASM_TRAPREASON(VTRAP) \
+ V(WasmAllocateHeapNumber) \
+ V(WasmArgumentsAdaptor) \
+ V(WasmCallJavaScript) \
+ V(WasmGrowMemory) \
+ V(WasmStackGuard) \
+ V(WasmToNumber) \
+ V(DoubleToI)
+
+struct AddressRange {
+ Address start;
+ Address end;
+
+ AddressRange(Address s, Address e) : start(s), end(e) {
+ DCHECK_LE(start, end);
+ DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
+ }
+ AddressRange() : AddressRange(kNullAddress, kNullAddress) {}
+
+ size_t size() const { return static_cast<size_t>(end - start); }
+ bool is_empty() const { return start == end; }
+ operator bool() const { return start == kNullAddress; }
+};
+
// Sorted, disjoint and non-overlapping memory ranges. A range is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
-using AddressRange = std::pair<Address, Address>;
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
- enum ExtractionMode : bool { kAny = false, kContiguous = true };
- DisjointAllocationPool() {}
+ DisjointAllocationPool() = default;
- explicit DisjointAllocationPool(Address, Address);
+ explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}
DisjointAllocationPool(DisjointAllocationPool&& other) = default;
DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
- // Merge the ranges of the parameter into this object. Ordering is
- // preserved. The assumption is that the passed parameter is
- // not intersecting this object - for example, it was obtained
- // from a previous Allocate{Pool}.
- void Merge(DisjointAllocationPool&&);
+ // Merge the parameter range into this object while preserving ordering of the
+ // ranges. The assumption is that the passed parameter is not intersecting
+ // this object - for example, it was obtained from a previous Allocate.
+ void Merge(AddressRange);
// Allocate a contiguous range of size {size}. Return an empty pool on
// failure.
- DisjointAllocationPool Allocate(size_t size) {
- return Extract(size, kContiguous);
- }
-
- // Allocate a sub-pool of size {size}. Return an empty pool on failure.
- DisjointAllocationPool AllocatePool(size_t size) {
- return Extract(size, kAny);
- }
+ AddressRange Allocate(size_t size);
bool IsEmpty() const { return ranges_.empty(); }
const std::list<AddressRange>& ranges() const { return ranges_; }
private:
- // Extract out a total of {size}. By default, the return may
- // be more than one range. If kContiguous is passed, the return
- // will be one range. If the operation fails, this object is
- // unchanged, and the return {IsEmpty()}
- DisjointAllocationPool Extract(size_t size, ExtractionMode mode);
-
std::list<AddressRange> ranges_;
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
-using ProtectedInstructions =
- std::vector<trap_handler::ProtectedInstructionData>;
-
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
kFunction,
kWasmToJsWrapper,
kLazyStub,
+ kRuntimeStub,
kInterpreterEntry,
- kTrampoline
+ kJumpTable
+ };
+
+ // Each runtime stub is identified by an id. This id is used to reference the
+ // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
+ enum RuntimeStubId {
+#define DEF_ENUM(Name) k##Name,
+#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
+ WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
+#undef DEF_ENUM_TRAP
+#undef DEF_ENUM
+ kRuntimeStubCount
};
// kOther is used if we have WasmCode that is neither
@@ -99,16 +117,13 @@ class V8_EXPORT_PRIVATE WasmCode final {
Address instruction_start() const {
return reinterpret_cast<Address>(instructions_.start());
}
- Vector<const byte> reloc_info() const {
- return {reloc_info_.get(), reloc_size_};
- }
+ Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
Vector<const byte> source_positions() const {
- return {source_position_table_.get(), source_position_size_};
+ return source_position_table_.as_vector();
}
uint32_t index() const { return index_.ToChecked(); }
- // Anonymous functions are functions that don't carry an index, like
- // trampolines.
+ // Anonymous functions are functions that don't carry an index.
bool IsAnonymous() const { return index_.IsNothing(); }
Kind kind() const { return kind_; }
NativeModule* native_module() const { return native_module_; }
@@ -124,19 +139,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
pc < reinterpret_cast<Address>(instructions_.end());
}
- const ProtectedInstructions& protected_instructions() const {
- // TODO(mstarzinger): Code that doesn't have trapping instruction should
- // not be required to have this vector, make it possible to be null.
- DCHECK_NOT_NULL(protected_instructions_);
- return *protected_instructions_.get();
+ Vector<trap_handler::ProtectedInstructionData> protected_instructions()
+ const {
+ return protected_instructions_.as_vector();
}
- // Register protected instruction information with the trap handler. Sets
- // trap_handler_index.
- void RegisterTrapHandlerData();
-
- void Print(Isolate* isolate) const;
- void Disassemble(const char* name, Isolate* isolate, std::ostream& os,
+ void Validate() const;
+ void Print(const char* name = nullptr) const;
+ void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
static bool ShouldBeLogged(Isolate* isolate);
@@ -146,27 +156,20 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
- // Offset of {instructions_.start()}. It is used for tiering, when
- // we check if optimized code is available during the prologue
- // of Liftoff-compiled code.
- static constexpr int kInstructionStartOffset = 0;
-
private:
friend class NativeModule;
- WasmCode(Vector<byte> instructions, std::unique_ptr<const byte[]> reloc_info,
- size_t reloc_size, std::unique_ptr<const byte[]> source_pos,
- size_t source_pos_size, NativeModule* native_module,
- Maybe<uint32_t> index, Kind kind, size_t constant_pool_offset,
- uint32_t stack_slots, size_t safepoint_table_offset,
- size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions> protected_instructions,
- Tier tier)
+ WasmCode(NativeModule* native_module, Maybe<uint32_t> index,
+ Vector<byte> instructions, uint32_t stack_slots,
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ size_t constant_pool_offset,
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ protected_instructions,
+ OwnedVector<const byte> reloc_info,
+ OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
- reloc_size_(reloc_size),
- source_position_table_(std::move(source_pos)),
- source_position_size_(source_pos_size),
+ source_position_table_(std::move(source_position_table)),
native_module_(native_module),
index_(index),
kind_(kind),
@@ -179,7 +182,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
DCHECK_LE(safepoint_table_offset, instructions.size());
DCHECK_LE(constant_pool_offset, instructions.size());
DCHECK_LE(handler_table_offset, instructions.size());
- DCHECK_EQ(kInstructionStartOffset, OFFSET_OF(WasmCode, instructions_));
}
// Code objects that have been registered with the global trap handler within
@@ -187,13 +189,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t trap_handler_index() const;
void set_trap_handler_index(size_t);
bool HasTrapHandlerIndex() const;
- void ResetTrapHandlerIndex();
+
+ // Register protected instruction information with the trap handler. Sets
+ // trap_handler_index.
+ void RegisterTrapHandlerData();
Vector<byte> instructions_;
- std::unique_ptr<const byte[]> reloc_info_;
- size_t reloc_size_ = 0;
- std::unique_ptr<const byte[]> source_position_table_;
- size_t source_position_size_ = 0;
+ OwnedVector<const byte> reloc_info_;
+ OwnedVector<const byte> source_position_table_;
NativeModule* native_module_ = nullptr;
Maybe<uint32_t> index_;
Kind kind_;
@@ -205,7 +208,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t safepoint_table_offset_ = 0;
size_t handler_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
- std::unique_ptr<ProtectedInstructions> protected_instructions_;
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
Tier tier_;
DISALLOW_COPY_AND_ASSIGN(WasmCode);
@@ -214,19 +217,32 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
-// Note that we currently need to add code on the main thread, because we may
-// trigger a GC if we believe there's a chance the GC would clear up native
-// modules. The code is ready for concurrency otherwise, we just need to be
-// careful about this GC consideration. See WouldGCHelp and
-// WasmCodeManager::Commit.
class V8_EXPORT_PRIVATE NativeModule final {
public:
- WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
+ static constexpr bool kCanAllocateMoreMemory = false;
+#else
+ static constexpr bool kCanAllocateMoreMemory = true;
+#endif
+
+ // {AddCode} is thread safe w.r.t. other calls to {AddCode} or {AddCodeCopy},
+ // i.e. it can be called concurrently from background threads.
+ WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions>,
- Handle<ByteArray> source_position_table,
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ protected_instructions,
+ OwnedVector<const byte> source_position_table,
WasmCode::Tier tier);
+ WasmCode* AddDeserializedCode(
+ uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ size_t constant_pool_offset,
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ protected_instructions,
+ OwnedVector<const byte> reloc_info,
+ OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
+
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);
@@ -234,84 +250,102 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Add an interpreter entry. For the same reason as AddCodeCopy, we
// currently compile these using a different pipeline and we can't get a
// CodeDesc here. When adding interpreter wrappers, we do not insert them in
- // the code_table, however, we let them self-identify as the {index} function
+ // the code_table, however, we let them self-identify as the {index} function.
WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will initialize the code table with it. Copies
- // of it might be cloned from them later when creating entries for exported
- // functions and indirect callable functions, so that they may be identified
- // by the runtime.
+ // calling SetLazyBuiltin. It will be copied into this NativeModule and the
+ // jump table will be populated with that copy.
void SetLazyBuiltin(Handle<Code> code);
- // function_count is WasmModule::functions.size().
- uint32_t function_count() const {
- DCHECK_LE(code_table_.size(), std::numeric_limits<uint32_t>::max());
- return static_cast<uint32_t>(code_table_.size());
- }
+ // Initializes all runtime stubs by copying them over from the JS-allocated
+ // heap into this native module. It must be called exactly once per native
+ // module before adding other WasmCode so that runtime stub ids can be
+ // resolved during relocation.
+ void SetRuntimeStubs(Isolate* isolate);
+
+ // Makes the code available to the system (by entering it into the code table
+ // and patching the jump table). Callers have to take care not to race with
+ // threads executing the old code.
+ void PublishCode(WasmCode* code);
WasmCode* code(uint32_t index) const {
- DCHECK_LT(index, function_count());
- DCHECK_LE(num_imported_functions(), index);
- return code_table_[index];
+ DCHECK_LT(index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, index);
+ return code_table_[index - module_->num_imported_functions];
+ }
+
+ bool has_code(uint32_t index) const { return code(index) != nullptr; }
+
+ WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
+ DCHECK_LT(index, WasmCode::kRuntimeStubCount);
+ WasmCode* code = runtime_stub_table_[index];
+ DCHECK_NOT_NULL(code);
+ return code;
}
- // TODO(clemensh): Remove this method once we have the jump table
- // (crbug.com/v8/7758).
- void SetCodeForTesting(uint32_t index, WasmCode* code) {
- DCHECK_LT(index, function_count());
- DCHECK_LE(num_imported_functions(), index);
- code_table_[index] = code;
+ Address jump_table_start() const {
+ return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
- bool has_code(uint32_t index) const {
- DCHECK_LT(index, function_count());
- return code_table_[index] != nullptr;
+ bool is_jump_table_slot(Address address) const {
+ return jump_table_->contains(address);
}
- // Register/release the protected instructions in all code objects with the
- // global trap handler for this process.
- void UnpackAndRegisterProtectedInstructions();
- void ReleaseProtectedInstructions();
+ uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
+
+ // Transition this module from code relying on trap handlers (i.e. without
+ // explicit memory bounds checks) to code that does not require trap handlers
+ // (i.e. code with explicit bounds checks).
+ // This method must only be called if {use_trap_handler()} is true (it will be
+ // false afterwards). All code in this {NativeModule} needs to be re-added
+ // after calling this method.
+ void DisableTrapHandler();
- // Returns the instruction start of code suitable for indirect or import calls
- // for the given function index. If the code at the given index is the lazy
- // compile stub, it will clone a non-anonymous lazy compile stub for the
- // purpose. This will soon change to always return a jump table slot.
- Address GetCallTargetForFunction(uint32_t index);
+ // Returns the target to call for the given function (returns a jump table
+ // slot within {jump_table_}).
+ Address GetCallTargetForFunction(uint32_t func_index) const;
bool SetExecutable(bool executable);
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
- void ResizeCodeTableForTesting(size_t num_functions, size_t max_functions);
+ void ReserveCodeTableForTesting(uint32_t max_functions);
- CompilationState* compilation_state() { return compilation_state_.get(); }
+ void LogWasmCodes(Isolate* isolate);
- // TODO(mstarzinger): The link to the {shared_module_data} is deprecated and
- // all uses should vanish to make {NativeModule} independent of the Isolate.
- WasmSharedModuleData* shared_module_data() const;
- void SetSharedModuleData(Handle<WasmSharedModuleData>);
+ CompilationState* compilation_state() { return compilation_state_.get(); }
- uint32_t num_imported_functions() const { return num_imported_functions_; }
- const std::vector<WasmCode*>& code_table() const { return code_table_; }
+ uint32_t num_functions() const {
+ return module_->num_declared_functions + module_->num_imported_functions;
+ }
+ uint32_t num_imported_functions() const {
+ return module_->num_imported_functions;
+ }
+ Vector<WasmCode*> code_table() const {
+ return {code_table_.get(), module_->num_declared_functions};
+ }
bool use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
+ Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
+ void set_wire_bytes(OwnedVector<const byte> wire_bytes) {
+ wire_bytes_ = std::move(wire_bytes);
+ }
+ const WasmModule* module() const { return module_.get(); }
+
+ WasmCode* Lookup(Address) const;
- const size_t instance_id = 0;
~NativeModule();
private:
+ friend class WasmCode;
friend class WasmCodeManager;
- friend class NativeModuleSerializer;
- friend class NativeModuleDeserializer;
friend class NativeModuleModificationScope;
- static base::AtomicNumber<size_t> next_id_;
- NativeModule(uint32_t num_functions, uint32_t num_imports,
- bool can_request_more, VirtualMemory* code_space,
- WasmCodeManager* code_manager, ModuleEnv& env);
+ NativeModule(Isolate* isolate, bool can_request_more,
+ VirtualMemory* code_space, WasmCodeManager* code_manager,
+ std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
Address AllocateForCode(size_t size);
@@ -320,50 +354,61 @@ class V8_EXPORT_PRIVATE NativeModule final {
// module is owned by that module. Various callers get to decide on how the
// code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
// whether it has an index or is anonymous, etc.
- WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]> reloc_info,
- size_t reloc_size,
- std::unique_ptr<const byte[]> source_pos,
- size_t source_pos_size, Maybe<uint32_t> index,
- WasmCode::Kind kind, size_t constant_pool_offset,
+ WasmCode* AddOwnedCode(Maybe<uint32_t> index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions>, WasmCode::Tier,
- WasmCode::FlushICache);
- WasmCode* CloneCode(const WasmCode*, WasmCode::FlushICache);
- WasmCode* Lookup(Address);
- Address GetLocalAddressFor(Handle<Code>);
- Address CreateTrampolineTo(Handle<Code>);
+ size_t constant_pool_offset,
+ OwnedVector<trap_handler::ProtectedInstructionData>,
+ OwnedVector<const byte> reloc_info,
+ OwnedVector<const byte> source_position_table,
+ WasmCode::Kind, WasmCode::Tier);
+
+ WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
+
+ void PatchJumpTable(uint32_t func_index, Address target,
+ WasmCode::FlushICache);
+
+ void set_code(uint32_t index, WasmCode* code) {
+ DCHECK_LT(index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, index);
+ DCHECK_EQ(code->index(), index);
+ code_table_[index - module_->num_imported_functions] = code;
+ }
+
+ // TODO(clemensh): Make this a unique_ptr (requires refactoring
+ // AsyncCompileJob).
+ std::shared_ptr<const WasmModule> module_;
// Holds all allocated code objects, is maintained to be in ascending order
// according to the codes instruction start address to allow lookups.
std::vector<std::unique_ptr<WasmCode>> owned_code_;
- std::vector<WasmCode*> code_table_;
- std::unique_ptr<std::vector<WasmCode*>> lazy_compile_stubs_;
- uint32_t num_imported_functions_;
+ std::unique_ptr<WasmCode* []> code_table_;
+
+ OwnedVector<const byte> wire_bytes_;
- // Maps from instruction start of an immovable code object to instruction
- // start of the trampoline.
- std::unordered_map<Address, Address> trampolines_;
+ WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
+ // Jump table used to easily redirect wasm function calls.
+ WasmCode* jump_table_ = nullptr;
+
+ // The compilation state keeps track of compilation tasks for this module.
+ // Note that its destructor blocks until all tasks are finished/aborted and
+ // hence needs to be destructed first when this native module dies.
std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
- // A phantom reference to the {WasmSharedModuleData}. It is intentionally not
- // typed {Handle<WasmSharedModuleData>} because this location will be cleared
- // when the phantom reference is cleared.
- WasmSharedModuleData** shared_module_data_ = nullptr;
+ // This mutex protects concurrent calls to {AddCode} and {AddCodeCopy}.
+ mutable base::Mutex allocation_mutex_;
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
WasmCodeManager* wasm_code_manager_;
- base::Mutex allocation_mutex_;
size_t committed_code_space_ = 0;
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
- bool use_trap_handler_;
+ bool use_trap_handler_ = false;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
@@ -372,23 +417,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
- // The only reason we depend on Isolate is to report native memory used
- // and held by a GC-ed object. We'll need to mitigate that when we
- // start sharing wasm heaps.
- WasmCodeManager(v8::Isolate*, size_t max_committed);
+ explicit WasmCodeManager(size_t max_committed);
// Create a new NativeModule. The caller is responsible for its
// lifetime. The native module will be given some memory for code,
// which will be page size aligned. The size of the initial memory
// is determined with a heuristic based on the total size of wasm
// code. The native module may later request more memory.
- std::unique_ptr<NativeModule> NewNativeModule(const WasmModule& module,
- ModuleEnv& env);
- std::unique_ptr<NativeModule> NewNativeModule(size_t memory_estimate,
- uint32_t num_functions,
- uint32_t num_imported_functions,
- bool can_request_more,
- ModuleEnv& env);
+ // TODO(titzer): isolate is only required here for CompilationState.
+ std::unique_ptr<NativeModule> NewNativeModule(
+ Isolate* isolate, size_t memory_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
+ NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
WasmCode* GetCodeFromStartAddress(Address pc) const;
size_t remaining_uncommitted_code_space() const;
@@ -396,6 +436,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void SetModuleCodeSizeHistogram(Histogram* histogram) {
module_code_size_mb_ = histogram;
}
+ static size_t EstimateNativeModuleSize(const WasmModule* module);
private:
friend class NativeModule;
@@ -409,8 +450,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void FreeNativeModule(NativeModule*);
void Free(VirtualMemory* mem);
void AssignRanges(Address start, Address end, NativeModule*);
- size_t GetAllocationChunk(const WasmModule& module);
- bool WouldGCHelp() const;
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
// Count of NativeModules not yet collected. Helps determine if it's
@@ -418,9 +457,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
size_t active_ = 0;
std::atomic<size_t> remaining_uncommitted_code_space_;
- // TODO(mtrofin): remove the dependency on isolate.
- v8::Isolate* isolate_;
-
// Histogram to update with the maximum used code space for each NativeModule.
Histogram* module_code_size_mb_ = nullptr;
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
deleted file mode 100644
index 6a589de47e..0000000000
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-code-specialization.h"
-
-#include "src/assembler-inl.h"
-#include "src/base/optional.h"
-#include "src/objects-inl.h"
-#include "src/source-position-table.h"
-#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-uint32_t ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
- DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
- decoder.Reset(pc + 1, pc + 6);
- uint32_t call_idx = decoder.consume_u32v("call index");
- DCHECK(decoder.ok());
- DCHECK_GE(kMaxInt, call_idx);
- return call_idx;
-}
-
-namespace {
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
- size_t offset_l) {
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- DCHECK(!iterator.done());
- int byte_pos;
- do {
- byte_pos = iterator.source_position().ScriptOffset();
- iterator.Advance();
- } while (!iterator.done() && iterator.code_offset() <= offset);
- return byte_pos;
-}
-
-class PatchDirectCallsHelper {
- public:
- PatchDirectCallsHelper(NativeModule* native_module, const WasmCode* code)
- : source_pos_it(code->source_positions()), decoder(nullptr, nullptr) {
- uint32_t func_index = code->index();
- WasmSharedModuleData* shared = native_module->shared_module_data();
- func_bytes = shared->module_bytes()->GetChars() +
- shared->module()->functions[func_index].code.offset();
- }
-
- SourcePositionTableIterator source_pos_it;
- Decoder decoder;
- const byte* func_bytes;
-};
-
-} // namespace
-
-CodeSpecialization::CodeSpecialization() {}
-
-CodeSpecialization::~CodeSpecialization() {}
-
-void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
- DCHECK_NULL(relocate_direct_calls_module_);
- DCHECK_NOT_NULL(native_module);
- relocate_direct_calls_module_ = native_module;
-}
-
-bool CodeSpecialization::ApplyToWholeModule(
- NativeModule* native_module, Handle<WasmModuleObject> module_object,
- ICacheFlushMode icache_flush_mode) {
- DisallowHeapAllocation no_gc;
- WasmSharedModuleData* shared = module_object->shared();
- WasmModule* module = shared->module();
- std::vector<WasmFunction>* wasm_functions = &shared->module()->functions;
- FixedArray* export_wrappers = module_object->export_wrappers();
- DCHECK_EQ(export_wrappers->length(), module->num_exported_functions);
-
- bool changed = false;
- int func_index = module->num_imported_functions;
-
- // Patch all wasm functions.
- for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
- func_index < num_wasm_functions; ++func_index) {
- WasmCode* wasm_function = native_module->code(func_index);
- // TODO(clemensh): Get rid of this nullptr check
- if (wasm_function == nullptr ||
- wasm_function->kind() != WasmCode::kFunction) {
- continue;
- }
- changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
- }
-
- // Patch all exported functions (JS_TO_WASM_FUNCTION).
- int reloc_mode = 0;
- // Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_module_)
- // should match the instance we currently patch (instance).
- if (relocate_direct_calls_module_ != nullptr) {
- DCHECK_EQ(native_module, relocate_direct_calls_module_);
- reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
- }
- if (!reloc_mode) return changed;
- int wrapper_index = 0;
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Code* export_wrapper = Code::cast(export_wrappers->get(wrapper_index++));
- if (export_wrapper->kind() != Code::JS_TO_WASM_FUNCTION) continue;
- for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- switch (mode) {
- case RelocInfo::JS_TO_WASM_CALL: {
- changed = true;
- Address new_target =
- native_module->GetCallTargetForFunction(exp.index);
- it.rinfo()->set_js_to_wasm_address(new_target, icache_flush_mode);
- } break;
- default:
- UNREACHABLE();
- }
- }
- }
- DCHECK_EQ(module->functions.size(), func_index);
- DCHECK_EQ(export_wrappers->length(), wrapper_index);
- return changed;
-}
-
-bool CodeSpecialization::ApplyToWasmCode(wasm::WasmCode* code,
- ICacheFlushMode icache_flush_mode) {
- DisallowHeapAllocation no_gc;
- DCHECK_EQ(wasm::WasmCode::kFunction, code->kind());
-
- bool reloc_direct_calls = relocate_direct_calls_module_ != nullptr;
-
- int reloc_mode = 0;
- if (reloc_direct_calls) {
- reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CALL);
- }
- if (!reloc_mode) return false;
-
- base::Optional<PatchDirectCallsHelper> patch_direct_calls_helper;
- bool changed = false;
-
- NativeModule* native_module = code->native_module();
-
- RelocIterator it(code->instructions(), code->reloc_info(),
- code->constant_pool(), reloc_mode);
- for (; !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- switch (mode) {
- case RelocInfo::WASM_CALL: {
- DCHECK(reloc_direct_calls);
- // Iterate simultaneously over the relocation information and the source
- // position table. For each call in the reloc info, move the source
- // position iterator forward to that position to find the byte offset of
- // the respective call. Then extract the call index from the module wire
- // bytes to find the new compiled function.
- size_t offset = it.rinfo()->pc() - code->instruction_start();
- if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(relocate_direct_calls_module_,
- code);
- }
- int byte_pos = AdvanceSourcePositionTableIterator(
- patch_direct_calls_helper->source_pos_it, offset);
- uint32_t called_func_index = ExtractDirectCallIndex(
- patch_direct_calls_helper->decoder,
- patch_direct_calls_helper->func_bytes + byte_pos);
- const WasmCode* new_code = native_module->code(called_func_index);
- it.rinfo()->set_wasm_call_address(new_code->instruction_start(),
- icache_flush_mode);
- changed = true;
- } break;
-
- default:
- UNREACHABLE();
- }
- }
-
- return changed;
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
deleted file mode 100644
index 4fabf81aaa..0000000000
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_CODE_SPECIALIZATION_H_
-#define V8_WASM_WASM_CODE_SPECIALIZATION_H_
-
-#include "src/assembler.h"
-#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-objects.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-uint32_t ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc);
-
-// Helper class to specialize wasm code for a specific module.
-//
-// Note that code is shared among instances belonging to one module, hence all
-// specialization actions will implicitly apply to all instances of a module.
-//
-// Set up all relocations / patching that should be performed by the Relocate* /
-// Patch* methods, then apply all changes in one step using the Apply* methods.
-class CodeSpecialization {
- public:
- CodeSpecialization();
- ~CodeSpecialization();
-
- // Update all direct call sites based on the code table in the given module.
- void RelocateDirectCalls(NativeModule* module);
- // Apply all relocations and patching to all code in the module (i.e. wasm
- // code and exported function wrapper code).
- bool ApplyToWholeModule(NativeModule*, Handle<WasmModuleObject>,
- ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
- // Apply all relocations and patching to one wasm code object.
- bool ApplyToWasmCode(wasm::WasmCode*,
- ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
-
- private:
- NativeModule* relocate_direct_calls_module_ = nullptr;
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_CODE_SPECIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index dc6f1ee675..0233ced6ac 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -21,11 +21,11 @@ enum ValueTypeCode : uint8_t {
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
kLocalS128 = 0x7b,
+ kLocalAnyFunc = 0x70,
kLocalAnyRef = 0x6f
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
-constexpr uint8_t kWasmAnyFunctionTypeCode = 0x70;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index ea8a14d2fe..b1f57fa8f8 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -70,9 +70,10 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
DCHECK_LE(0, func_index);
DCHECK_LE(0, local_index);
if (!debug_info->has_locals_names()) {
- Handle<WasmSharedModuleData> shared(
- debug_info->wasm_instance()->module_object()->shared(), isolate);
- Handle<FixedArray> locals_names = wasm::DecodeLocalNames(isolate, shared);
+ Handle<WasmModuleObject> module_object(
+ debug_info->wasm_instance()->module_object(), isolate);
+ Handle<FixedArray> locals_names =
+ wasm::DecodeLocalNames(isolate, module_object);
debug_info->set_locals_names(*locals_names);
}
@@ -88,7 +89,7 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
func_locals_names->get(local_index)->IsUndefined(isolate)) {
return {};
}
- return handle(String::cast(func_locals_names->get(local_index)));
+ return handle(String::cast(func_locals_names->get(local_index)), isolate);
}
class InterpreterHandle {
@@ -132,19 +133,18 @@ class InterpreterHandle {
static Vector<const byte> GetBytes(WasmDebugInfo* debug_info) {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
- SeqOneByteString* bytes_str =
- debug_info->wasm_instance()->module_object()->shared()->module_bytes();
- return {bytes_str->GetChars(), static_cast<size_t>(bytes_str->length())};
+ NativeModule* native_module =
+ debug_info->wasm_instance()->module_object()->native_module();
+ return native_module->wire_bytes();
}
public:
// TODO(wasm): properly handlify this constructor.
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
- module_(
- debug_info->wasm_instance()->module_object()->shared()->module()),
+ module_(debug_info->wasm_instance()->module_object()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
- handle(debug_info->wasm_instance())) {}
+ handle(debug_info->wasm_instance(), isolate)) {}
~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
@@ -299,18 +299,16 @@ class InterpreterHandle {
void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) {
// Enter the debugger.
DebugScope debug_scope(isolate_->debug());
- if (debug_scope.failed()) return;
-
// Postpone interrupt during breakpoint processing.
PostponeInterruptsScope postpone(isolate_);
// Check whether we hit a breakpoint.
if (isolate_->debug()->break_points_active()) {
- Handle<WasmSharedModuleData> shared(
- GetInstanceObject()->module_object()->shared(), isolate_);
- int position = GetTopPosition(shared);
+ Handle<WasmModuleObject> module_object(
+ GetInstanceObject()->module_object(), isolate_);
+ int position = GetTopPosition(module_object);
Handle<FixedArray> breakpoints;
- if (WasmSharedModuleData::CheckBreakPoints(isolate_, shared, position)
+ if (WasmModuleObject::CheckBreakPoints(isolate_, module_object, position)
.ToHandle(&breakpoints)) {
// We hit one or several breakpoints. Clear stepping, notify the
// listeners and return.
@@ -343,13 +341,13 @@ class InterpreterHandle {
isolate_->debug()->OnDebugBreak(isolate_->factory()->empty_fixed_array());
}
- int GetTopPosition(Handle<WasmSharedModuleData> shared) {
+ int GetTopPosition(Handle<WasmModuleObject> module_object) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
DCHECK_LT(0, thread->GetFrameCount());
auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return shared->GetFunctionOffset(frame->function()->func_index) +
+ return module_object->GetFunctionOffset(frame->function()->func_index) +
frame->pc();
}
@@ -410,7 +408,7 @@ class InterpreterHandle {
Handle<JSObject> GetGlobalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
- Isolate* isolate = debug_info->GetIsolate();
+ Isolate* isolate = isolate_;
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
// TODO(clemensh): Add globals to the global scope.
@@ -434,7 +432,7 @@ class InterpreterHandle {
Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
- Isolate* isolate = debug_info->GetIsolate();
+ Isolate* isolate = isolate_;
Handle<JSObject> local_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
@@ -533,8 +531,9 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
Handle<Object> handle(debug_info->interpreter_handle(), isolate);
if (handle->IsUndefined(isolate)) {
- handle = Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate,
- *debug_info);
+ size_t interpreter_size = 0; // TODO(titzer): estimate size properly.
+ handle = Managed<wasm::InterpreterHandle>::Allocate(
+ isolate, interpreter_size, isolate, *debug_info);
debug_info->set_interpreter_handle(*handle);
}
@@ -543,13 +542,13 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->interpreter_handle();
- DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
+ DCHECK(!handle_obj->IsUndefined());
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->interpreter_handle();
- if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
+ if (handle_obj->IsUndefined()) return nullptr;
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
@@ -559,64 +558,14 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
int num_functions = debug_info->wasm_instance()
- ->compiled_module()
- ->GetNativeModule()
- ->function_count();
+ ->module_object()
+ ->native_module()
+ ->num_functions();
Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(num_functions);
debug_info->set_interpreted_functions(*new_arr);
return new_arr;
}
-using CodeRelocationMap = std::map<Address, Address>;
-
-void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
- CodeRelocationMap* map) {
- DisallowHeapAllocation no_gc;
- for (RelocIterator it(code->instructions(), code->reloc_info(),
- code->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- Address target = it.rinfo()->target_address();
- auto new_target = map->find(target);
- if (new_target == map->end()) continue;
- it.rinfo()->set_wasm_call_address(new_target->second);
- }
-}
-
-void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
- CodeRelocationMap* map) {
- DisallowHeapAllocation no_gc;
- for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- !it.done(); it.next()) {
- Address target = it.rinfo()->js_to_wasm_address();
- auto new_target = map->find(target);
- if (new_target == map->end()) continue;
- it.rinfo()->set_js_to_wasm_address(new_target->second);
- }
-}
-
-void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
- CodeRelocationMap* map) {
- DisallowHeapAllocation no_gc;
- // Redirect all calls in wasm functions.
- wasm::NativeModule* native_module =
- instance->compiled_module()->GetNativeModule();
- for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->function_count();
- i < e; ++i) {
- wasm::WasmCode* code = native_module->code(i);
- RedirectCallsitesInCode(isolate, code, map);
- }
- // TODO(6668): Find instances that imported our code and also patch those.
-
- // Redirect all calls in exported functions.
- FixedArray* export_wrapper = instance->module_object()->export_wrappers();
- for (int i = 0, e = export_wrapper->length(); i != e; ++i) {
- Code* code = Code::cast(export_wrapper->get(i));
- RedirectCallsitesInJSWrapperCode(isolate, code, map);
- }
-}
-
} // namespace
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
@@ -633,8 +582,9 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
Handle<WasmInstanceObject> instance_obj) {
Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
Isolate* isolate = instance_obj->GetIsolate();
- auto interp_handle =
- Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
+ size_t interpreter_size = 0; // TODO(titzer): estimate size properly.
+ auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
+ isolate, interpreter_size, isolate, *debug_info);
debug_info->set_interpreter_handle(*interp_handle);
auto ret = interp_handle->raw()->interpreter();
ret->SetCallIndirectTestMode();
@@ -659,9 +609,8 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
GetOrCreateInterpretedFunctions(isolate, debug_info);
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
wasm::NativeModule* native_module =
- instance->compiled_module()->GetNativeModule();
- wasm::WasmModule* module = instance->module();
- CodeRelocationMap code_to_relocate;
+ instance->module_object()->native_module();
+ const wasm::WasmModule* module = instance->module();
// We may modify js wrappers, as well as wasm functions. Hence the 2
// modification scopes.
@@ -674,20 +623,14 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
DCHECK_GT(module->functions.size(), func_index);
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
- Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
+ MaybeHandle<Code> new_code = compiler::CompileWasmInterpreterEntry(
isolate, func_index, module->functions[func_index].sig);
- const wasm::WasmCode* wasm_new_code =
- native_module->AddInterpreterEntry(new_code, func_index);
- const wasm::WasmCode* old_code =
- native_module->code(static_cast<uint32_t>(func_index));
+ const wasm::WasmCode* wasm_new_code = native_module->AddInterpreterEntry(
+ new_code.ToHandleChecked(), func_index);
Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
wasm_new_code->instruction_start(), TENURED);
interpreted_functions->set(func_index, *foreign_holder);
- DCHECK_EQ(0, code_to_relocate.count(old_code->instruction_start()));
- code_to_relocate.insert(std::make_pair(old_code->instruction_start(),
- wasm_new_code->instruction_start()));
}
- RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
@@ -697,7 +640,8 @@ void WasmDebugInfo::PrepareStep(StepAction step_action) {
bool WasmDebugInfo::RunInterpreter(Address frame_pointer, int func_index,
Address arg_buffer) {
DCHECK_LE(0, func_index);
- Handle<WasmInstanceObject> instance(wasm_instance());
+ Handle<WasmInstanceObject> instance(wasm_instance(),
+ wasm_instance()->GetIsolate());
return GetInterpreterHandle(this)->Execute(
instance, frame_pointer, static_cast<uint32_t>(func_index), arg_buffer);
}
@@ -753,21 +697,23 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
if (!debug_info->has_c_wasm_entries()) {
auto entries = isolate->factory()->NewFixedArray(4, TENURED);
debug_info->set_c_wasm_entries(*entries);
- auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate);
+ size_t map_size = 0; // size estimate not so important here.
+ auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate, map_size);
debug_info->set_c_wasm_entry_map(*managed_map);
}
Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
wasm::SignatureMap* map = debug_info->c_wasm_entry_map()->raw();
- int32_t index = map->Find(sig);
+ int32_t index = map->Find(*sig);
if (index == -1) {
- index = static_cast<int32_t>(map->FindOrInsert(sig));
+ index = static_cast<int32_t>(map->FindOrInsert(*sig));
if (index == entries->length()) {
entries = isolate->factory()->CopyFixedArrayAndGrow(
entries, entries->length(), TENURED);
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
+ Handle<Code> new_entry_code =
+ compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
Handle<WasmExportedFunctionData> function_data =
Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
@@ -784,7 +730,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
compiler::CWasmEntryParameters::kNumParameters);
entries->set(index, *new_entry);
}
- return handle(JSFunction::cast(entries->get(index)));
+ return handle(JSFunction::cast(entries->get(index)), isolate);
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index a5782e405f..8367c07cd7 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -4,7 +4,10 @@
#include "src/wasm/wasm-engine.h"
+#include "src/code-tracer.h"
+#include "src/compilation-statistics.h"
#include "src/objects-inl.h"
+#include "src/objects/js-promise.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
@@ -14,6 +17,11 @@ namespace v8 {
namespace internal {
namespace wasm {
+WasmEngine::WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
+ : code_manager_(std::move(code_manager)) {}
+
+WasmEngine::~WasmEngine() = default;
+
bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
// TODO(titzer): remove dependency on the isolate.
if (bytes.start() == nullptr || bytes.length() == 0) return false;
@@ -59,9 +67,9 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
memory);
}
-void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
+void WasmEngine::AsyncInstantiate(
+ Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
ErrorThrower thrower(isolate, nullptr);
// Instantiate a TryCatch so that caught exceptions won't progagate out.
// They will still be set as pending exceptions on the isolate.
@@ -75,9 +83,7 @@ void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
if (!instance_object.is_null()) {
- Handle<WasmInstanceObject> instance = instance_object.ToHandleChecked();
- MaybeHandle<Object> result = JSPromise::Resolve(promise, instance);
- CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ resolver->OnInstantiationSucceeded(instance_object.ToHandleChecked());
return;
}
@@ -85,22 +91,21 @@ void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
// exception in the ErrorThrower.
DCHECK_EQ(1, isolate->has_pending_exception() + thrower.error());
if (thrower.error()) {
- MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
- CHECK_EQ(result.is_null(), isolate->has_pending_exception());
- return;
+ resolver->OnInstantiationFailed(thrower.Reify());
+ } else {
+ // The start function has thrown an exception. We have to move the
+ // exception to the promise chain.
+ Handle<Object> exception(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ DCHECK(*isolate->external_caught_exception_address());
+ *isolate->external_caught_exception_address() = false;
+ resolver->OnInstantiationFailed(exception);
}
- // The start function has thrown an exception. We have to move the
- // exception to the promise chain.
- Handle<Object> exception(isolate->pending_exception(), isolate);
- isolate->clear_pending_exception();
- DCHECK(*isolate->external_caught_exception_address());
- *isolate->external_caught_exception_address() = false;
- MaybeHandle<Object> result = JSPromise::Reject(promise, exception);
- CHECK_EQ(result.is_null(), isolate->has_pending_exception());
-}
-
-void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes, bool is_shared) {
+}
+
+void WasmEngine::AsyncCompile(
+ Isolate* isolate, std::unique_ptr<CompilationResultResolver> resolver,
+ const ModuleWireBytes& bytes, bool is_shared) {
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, "WasmCompile");
@@ -117,21 +122,18 @@ void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
module_object = SyncCompile(isolate, &thrower, bytes);
}
if (thrower.error()) {
- MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
- CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ resolver->OnCompilationFailed(thrower.Reify());
return;
}
Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- MaybeHandle<Object> result = JSPromise::Resolve(promise, module);
- CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ resolver->OnCompilationSucceeded(module);
return;
}
if (FLAG_wasm_test_streaming) {
std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_engine()
- ->StartStreamingCompilation(isolate, handle(isolate->context()),
- promise);
+ isolate->wasm_engine()->StartStreamingCompilation(
+ isolate, handle(isolate->context(), isolate), std::move(resolver));
streaming_decoder->OnBytesReceived(bytes.module_bytes());
streaming_decoder->Finish();
return;
@@ -141,19 +143,44 @@ void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- AsyncCompileJob* job =
- CreateAsyncCompileJob(isolate, std::move(copy), bytes.length(),
- handle(isolate->context()), promise);
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(),
+ handle(isolate->context(), isolate), std::move(resolver));
job->Start();
}
std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
- Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise) {
- AsyncCompileJob* job = CreateAsyncCompileJob(
- isolate, std::unique_ptr<byte[]>(nullptr), 0, context, promise);
+ Isolate* isolate, Handle<Context> context,
+ std::unique_ptr<CompilationResultResolver> resolver) {
+ AsyncCompileJob* job =
+ CreateAsyncCompileJob(isolate, std::unique_ptr<byte[]>(nullptr), 0,
+ context, std::move(resolver));
return job->CreateStreamingDecoder();
}
+CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (compilation_stats_ == nullptr) {
+ compilation_stats_.reset(new CompilationStatistics());
+ }
+ return compilation_stats_.get();
+}
+
+void WasmEngine::DumpAndResetTurboStatistics() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (compilation_stats_ != nullptr) {
+ StdoutStream os;
+ os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
+ }
+ compilation_stats_.reset();
+}
+
+CodeTracer* WasmEngine::GetCodeTracer() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
+ return code_tracer_.get();
+}
+
void WasmEngine::Register(CancelableTaskManager* task_manager) {
task_managers_.emplace_back(task_manager);
}
@@ -164,9 +191,10 @@ void WasmEngine::Unregister(CancelableTaskManager* task_manager) {
AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context, Handle<JSPromise> promise) {
- AsyncCompileJob* job = new AsyncCompileJob(isolate, std::move(bytes_copy),
- length, context, promise);
+ Handle<Context> context,
+ std::unique_ptr<CompilationResultResolver> resolver) {
+ AsyncCompileJob* job = new AsyncCompileJob(
+ isolate, std::move(bytes_copy), length, context, std::move(resolver));
// Pass ownership to the unique_ptr in {jobs_}.
jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
return job;
@@ -181,14 +209,16 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
return result;
}
-void WasmEngine::AbortAllCompileJobs() {
+void WasmEngine::AbortCompileJobsOnIsolate(Isolate* isolate) {
// Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
- std::vector<AsyncCompileJob*> copy;
- copy.reserve(jobs_.size());
+ std::vector<AsyncCompileJob*> isolate_jobs;
- for (auto& entry : jobs_) copy.push_back(entry.first);
+ for (auto& entry : jobs_) {
+ if (entry.first->isolate() != isolate) continue;
+ isolate_jobs.push_back(entry.first);
+ }
- for (auto* job : copy) job->Abort();
+ for (auto* job : isolate_jobs) job->Abort();
}
void WasmEngine::TearDown() {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 675603b108..4d34b4d3de 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -9,10 +9,13 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
+#include "src/zone/accounting-allocator.h"
namespace v8 {
namespace internal {
+class CodeTracer;
+class CompilationStatistics;
class WasmModuleObject;
class WasmInstanceObject;
@@ -21,12 +24,26 @@ namespace wasm {
class ErrorThrower;
struct ModuleWireBytes;
+class V8_EXPORT_PRIVATE CompilationResultResolver {
+ public:
+ virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) = 0;
+ virtual void OnCompilationFailed(Handle<Object> error_reason) = 0;
+ virtual ~CompilationResultResolver() {}
+};
+
+class V8_EXPORT_PRIVATE InstantiationResultResolver {
+ public:
+ virtual void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) = 0;
+ virtual void OnInstantiationFailed(Handle<Object> error_reason) = 0;
+ virtual ~InstantiationResultResolver() {}
+};
+
// The central data structure that represents an engine instance capable of
// loading, instantiating, and executing WASM code.
class V8_EXPORT_PRIVATE WasmEngine {
public:
- explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
- : code_manager_(std::move(code_manager)) {}
+ explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager);
+ ~WasmEngine();
// Synchronously validates the given bytes that represent an encoded WASM
// module.
@@ -54,28 +71,40 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<JSArrayBuffer> memory);
// Begin an asynchronous compilation of the given bytes that represent an
- // encoded WASM module, placing the result in the supplied {promise}.
+ // encoded WASM module.
// The {is_shared} flag indicates if the bytes backing the module could
// be shared across threads, i.e. could be concurrently modified.
- void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ void AsyncCompile(Isolate* isolate,
+ std::unique_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared);
- // Begin an asynchronous instantiation of the given WASM module, placing the
- // result in the supplied {promise}.
- void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ // Begin an asynchronous instantiation of the given WASM module.
+ void AsyncInstantiate(Isolate* isolate,
+ std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> imports);
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
- Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
+ Isolate* isolate, Handle<Context> context,
+ std::unique_ptr<CompilationResultResolver> resolver);
WasmCodeManager* code_manager() const { return code_manager_.get(); }
WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
- // We register and unregister CancelableTaskManagers that run
- // isolate-dependent tasks. These tasks need to be shutdown if the isolate is
- // shut down.
+ AccountingAllocator* allocator() { return &allocator_; }
+
+ // Compilation statistics for TurboFan compilations.
+ CompilationStatistics* GetOrCreateTurboStatistics();
+
+ // Prints the gathered compilation statistics, then resets them.
+ void DumpAndResetTurboStatistics();
+
+ // Used to redirect tracing output from {stdout} to a file.
+ CodeTracer* GetCodeTracer();
+
+ // We register and unregister CancelableTaskManagers that run engine-dependent
+ // tasks. These tasks need to be shutdown if the engine is shut down.
void Register(CancelableTaskManager* task_manager);
void Unregister(CancelableTaskManager* task_manager);
@@ -85,30 +114,43 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Returns true if at lease one AsyncCompileJob is currently running.
bool HasRunningCompileJob() const { return !jobs_.empty(); }
- // Cancel all AsyncCompileJobs so that they are not processed any further,
- // but delay the deletion of their state until all tasks accessing the
- // AsyncCompileJob finish their execution. This is used to clean-up the
- // isolate to be reused.
- void AbortAllCompileJobs();
+ // Cancel all AsyncCompileJobs that belong to the given Isolate. Their
+ // deletion is delayed until all tasks accessing the AsyncCompileJob finish
+ // their execution. This is used to clean-up the isolate to be reused.
+ void AbortCompileJobsOnIsolate(Isolate*);
void TearDown();
private:
- AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
- std::unique_ptr<byte[]> bytes_copy,
- size_t length, Handle<Context> context,
- Handle<JSPromise> promise);
+ AsyncCompileJob* CreateAsyncCompileJob(
+ Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context,
+ std::unique_ptr<CompilationResultResolver> resolver);
// We use an AsyncCompileJob as the key for itself so that we can delete the
// job from the map when it is finished.
std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_;
std::unique_ptr<WasmCodeManager> code_manager_;
WasmMemoryTracker memory_tracker_;
+ AccountingAllocator allocator_;
// Contains all CancelableTaskManagers that run tasks that are dependent
// on the isolate.
std::list<CancelableTaskManager*> task_managers_;
+ // This mutex protects all information which is mutated concurrently or
+ // fields that are initialized lazily on the first access.
+ base::Mutex mutex_;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {mutex_}:
+
+ std::unique_ptr<CompilationStatistics> compilation_stats_;
+ std::unique_ptr<CodeTracer> code_tracer_;
+
+ // End of fields protected by {mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
DISALLOW_COPY_AND_ASSIGN(WasmEngine);
};
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index a692833e5e..581277cbab 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -969,16 +969,14 @@ class CodeMap {
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
uint32_t saved_index;
USE(saved_index);
- if (table_index >= module_->function_tables.size()) return nullptr;
+ if (table_index >= module_->tables.size()) return nullptr;
// Mask table index for SSCA mitigation.
saved_index = table_index;
- table_index &=
- static_cast<int32_t>((table_index - module_->function_tables.size()) &
- ~static_cast<int32_t>(table_index)) >>
- 31;
+ table_index &= static_cast<int32_t>((table_index - module_->tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
DCHECK_EQ(table_index, saved_index);
- const WasmIndirectFunctionTable* table =
- &module_->function_tables[table_index];
+ const WasmTable* table = &module_->tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
// Mask entry_index for SSCA mitigation.
saved_index = entry_index;
@@ -1081,7 +1079,6 @@ class ThreadImpl {
Handle<WasmInstanceObject> instance_object)
: codemap_(codemap),
instance_object_(instance_object),
- zone_(zone),
frames_(zone),
activations_(zone) {}
@@ -1123,7 +1120,7 @@ class ThreadImpl {
void Reset() {
TRACE("----- RESET -----\n");
- sp_ = stack_start_;
+ sp_ = stack_.get();
frames_.clear();
state_ = WasmInterpreter::STOPPED;
trap_reason_ = kTrapCount;
@@ -1146,12 +1143,12 @@ class ThreadImpl {
WasmValue GetStackValue(sp_t index) {
DCHECK_GT(StackHeight(), index);
- return stack_start_[index];
+ return stack_[index];
}
void SetStackValue(sp_t index, WasmValue value) {
DCHECK_GT(StackHeight(), index);
- stack_start_[index] = value;
+ stack_[index] = value;
}
TrapReason GetTrapReason() { return trap_reason_; }
@@ -1190,7 +1187,7 @@ class ThreadImpl {
// first).
DCHECK_EQ(activations_.back().fp, frames_.size());
DCHECK_LE(activations_.back().sp, StackHeight());
- sp_ = stack_start_ + activations_.back().sp;
+ sp_ = stack_.get() + activations_.back().sp;
activations_.pop_back();
}
@@ -1212,7 +1209,7 @@ class ThreadImpl {
DCHECK_LE(act.fp, frames_.size());
frames_.resize(act.fp);
DCHECK_LE(act.sp, StackHeight());
- sp_ = stack_start_ + act.sp;
+ sp_ = stack_.get() + act.sp;
state_ = WasmInterpreter::STOPPED;
return WasmInterpreter::Thread::UNWOUND;
}
@@ -1241,8 +1238,7 @@ class ThreadImpl {
CodeMap* codemap_;
Handle<WasmInstanceObject> instance_object_;
- Zone* zone_;
- WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
+ std::unique_ptr<WasmValue[]> stack_;
WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
WasmValue* sp_ = nullptr; // Current stack pointer.
ZoneVector<Frame> frames_;
@@ -1344,7 +1340,7 @@ class ThreadImpl {
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- WasmValue* sp_dest = stack_start_ + frames_.back().sp;
+ WasmValue* sp_dest = stack_.get() + frames_.back().sp;
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
@@ -1458,14 +1454,14 @@ class ThreadImpl {
return true;
}
- template <typename type>
+ template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address& address, pc_t pc, int& len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
- if (val2) *val2 = Pop().to<uint32_t>();
- if (val) *val = Pop().to<uint32_t>();
+ if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
+ if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
address = BoundsCheckMem<type>(imm.offset, index);
if (!address) {
@@ -1515,84 +1511,133 @@ class ThreadImpl {
InterpreterCode* code, pc_t pc, int& len) {
WasmValue result;
switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, operation) \
+// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
+// on big endian and 64 bit atomics fail to compile.
+#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation) \
case kExpr##name: { \
type val; \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = WasmValue( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ result = WasmValue(static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val))); \
Push(result); \
break; \
}
- ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
+ atomic_exchange);
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type) \
- case kExpr##name: { \
- type val; \
- type val2; \
- Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val, \
- &val2)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \
- Push(WasmValue(val)); \
- break; \
- }
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t);
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
+ case kExpr##name: { \
+ type val; \
+ type val2; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val, &val2)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::atomic_compare_exchange_strong( \
+ reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \
+ Push(WasmValue(static_cast<op_type>(val))); \
+ break; \
+ }
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
+ uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, operation) \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
case kExpr##name: { \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = \
- WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
+ result = WasmValue(static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr)))); \
Push(result); \
break; \
}
- ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, operation) \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
case kExpr##name: { \
type val; \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
@@ -1601,10 +1646,15 @@ class ThreadImpl {
std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \
break; \
}
- ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
+#endif // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
default:
UNREACHABLE();
return false;
@@ -1653,6 +1703,331 @@ class ThreadImpl {
EXTRACT_LANE_CASE(I16x8, i16x8)
EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
+#define BINOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[i]; \
+ auto b = s2.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
+ BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
+ BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
+ BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
+ BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+ BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
+ BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
+ BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
+ BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
+ BINOP_CASE(I32x4MinU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
+ BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
+ BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
+ BINOP_CASE(S128And, i32x4, int4, 4, a & b)
+ BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
+ BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
+ BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
+ BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
+ BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
+ BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
+ BINOP_CASE(I16x8MinU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
+ BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
+ BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
+ BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
+ BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
+ BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
+ BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
+ BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
+ BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
+ BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
+ BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
+ BINOP_CASE(I8x16MinU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
+ BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
+ BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
+ BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
+ BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
+ SaturateAdd<uint8_t>(a, b))
+ BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
+ BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
+ SaturateSub<uint8_t>(a, b))
+#undef BINOP_CASE
+#define UNOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
+ UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
+ UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
+ UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
+ UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
+ UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
+ UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
+ UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
+#undef UNOP_CASE
+#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ out_stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[i]; \
+ auto b = s2.val[i]; \
+ res.val[i] = expr ? -1 : 0; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
+ CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
+ CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
+ CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
+ CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
+ CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
+ CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
+ CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
+ CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
+ CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
+ CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
+ CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
+ CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
+ CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
+ CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
+ CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
+ CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
+ CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
+ CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
+ CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
+ CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
+ CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
+ CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
+ CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
+ CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
+ CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
+ CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
+#undef CMPOP_CASE
+#define REPLACE_LANE_CASE(format, name, stype, ctype) \
+ case kExpr##format##ReplaceLane: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ ++len; \
+ WasmValue new_val = Pop(); \
+ WasmValue simd_val = Pop(); \
+ stype s = simd_val.to_s128().to_##name(); \
+ s.val[imm.lane] = new_val.to<ctype>(); \
+ Push(WasmValue(Simd128(s))); \
+ return true; \
+ }
+ REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
+ REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
+ REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
+ REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
+#undef REPLACE_LANE_CASE
+ case kExprS128LoadMem:
+ return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
+ MachineRepresentation::kSimd128);
+ case kExprS128StoreMem:
+ return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
+ MachineRepresentation::kSimd128);
+#define SHIFT_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ ++len; \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
+ SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
+ SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) >> imm.shift)
+ SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
+ SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
+ SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) >> imm.shift)
+ SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
+ SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
+ SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) >> imm.shift)
+#undef SHIFT_CASE
+#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
+ expr) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ src_type s = v.to_s128().to_##name(); \
+ dst_type res; \
+ for (size_t i = 0; i < count; ++i) { \
+ ctype a = s.val[start_index + i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
+ static_cast<float>(a))
+ CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
+ static_cast<float>(a))
+ CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
+ std::isnan(a) ? 0
+ : a<kMinInt ? kMinInt : a> kMaxInt
+ ? kMaxInt
+ : static_cast<int32_t>(a))
+ CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
+ std::isnan(a)
+ ? 0
+ : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
+ : static_cast<uint32_t>(a))
+ CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
+ a)
+ CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
+ a)
+ CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
+ CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
+ a)
+ CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
+ a)
+ CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
+ a)
+ CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
+ CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
+ a)
+#undef CONVERT_CASE
+#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \
+ is_unsigned) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ src_type s1 = v1.to_s128().to_##name(); \
+ src_type s2 = v2.to_s128().to_##name(); \
+ dst_type res; \
+ int64_t min = std::numeric_limits<ctype>::min(); \
+ int64_t max = std::numeric_limits<ctype>::max(); \
+ for (size_t i = 0; i < count; ++i) { \
+ int32_t v = i < count / 2 ? s1.val[i] : s2.val[i - count / 2]; \
+ int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
+ res.val[i] = static_cast<dst_ctype>(std::max(min, std::min(max, a))); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
+ false)
+ PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t,
+ true)
+ PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t,
+ false)
+ PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t,
+ true)
+#undef PACK_CASE
+ case kExprS128Select: {
+ int4 v2 = Pop().to_s128().to_i32x4();
+ int4 v1 = Pop().to_s128().to_i32x4();
+ int4 bool_val = Pop().to_s128().to_i32x4();
+ int4 res;
+ for (size_t i = 0; i < 4; ++i) {
+ res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
+#define ADD_HORIZ_CASE(op, name, stype, count) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count / 2; ++i) { \
+ res.val[i] = s1.val[i * 2] + s1.val[i * 2 + 1]; \
+ res.val[i + count / 2] = s2.val[i * 2] + s2.val[i * 2 + 1]; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
+ ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
+ ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
+#undef ADD_HORIZ_CASE
+ case kExprS8x16Shuffle: {
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc));
+ len += 16;
+ int16 v2 = Pop().to_s128().to_i8x16();
+ int16 v1 = Pop().to_s128().to_i8x16();
+ int16 res;
+ for (size_t i = 0; i < kSimd128Size; ++i) {
+ int lane = imm.shuffle[i];
+ res.val[i] =
+ lane < kSimd128Size ? v1.val[lane] : v2.val[lane - kSimd128Size];
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
+#define REDUCTION_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype s = Pop().to_s128().to_##name(); \
+ int32_t res = s.val[0]; \
+ for (size_t i = 1; i < count; ++i) { \
+ res = res operation static_cast<int32_t>(s.val[i]); \
+ } \
+ Push(WasmValue(res)); \
+ return true; \
+ }
+ REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
+ REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
+ REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
+ REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
+ REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
+ REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
+#undef REDUCTION_CASE
default:
return false;
}
@@ -1672,7 +2047,7 @@ class ThreadImpl {
const size_t stack_size_limit = FLAG_stack_size * KB;
// Sum up the value stack size and the control stack size.
const size_t current_stack_size =
- (sp_ - stack_start_) + frames_.size() * sizeof(Frame);
+ (sp_ - stack_.get()) + frames_.size() * sizeof(Frame);
if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
@@ -1693,7 +2068,7 @@ class ThreadImpl {
DCHECK_LE(code->function->sig->parameter_count() +
code->locals.type_list.size() +
code->side_table->max_stack_height_,
- stack_limit_ - stack_start_ - frames_.back().sp);
+ stack_limit_ - stack_.get() - frames_.back().sp);
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
@@ -1926,7 +2301,7 @@ class ThreadImpl {
code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
- DCHECK_LE(module()->function_tables.size(), 1u);
+ DCHECK_LE(module()->tables.size(), 1u);
ExternalCallResult result =
CallIndirectFunction(0, entry_index, imm.sig_index);
switch (result.type) {
@@ -2079,7 +2454,8 @@ class ThreadImpl {
MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Handle<WasmMemoryObject> memory(instance_object_->memory_object());
+ Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
+ instance_object_->GetIsolate());
Isolate* isolate = memory->GetIsolate();
int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
Push(WasmValue(result));
@@ -2110,6 +2486,18 @@ class ThreadImpl {
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
break;
}
+#define SIGN_EXTENSION_CASE(name, wtype, ntype) \
+ case kExpr##name: { \
+ ntype val = static_cast<ntype>(Pop().to<wtype>()); \
+ Push(WasmValue(static_cast<wtype>(val))); \
+ break; \
+ }
+ SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
+ SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
+ SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
+ SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
+ SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
+#undef SIGN_EXTENSION_CASE
case kNumericPrefix: {
++len;
if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
@@ -2238,18 +2626,18 @@ class ThreadImpl {
void EnsureStackSpace(size_t size) {
if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
- size_t old_size = stack_limit_ - stack_start_;
+ size_t old_size = stack_limit_ - stack_.get();
size_t requested_size =
- base::bits::RoundUpToPowerOfTwo64((sp_ - stack_start_) + size);
+ base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- WasmValue* new_stack = zone_->NewArray<WasmValue>(new_size);
- memcpy(new_stack, stack_start_, old_size * sizeof(*sp_));
- sp_ = new_stack + (sp_ - stack_start_);
- stack_start_ = new_stack;
- stack_limit_ = new_stack + new_size;
+ std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
+ memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
+ sp_ = new_stack.get() + (sp_ - stack_.get());
+ stack_ = std::move(new_stack);
+ stack_limit_ = stack_.get() + new_size;
}
- sp_t StackHeight() { return sp_ - stack_start_; }
+ sp_t StackHeight() { return sp_ - stack_.get(); }
void TraceValueStack() {
#ifdef DEBUG
@@ -2415,6 +2803,19 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
+ static WasmCode* GetTargetCode(WasmCodeManager* code_manager,
+ Address target) {
+ NativeModule* native_module = code_manager->LookupNativeModule(target);
+ if (native_module->is_jump_table_slot(target)) {
+ uint32_t func_index =
+ native_module->GetFunctionIndexFromJumpTableSlot(target);
+ return native_module->code(func_index);
+ }
+ WasmCode* code = native_module->Lookup(target);
+ DCHECK_EQ(code->instruction_start(), target);
+ return code;
+ }
+
ExternalCallResult CallImportedFunction(uint32_t function_index) {
// Use a new HandleScope to avoid leaking / accumulating handles in the
// outer scope.
@@ -2423,13 +2824,10 @@ class ThreadImpl {
DCHECK_GT(module()->num_imported_functions, function_index);
Handle<WasmInstanceObject> instance;
- WasmCode* code;
- {
- ImportedFunctionEntry entry(instance_object_, function_index);
- instance = handle(entry.instance(), isolate);
- code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
- entry.target());
- }
+ ImportedFunctionEntry entry(instance_object_, function_index);
+ instance = handle(entry.instance(), isolate);
+ WasmCode* code =
+ GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
FunctionSig* sig = codemap()->module()->functions[function_index].sig;
return CallExternalWasmFunction(isolate, instance, code, sig);
}
@@ -2448,7 +2846,7 @@ class ThreadImpl {
module()->signature_ids[code->function->sig_index];
int expected_canonical_id = module()->signature_ids[sig_index];
DCHECK_EQ(function_canonical_id,
- module()->signature_map.Find(code->function->sig));
+ module()->signature_map.Find(*code->function->sig));
if (function_canonical_id != expected_canonical_id) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
@@ -2459,7 +2857,7 @@ class ThreadImpl {
Isolate* isolate = instance_object_->GetIsolate();
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
- module()->signature_map.Find(module()->signatures[sig_index]));
+ module()->signature_map.Find(*module()->signatures[sig_index]));
// The function table is stored in the instance.
// TODO(wasm): the wasm interpreter currently supports only one table.
@@ -2469,20 +2867,16 @@ class ThreadImpl {
return {ExternalCallResult::INVALID_FUNC};
}
- WasmCode* code;
- Handle<WasmInstanceObject> instance;
- {
- IndirectFunctionTableEntry entry(instance_object_, entry_index);
- // Signature check.
- if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
-
- instance = handle(entry.instance(), isolate);
- code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
- entry.target());
+ IndirectFunctionTableEntry entry(instance_object_, entry_index);
+ // Signature check.
+ if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
}
+ Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
+ WasmCode* code =
+ GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
+
// Call either an internal or external WASM function.
HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index f050a01cb7..896196ef67 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -10,9 +10,6 @@
#include "src/zone/zone-containers.h"
namespace v8 {
-namespace base {
-class AccountingAllocator;
-}
namespace internal {
class WasmInstanceObject;
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 2acb4d7aa4..ef316c64d0 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -13,9 +13,11 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-promise-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -25,6 +27,41 @@ using v8::internal::wasm::ErrorThrower;
namespace v8 {
+class WasmStreaming::WasmStreamingImpl {
+ public:
+ void OnBytesReceived(const uint8_t* bytes, size_t size) {}
+
+ void Finish() {}
+
+ void Abort(MaybeLocal<Value> exception) {}
+};
+
+WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
+ : impl_(std::move(impl)) {}
+
+// The destructor is defined here because we have a unique_ptr with forward
+// declaration.
+WasmStreaming::~WasmStreaming() = default;
+
+void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
+ impl_->OnBytesReceived(bytes, size);
+}
+
+void WasmStreaming::Finish() { impl_->Finish(); }
+
+void WasmStreaming::Abort(MaybeLocal<Value> exception) {
+ impl_->Abort(exception);
+}
+
+// static
+std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
+ Local<Value> value) {
+ i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate));
+ auto managed =
+ i::Handle<i::Managed<WasmStreaming>>::cast(Utils::OpenHandle(*value));
+ return managed->get();
+}
+
namespace {
#define ASSIGN(type, var, expr) \
@@ -163,6 +200,177 @@ void WebAssemblyCompileStreaming(
i_isolate->wasm_compile_streaming_callback()(args);
}
+namespace {
+// This class resolves the result of WebAssembly.compile. It just places the
+// compilation result in the supplied {promise}.
+class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
+ public:
+ AsyncCompilationResolver(i::Isolate* isolate, i::Handle<i::JSPromise> promise)
+ : promise_(isolate->global_handles()->Create(*promise)) {}
+
+ ~AsyncCompilationResolver() {
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ }
+
+ void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Resolve(promise_, result);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Reject(promise_, error_reason);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ private:
+ i::Handle<i::JSPromise> promise_;
+};
+
+// This class resolves the result of WebAssembly.instantiate(module, imports).
+// It just places the instantiation result in the supplied {promise}.
+class InstantiateModuleResultResolver
+ : public i::wasm::InstantiationResultResolver {
+ public:
+ InstantiateModuleResultResolver(i::Isolate* isolate,
+ i::Handle<i::JSPromise> promise)
+ : promise_(isolate->global_handles()->Create(*promise)) {}
+
+ ~InstantiateModuleResultResolver() {
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ }
+
+ void OnInstantiationSucceeded(
+ i::Handle<i::WasmInstanceObject> instance) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Resolve(promise_, instance);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ void OnInstantiationFailed(i::Handle<i::Object> error_reason) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Reject(promise_, error_reason);
+ CHECK_EQ(promise_result.is_null(),
+ promise_->GetIsolate()->has_pending_exception());
+ }
+
+ private:
+ i::Handle<i::JSPromise> promise_;
+};
+
+// This class resolves the result of WebAssembly.instantiate(bytes, imports).
+// For that it creates a new {JSObject} which contains both the provided
+// {WasmModuleObject} and the resulting {WebAssemblyInstanceObject} itself.
+class InstantiateBytesResultResolver
+ : public i::wasm::InstantiationResultResolver {
+ public:
+ InstantiateBytesResultResolver(i::Isolate* isolate,
+ i::Handle<i::JSPromise> promise,
+ i::Handle<i::WasmModuleObject> module)
+ : isolate_(isolate),
+ promise_(isolate_->global_handles()->Create(*promise)),
+ module_(isolate_->global_handles()->Create(*module)) {}
+
+ ~InstantiateBytesResultResolver() {
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(module_).location());
+ }
+
+ void OnInstantiationSucceeded(
+ i::Handle<i::WasmInstanceObject> instance) override {
+ // The result is a JSObject with 2 fields which contain the
+ // WasmInstanceObject and the WasmModuleObject.
+ i::Handle<i::JSObject> result =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+
+ const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance");
+ i::Handle<i::String> instance_name =
+ isolate_->factory()
+ ->NewStringFromOneByte(i::Vector<const uint8_t>(
+ instance_str,
+ i::StrLength(reinterpret_cast<const char*>(instance_str))))
+ .ToHandleChecked();
+
+ const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module");
+ i::Handle<i::String> module_name =
+ isolate_->factory()
+ ->NewStringFromOneByte(i::Vector<const uint8_t>(
+ module_str,
+ i::StrLength(reinterpret_cast<const char*>(module_str))))
+ .ToHandleChecked();
+
+ i::JSObject::AddProperty(isolate_, result, instance_name, instance,
+ i::NONE);
+ i::JSObject::AddProperty(isolate_, result, module_name, module_, i::NONE);
+
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Resolve(promise_, result);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
+ }
+
+ void OnInstantiationFailed(i::Handle<i::Object> error_reason) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Reject(promise_, error_reason);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
+ }
+
+ private:
+ i::Isolate* isolate_;
+ i::Handle<i::JSPromise> promise_;
+ i::Handle<i::WasmModuleObject> module_;
+};
+
+// This class is the {CompilationResultResolver} for
+// WebAssembly.instantiate(bytes, imports). When compilation finishes,
+// {AsyncInstantiate} is started on the compilation result.
+class AsyncInstantiateCompileResultResolver
+ : public i::wasm::CompilationResultResolver {
+ public:
+ AsyncInstantiateCompileResultResolver(
+ i::Isolate* isolate, i::Handle<i::JSPromise> promise,
+ i::MaybeHandle<i::JSReceiver> maybe_imports)
+ : isolate_(isolate),
+ promise_(isolate_->global_handles()->Create(*promise)),
+ maybe_imports_(maybe_imports.is_null()
+ ? maybe_imports
+ : isolate_->global_handles()->Create(
+ *maybe_imports.ToHandleChecked())) {}
+
+ ~AsyncInstantiateCompileResultResolver() {
+ i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ if (!maybe_imports_.is_null()) {
+ i::GlobalHandles::Destroy(
+ i::Handle<i::Object>::cast(maybe_imports_.ToHandleChecked())
+ .location());
+ }
+ }
+
+ void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
+ isolate_->wasm_engine()->AsyncInstantiate(
+ isolate_,
+ base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
+ result),
+ result, maybe_imports_);
+ }
+
+ void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ i::MaybeHandle<i::Object> promise_result =
+ i::JSPromise::Reject(promise_, error_reason);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
+ }
+
+ private:
+ i::Isolate* isolate_;
+ i::Handle<i::JSPromise> promise_;
+ i::MaybeHandle<i::JSReceiver> maybe_imports_;
+};
+
+} // namespace
+
// WebAssembly.compile(bytes) -> Promise
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -177,21 +385,23 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> context = isolate->GetCurrentContext();
- ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ ASSIGN(Promise::Resolver, promise_resolver, Promise::Resolver::New(context));
+ Local<Promise> promise = promise_resolver->GetPromise();
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(resolver->GetPromise());
+ return_value.Set(promise);
+
+ std::unique_ptr<i::wasm::CompilationResultResolver> resolver(
+ new AsyncCompilationResolver(i_isolate, Utils::OpenHandle(*promise)));
bool is_shared = false;
auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
if (thrower.error()) {
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
+ resolver->OnCompilationFailed(thrower.Reify());
return;
}
- i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
// Asynchronous compilation handles copying wire bytes if necessary.
- i_isolate->wasm_engine()->AsyncCompile(i_isolate, promise, bytes, is_shared);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, std::move(resolver), bytes,
+ is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -463,53 +673,67 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
- ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
- Local<Promise> promise = resolver->GetPromise();
+ ASSIGN(Promise::Resolver, promise_resolver, Promise::Resolver::New(context));
+ Local<Promise> promise = promise_resolver->GetPromise();
args.GetReturnValue().Set(promise);
+ std::unique_ptr<i::wasm::InstantiationResultResolver> resolver(
+ new InstantiateModuleResultResolver(i_isolate,
+ Utils::OpenHandle(*promise)));
+
Local<Value> first_arg_value = args[0];
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- Local<Value> ffi = args[1];
i::Handle<i::Object> first_arg = Utils::OpenHandle(*first_arg_value);
if (!first_arg->IsJSObject()) {
thrower.TypeError(
"Argument 0 must be a buffer source or a WebAssembly.Module object");
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
+ resolver->OnInstantiationFailed(thrower.Reify());
+ return;
+ }
+
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ Local<Value> ffi = args[1];
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(ffi, &thrower);
+
+ if (thrower.error()) {
+ resolver->OnInstantiationFailed(thrower.Reify());
return;
}
if (first_arg->IsWasmModuleObject()) {
i::Handle<i::WasmModuleObject> module_obj =
i::Handle<i::WasmModuleObject>::cast(first_arg);
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- i::MaybeHandle<i::JSReceiver> maybe_imports =
- GetValueAsImports(ffi, &thrower);
- if (thrower.error()) {
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
+ i_isolate->wasm_engine()->AsyncInstantiate(i_isolate, std::move(resolver),
+ module_obj, maybe_imports);
+ return;
+ }
- i_isolate->wasm_engine()->AsyncInstantiate(
- i_isolate, Utils::OpenHandle(*promise), module_obj, maybe_imports);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
+ if (thrower.error()) {
+ resolver->OnInstantiationFailed(thrower.Reify());
return;
}
- // We did not get a WasmModuleObject as input, we first have to compile the
- // input.
- ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile));
- ASSIGN(Value, async_compile_retval,
- async_compile->Call(context, args.Holder(), 1, &first_arg_value));
- promise = Local<Promise>::Cast(async_compile_retval);
- DCHECK(!promise.IsEmpty());
- ASSIGN(Function, instantiate_impl,
- Function::New(context, WebAssemblyInstantiateCallback, ffi));
- ASSIGN(Promise, result, promise->Then(context, instantiate_impl));
- args.GetReturnValue().Set(result);
+ // We start compilation now, we have no use for the
+ // {InstantiationResultResolver}.
+ resolver.reset();
+
+ std::unique_ptr<i::wasm::CompilationResultResolver> compilation_resolver(
+ new AsyncInstantiateCompileResultResolver(
+ i_isolate, Utils::OpenHandle(*promise), maybe_imports));
+
+ // The first parameter is a buffer source, we have to check if we are allowed
+ // to compile it.
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ compilation_resolver->OnCompilationFailed(thrower.Reify());
+ }
+
+ // Asynchronous compilation handles copying wire bytes if necessary.
+ i_isolate->wasm_engine()->AsyncCompile(
+ i_isolate, std::move(compilation_resolver), bytes, is_shared);
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -701,11 +925,13 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
- // The descriptor's 'type'.
+ // The descriptor's type, called 'value'. It is called 'value' because this
+ // descriptor is planned to be re-used as the global's type for reflection,
+ // so calling it 'type' is redundant.
i::wasm::ValueType type;
{
v8::MaybeLocal<v8::Value> maybe =
- descriptor->Get(context, v8_str(isolate, "type"));
+ descriptor->Get(context, v8_str(isolate, "value"));
v8::Local<v8::Value> value;
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
@@ -722,7 +948,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
type = i::wasm::kWasmF64;
} else {
thrower.TypeError(
- "Descriptor property 'type' must be 'i32', 'f32', or 'f64'");
+ "Descriptor property 'value' must be 'i32', 'f32', or 'f64'");
return;
}
}
@@ -804,7 +1030,7 @@ void WebAssemblyInstanceGetExports(
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance.exports()");
EXTRACT_THIS(receiver, WasmInstanceObject);
- i::Handle<i::JSObject> exports_object(receiver->exports_object());
+ i::Handle<i::JSObject> exports_object(receiver->exports_object(), i_isolate);
args.GetReturnValue().Set(Utils::ToLocal(exports_object));
}
@@ -851,7 +1077,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::FixedArray> new_array =
i_isolate->factory()->NewFixedArray(new_size);
for (int i = 0; i < old_size; ++i) new_array->set(i, old_array->get(i));
- i::Object* null = i_isolate->heap()->null_value();
+ i::Object* null = i::ReadOnlyRoots(i_isolate).null_value();
for (int i = old_size; i < new_size; ++i) new_array->set(i, null);
receiver->set_functions(*new_array);
}
@@ -931,7 +1157,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
max_size64 = i::FLAG_wasm_max_mem_pages;
}
- i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer());
+ i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
if (!old_buffer->is_growable()) {
thrower.RangeError("This memory cannot be grown");
return;
@@ -965,7 +1191,8 @@ void WebAssemblyMemoryGetBuffer(
i::Handle<i::Object> buffer_obj(receiver->array_buffer(), i_isolate);
DCHECK(buffer_obj->IsJSArrayBuffer());
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(*buffer_obj));
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(*buffer_obj),
+ i_isolate);
if (buffer->is_shared()) {
// TODO(gdeepti): More needed here for when cached buffer, and current
// buffer are out of sync, handle that here when bounds checks, and Grow
@@ -1092,12 +1319,12 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
Handle<JSFunction> function = CreateFunc(isolate, name, func);
function->shared()->set_length(length);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(object, name, function, attributes);
+ JSObject::AddProperty(isolate, object, name, function, attributes);
return function;
}
Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
- return Name::ToFunctionName(name, isolate->factory()->get_string())
+ return Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
}
@@ -1115,7 +1342,7 @@ void InstallGetter(Isolate* isolate, Handle<JSObject> object,
}
Handle<String> SetterName(Isolate* isolate, Handle<String> name) {
- return Name::ToFunctionName(name, isolate->factory()->set_string())
+ return Name::ToFunctionName(isolate, name, isolate->factory()->set_string())
.ToHandleChecked();
}
@@ -1160,8 +1387,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(), name,
- ro_attributes);
+ JSObject::AddProperty(isolate, webassembly, factory->to_string_tag_symbol(),
+ name, ro_attributes);
InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
@@ -1175,7 +1402,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Expose the API on the global object if configured to do so.
if (exposed_on_global_object) {
- JSObject::AddProperty(global, name, webassembly, attributes);
+ JSObject::AddProperty(isolate, global, name, webassembly, attributes);
}
// Setup Module
@@ -1184,7 +1411,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
context->set_wasm_module_constructor(*module_constructor);
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
- JSObject::cast(module_constructor->instance_prototype()));
+ JSObject::cast(module_constructor->instance_prototype()), isolate);
i::Handle<i::Map> module_map =
isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
@@ -1194,7 +1421,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
1);
InstallFunc(isolate, module_constructor, "customSections",
WebAssemblyModuleCustomSections, 2);
- JSObject::AddProperty(module_proto, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate, module_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Module"), ro_attributes);
// Setup Instance
@@ -1203,13 +1430,14 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
context->set_wasm_instance_constructor(*instance_constructor);
JSFunction::EnsureHasInitialMap(instance_constructor);
Handle<JSObject> instance_proto(
- JSObject::cast(instance_constructor->instance_prototype()));
+ JSObject::cast(instance_constructor->instance_prototype()), isolate);
i::Handle<i::Map> instance_map = isolate->factory()->NewMap(
i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize);
JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
InstallGetter(isolate, instance_proto, "exports",
WebAssemblyInstanceGetExports);
- JSObject::AddProperty(instance_proto, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate, instance_proto,
+ factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Instance"), ro_attributes);
// Setup Table
@@ -1218,7 +1446,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
context->set_wasm_table_constructor(*table_constructor);
JSFunction::EnsureHasInitialMap(table_constructor);
Handle<JSObject> table_proto(
- JSObject::cast(table_constructor->instance_prototype()));
+ JSObject::cast(table_constructor->instance_prototype()), isolate);
i::Handle<i::Map> table_map =
isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize);
JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
@@ -1226,7 +1454,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
- JSObject::AddProperty(table_proto, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
// Setup Memory
@@ -1235,13 +1463,13 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
context->set_wasm_memory_constructor(*memory_constructor);
JSFunction::EnsureHasInitialMap(memory_constructor);
Handle<JSObject> memory_proto(
- JSObject::cast(memory_constructor->instance_prototype()));
+ JSObject::cast(memory_constructor->instance_prototype()), isolate);
i::Handle<i::Map> memory_map =
isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize);
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
- JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
// Setup Global
@@ -1251,30 +1479,34 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
context->set_wasm_global_constructor(*global_constructor);
JSFunction::EnsureHasInitialMap(global_constructor);
Handle<JSObject> global_proto(
- JSObject::cast(global_constructor->instance_prototype()));
+ JSObject::cast(global_constructor->instance_prototype()), isolate);
i::Handle<i::Map> global_map = isolate->factory()->NewMap(
i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value",
WebAssemblyGlobalGetValue, WebAssemblyGlobalSetValue);
- JSObject::AddProperty(global_proto, factory->to_string_tag_symbol(),
+ JSObject::AddProperty(isolate, global_proto,
+ factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
}
// Setup errors
attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
- isolate->native_context()->wasm_compile_error_function());
- JSObject::AddProperty(webassembly, isolate->factory()->CompileError_string(),
+ isolate->native_context()->wasm_compile_error_function(), isolate);
+ JSObject::AddProperty(isolate, webassembly,
+ isolate->factory()->CompileError_string(),
compile_error, attributes);
Handle<JSFunction> link_error(
- isolate->native_context()->wasm_link_error_function());
- JSObject::AddProperty(webassembly, isolate->factory()->LinkError_string(),
- link_error, attributes);
+ isolate->native_context()->wasm_link_error_function(), isolate);
+ JSObject::AddProperty(isolate, webassembly,
+ isolate->factory()->LinkError_string(), link_error,
+ attributes);
Handle<JSFunction> runtime_error(
- isolate->native_context()->wasm_runtime_error_function());
- JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
+ isolate->native_context()->wasm_runtime_error_function(), isolate);
+ JSObject::AddProperty(isolate, webassembly,
+ isolate->factory()->RuntimeError_string(),
runtime_error, attributes);
}
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 199da51532..bda06e42cd 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <limits>
+
#include "src/wasm/wasm-memory.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-engine.h"
@@ -14,6 +16,8 @@ namespace wasm {
namespace {
+constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
+
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
size_t size, bool require_full_guard_regions,
void** allocation_base,
@@ -24,9 +28,12 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
#endif
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
+ //
+ // To protect against 32-bit integer overflow issues, we also protect the 2GiB
+ // before the valid part of the memory buffer.
*allocation_length =
require_full_guard_regions
- ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
: RoundUp(
base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
kWasmPageSize);
@@ -68,7 +75,10 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
memory_tracker->AddAllocationStatusSample(AllocationStatus::kOtherFailure);
return nullptr;
}
- void* memory = *allocation_base;
+ byte* memory = reinterpret_cast<byte*>(*allocation_base);
+ if (require_full_guard_regions) {
+ memory += kNegativeGuardSize;
+ }
// Make the part we care about accessible.
if (size > 0) {
@@ -91,12 +101,6 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
} // namespace
WasmMemoryTracker::~WasmMemoryTracker() {
- if (empty_backing_store_.allocation_base != nullptr) {
- CHECK(FreePages(empty_backing_store_.allocation_base,
- empty_backing_store_.allocation_length));
- InternalReleaseAllocation(empty_backing_store_.buffer_start);
- }
-
// All reserved address space should be released before the allocation tracker
// is destroyed.
DCHECK_EQ(reserved_address_space_, 0u);
@@ -107,8 +111,14 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
// Address space reservations are currently only meaningful using guard
// regions, which is currently only supported on 64-bit systems. On other
// platforms, we always fall back on bounds checks.
-#if V8_TARGET_ARCH_64_BIT
- constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+#if V8_TARGET_ARCH_MIPS64
+ // MIPS64 has a user space of 2^40 bytes on most processors,
+ // address space limits needs to be smaller.
+ constexpr size_t kAddressSpaceLimit = 0x2100000000L; // 132 GiB
+#elif V8_TARGET_ARCH_64_BIT
+ // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
+ // once we fill everything up with full-sized guard regions.
+ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4GiB
#else
constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
#endif
@@ -144,9 +154,6 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
const void* buffer_start) {
- if (IsEmptyBackingStore(buffer_start)) {
- return AllocationData();
- }
return InternalReleaseAllocation(buffer_start);
}
@@ -187,39 +194,22 @@ bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
return allocations_.find(buffer_start) != allocations_.end();
}
-void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
- size_t* allocation_length,
- Heap* heap) {
- if (empty_backing_store_.allocation_base == nullptr) {
- constexpr size_t buffer_length = 0;
- const bool require_full_guard_regions =
- trap_handler::IsTrapHandlerEnabled();
- void* local_allocation_base;
- size_t local_allocation_length;
- void* buffer_start = TryAllocateBackingStore(
- this, heap, buffer_length, require_full_guard_regions,
- &local_allocation_base, &local_allocation_length);
-
- empty_backing_store_ =
- AllocationData(local_allocation_base, local_allocation_length,
- buffer_start, buffer_length);
+bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
+ base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ const auto allocation = allocations_.find(buffer_start);
+
+ if (allocation == allocations_.end()) {
+ return false;
}
- *allocation_base = empty_backing_store_.allocation_base;
- *allocation_length = empty_backing_store_.allocation_length;
- return empty_backing_store_.buffer_start;
-}
-bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
- return buffer_start == empty_backing_store_.buffer_start;
+ Address start = reinterpret_cast<Address>(buffer_start);
+ Address limit =
+ reinterpret_cast<Address>(allocation->second.allocation_base) +
+ allocation->second.allocation_length;
+ return start + kWasmMaxHeapOffset < limit;
}
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
- if (IsEmptyBackingStore(buffer_start)) {
- // We don't need to do anything for the empty backing store, because this
- // will be freed when WasmMemoryTracker shuts down. Return true so callers
- // will not try to free the buffer on their own.
- return true;
- }
if (IsWasmMemory(buffer_start)) {
const AllocationData allocation = ReleaseAllocation(buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
@@ -274,27 +264,21 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void* allocation_base = nullptr;
size_t allocation_length = 0;
- void* memory;
- if (size == 0) {
- memory = memory_tracker->GetEmptyBackingStore(
- &allocation_base, &allocation_length, isolate->heap());
- } else {
#if V8_TARGET_ARCH_64_BIT
- bool require_full_guard_regions = true;
+ bool require_full_guard_regions = true;
#else
- bool require_full_guard_regions = false;
+ bool require_full_guard_regions = false;
#endif
+ void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
+ require_full_guard_regions,
+ &allocation_base, &allocation_length);
+ if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
+ // If we failed to allocate with full guard regions, fall back on
+ // mini-guards.
+ require_full_guard_regions = false;
memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
require_full_guard_regions,
&allocation_base, &allocation_length);
- if (memory == nullptr && !trap_handler::IsTrapHandlerEnabled()) {
- // If we failed to allocate with full guard regions, fall back on
- // mini-guards.
- require_full_guard_regions = false;
- memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
- require_full_guard_regions,
- &allocation_base, &allocation_length);
- }
}
if (memory == nullptr) {
return {};
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index e0ff6ef65c..2ab24739a8 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -69,18 +69,14 @@ class WasmMemoryTracker {
bool IsWasmMemory(const void* buffer_start);
+ // Returns whether the given buffer is a Wasm memory with guard regions large
+ // enough to safely use trap handlers.
+ bool HasFullGuardRegions(const void* buffer_start);
+
// Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
// buffer is not tracked.
const AllocationData* FindAllocationData(const void* buffer_start);
- // Empty WebAssembly memories are all backed by a shared inaccessible
- // reservation. This method creates this store or returns the existing one if
- // already created.
- void* GetEmptyBackingStore(void** allocation_base, size_t* allocation_length,
- Heap* heap);
-
- bool IsEmptyBackingStore(const void* buffer_start) const;
-
// Checks if a buffer points to a Wasm memory and if so does any necessary
// work to reclaim the buffer. If this function returns false, the caller must
// free the buffer manually.
@@ -133,18 +129,16 @@ class WasmMemoryTracker {
// buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_;
- // Empty backing stores still need to be backed by mapped pages when using
- // trap handlers. Because this could eat up address space quickly, we keep a
- // shared backing store here.
- AllocationData empty_backing_store_;
-
// Keep pointers to
- Histogram* allocation_result_;
- Histogram* address_space_usage_mb_; // in MiB
+ Histogram* allocation_result_ = nullptr;
+ Histogram* address_space_usage_mb_ = nullptr; // in MiB
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
};
+// Attempts to allocate an array buffer with guard regions suitable for trap
+// handling. If address space is not available, it will return a buffer with
+// mini-guards that will require bounds checks.
MaybeHandle<JSArrayBuffer> NewArrayBuffer(
Isolate*, size_t size, SharedFlag shared = SharedFlag::kNotShared);
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 0604502432..15a4b0bbf1 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -249,33 +249,13 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
}
-bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
- FunctionSig* b) const {
- if (a->return_count() < b->return_count()) return true;
- if (a->return_count() > b->return_count()) return false;
- if (a->parameter_count() < b->parameter_count()) return true;
- if (a->parameter_count() > b->parameter_count()) return false;
- for (size_t r = 0; r < a->return_count(); r++) {
- if (a->GetReturn(r) < b->GetReturn(r)) return true;
- if (a->GetReturn(r) > b->GetReturn(r)) return false;
- }
- for (size_t p = 0; p < a->parameter_count(); p++) {
- if (a->GetParam(p) < b->GetParam(p)) return true;
- if (a->GetParam(p) > b->GetParam(p)) return false;
- }
- return false;
-}
-
uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
- SignatureMap::iterator pos = signature_map_.find(sig);
- if (pos != signature_map_.end()) {
- return pos->second;
- } else {
- uint32_t index = static_cast<uint32_t>(signatures_.size());
- signature_map_[sig] = index;
- signatures_.push_back(sig);
- return index;
- }
+ auto sig_entry = signature_map_.find(*sig);
+ if (sig_entry != signature_map_.end()) return sig_entry->second;
+ uint32_t index = static_cast<uint32_t>(signatures_.size());
+ signature_map_.emplace(*sig, index);
+ signatures_.push_back(sig);
+ return index;
}
uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
@@ -392,7 +372,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer.write_u8(1); // table count
- buffer.write_u8(kWasmAnyFunctionTypeCode);
+ buffer.write_u8(kLocalAnyFunc);
buffer.write_u8(kHasMaximumFlag);
buffer.write_size(indirect_functions_.size());
buffer.write_size(indirect_functions_.size());
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 19ca123f0e..db70502886 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -241,13 +241,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void WriteTo(ZoneBuffer& buffer) const;
void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
- // TODO(titzer): use SignatureMap from signature-map.h here.
- // This signature map is zone-allocated, but the other is heap allocated.
- struct CompareFunctionSigs {
- bool operator()(FunctionSig* a, FunctionSig* b) const;
- };
- typedef ZoneMap<FunctionSig*, uint32_t, CompareFunctionSigs> SignatureMap;
-
Zone* zone() { return zone_; }
FunctionSig* GetSignature(uint32_t index) { return signatures_[index]; }
@@ -290,7 +283,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
ZoneVector<WasmGlobal> globals_;
- SignatureMap signature_map_;
+ ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index b35613fc84..bd23345870 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -7,7 +7,6 @@
#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
@@ -18,7 +17,6 @@
#include "src/v8.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -37,59 +35,52 @@ constexpr const char* WasmException::kRuntimeIdStr;
// static
constexpr const char* WasmException::kRuntimeValuesStr;
-WireBytesRef WasmModule::LookupName(const ModuleWireBytes* wire_bytes,
- uint32_t function_index) const {
- if (!names_) {
- names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
- wasm::DecodeFunctionNames(wire_bytes->start(), wire_bytes->end(),
- names_.get());
+WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
+ uint32_t function_index) const {
+ if (!function_names) {
+ function_names.reset(new std::unordered_map<uint32_t, WireBytesRef>());
+ wasm::DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
+ function_names.get());
}
- auto it = names_->find(function_index);
- if (it == names_->end()) return WireBytesRef();
+ auto it = function_names->find(function_index);
+ if (it == function_names->end()) return WireBytesRef();
return it->second;
}
-WireBytesRef WasmModule::LookupName(SeqOneByteString* wire_bytes,
- uint32_t function_index) const {
- DisallowHeapAllocation no_gc;
- uint8_t* chars = wire_bytes->GetChars();
- ModuleWireBytes module_wire_bytes(chars, chars + wire_bytes->length());
- return LookupName(&module_wire_bytes, function_index);
-}
-
-void WasmModule::AddNameForTesting(int function_index, WireBytesRef name) {
- if (!names_) {
- names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
+void WasmModule::AddFunctionNameForTesting(int function_index,
+ WireBytesRef name) {
+ if (!function_names) {
+ function_names.reset(new std::unordered_map<uint32_t, WireBytesRef>());
}
- names_->insert(std::make_pair(function_index, name));
+ function_names->insert(std::make_pair(function_index, name));
}
// Get a string stored in the module bytes representing a name.
WasmName ModuleWireBytes::GetName(WireBytesRef ref) const {
if (ref.is_empty()) return {"<?>", 3}; // no name.
CHECK(BoundsCheck(ref.offset(), ref.length()));
- return Vector<const char>::cast(
+ return WasmName::cast(
module_bytes_.SubVector(ref.offset(), ref.end_offset()));
}
// Get a string stored in the module bytes representing a function name.
WasmName ModuleWireBytes::GetName(const WasmFunction* function,
const WasmModule* module) const {
- return GetName(module->LookupName(this, function->func_index));
+ return GetName(module->LookupFunctionName(*this, function->func_index));
}
// Get a string stored in the module bytes representing a name.
WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
if (!ref.is_set()) return {nullptr, 0}; // no name.
CHECK(BoundsCheck(ref.offset(), ref.length()));
- return Vector<const char>::cast(
+ return WasmName::cast(
module_bytes_.SubVector(ref.offset(), ref.end_offset()));
}
// Get a string stored in the module bytes representing a function name.
WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
const WasmModule* module) const {
- return GetNameOrNull(module->LookupName(this, function->func_index));
+ return GetNameOrNull(module->LookupFunctionName(*this, function->func_index));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
@@ -127,7 +118,6 @@ bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
Handle<JSArray> GetImports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmSharedModuleData> shared(module_object->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> module_string = factory->InternalizeUtf8String("module");
@@ -140,7 +130,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = shared->module();
+ const WasmModule* module = module_object->module();
int num_imports = static_cast<int>(module->import_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
@@ -152,7 +142,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
// Populate the result array.
for (int index = 0; index < num_imports; ++index) {
- WasmImport& import = module->import_table[index];
+ const WasmImport& import = module->import_table[index];
Handle<JSObject> entry = factory->NewJSObject(object_function);
@@ -175,18 +165,18 @@ Handle<JSArray> GetImports(Isolate* isolate,
}
MaybeHandle<String> import_module =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate, shared, import.module_name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, import.module_name);
MaybeHandle<String> import_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate, shared, import.field_name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, import.field_name);
- JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
- NONE);
- JSObject::AddProperty(entry, name_string, import_name.ToHandleChecked(),
- NONE);
- JSObject::AddProperty(entry, kind_string, import_kind, NONE);
+ JSObject::AddProperty(isolate, entry, module_string,
+ import_module.ToHandleChecked(), NONE);
+ JSObject::AddProperty(isolate, entry, name_string,
+ import_name.ToHandleChecked(), NONE);
+ JSObject::AddProperty(isolate, entry, kind_string, import_kind, NONE);
storage->set(index, *entry);
}
@@ -196,7 +186,6 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<JSArray> GetExports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmSharedModuleData> shared(module_object->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> name_string = factory->InternalizeUtf8String("name");
@@ -208,7 +197,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = shared->module();
+ const WasmModule* module = module_object->module();
int num_exports = static_cast<int>(module->export_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
@@ -220,7 +209,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
// Populate the result array.
for (int index = 0; index < num_exports; ++index) {
- WasmExport& exp = module->export_table[index];
+ const WasmExport& exp = module->export_table[index];
Handle<String> export_kind;
switch (exp.kind) {
@@ -243,12 +232,12 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSObject> entry = factory->NewJSObject(object_function);
MaybeHandle<String> export_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
- exp.name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, exp.name);
- JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
- NONE);
- JSObject::AddProperty(entry, kind_string, export_kind, NONE);
+ JSObject::AddProperty(isolate, entry, name_string,
+ export_name.ToHandleChecked(), NONE);
+ JSObject::AddProperty(isolate, entry, kind_string, export_kind, NONE);
storage->set(index, *entry);
}
@@ -259,26 +248,20 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSArray> GetCustomSections(Isolate* isolate,
Handle<WasmModuleObject> module_object,
Handle<String> name, ErrorThrower* thrower) {
- Handle<WasmSharedModuleData> shared(module_object->shared(), isolate);
Factory* factory = isolate->factory();
- std::vector<CustomSectionOffset> custom_sections;
- {
- DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
- const byte* start =
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
- const byte* end = start + module_bytes->length();
- custom_sections = DecodeCustomSections(start, end);
- }
+ Vector<const uint8_t> wire_bytes =
+ module_object->native_module()->wire_bytes();
+ std::vector<CustomSectionOffset> custom_sections =
+ DecodeCustomSections(wire_bytes.start(), wire_bytes.end());
std::vector<Handle<Object>> matching_sections;
// Gather matching sections.
for (auto& section : custom_sections) {
MaybeHandle<String> section_name =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
- section.name);
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, section.name);
if (!name->Equals(*section_name.ToHandleChecked())) continue;
@@ -294,11 +277,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
constexpr bool is_external = false;
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
- DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
- const byte* start =
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
- memcpy(memory, start + section.payload.offset(), section.payload.length());
+ memcpy(memory, wire_bytes.start() + section.payload.offset(),
+ section.payload.length());
matching_sections.push_back(buffer);
}
@@ -317,15 +297,11 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
}
Handle<FixedArray> DecodeLocalNames(Isolate* isolate,
- Handle<WasmSharedModuleData> shared) {
- Handle<SeqOneByteString> wire_bytes(shared->module_bytes(), isolate);
+ Handle<WasmModuleObject> module_object) {
+ Vector<const uint8_t> wire_bytes =
+ module_object->native_module()->wire_bytes();
LocalNames decoded_locals;
- {
- DisallowHeapAllocation no_gc;
- DecodeLocalNames(wire_bytes->GetChars(),
- wire_bytes->GetChars() + wire_bytes->length(),
- &decoded_locals);
- }
+ DecodeLocalNames(wire_bytes.start(), wire_bytes.end(), &decoded_locals);
Handle<FixedArray> locals_names =
isolate->factory()->NewFixedArray(decoded_locals.max_function_index + 1);
for (LocalNamesPerFunction& func : decoded_locals.names) {
@@ -334,14 +310,32 @@ Handle<FixedArray> DecodeLocalNames(Isolate* isolate,
locals_names->set(func.function_index, *func_locals_names);
for (LocalName& name : func.names) {
Handle<String> name_str =
- WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- isolate, shared, name.name)
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, name.name)
.ToHandleChecked();
func_locals_names->set(name.local_index, *name_str);
}
}
return locals_names;
}
+
+namespace {
+template <typename T>
+inline size_t VectorSize(const std::vector<T>& vector) {
+ return sizeof(T) * vector.size();
+}
+} // namespace
+
+size_t EstimateWasmModuleSize(const WasmModule* module) {
+ size_t estimate =
+ sizeof(WasmModule) + VectorSize(module->signatures) +
+ VectorSize(module->signature_ids) + VectorSize(module->functions) +
+ VectorSize(module->data_segments) + VectorSize(module->tables) +
+ VectorSize(module->import_table) + VectorSize(module->export_table) +
+ VectorSize(module->exceptions) + VectorSize(module->table_inits);
+ // TODO(wasm): include names table and wire bytes in size estimate
+ return estimate;
+}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index be8f5766e4..3020548927 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -7,11 +7,8 @@
#include <memory>
-#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "src/objects/managed.h"
-#include "src/parsing/preparse-data.h"
#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-constants.h"
@@ -20,23 +17,12 @@
namespace v8 {
namespace internal {
-class WasmCompiledModule;
class WasmDebugInfo;
-class WasmGlobalObject;
-class WasmInstanceObject;
-class WasmMemoryObject;
class WasmModuleObject;
-class WasmSharedModuleData;
-class WasmTableObject;
-
-namespace compiler {
-class CallDescriptor;
-}
namespace wasm {
+
class ErrorThrower;
-class NativeModule;
-class TestingModuleBuilder;
// Static representation of a wasm function.
struct WasmFunction {
@@ -87,9 +73,9 @@ struct WasmDataSegment {
};
// Static representation of a wasm indirect call table.
-struct WasmIndirectFunctionTable {
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmIndirectFunctionTable);
-
+struct WasmTable {
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmTable);
+ ValueType type = kWasmStmt; // table type.
uint32_t initial_size = 0; // initial table size.
uint32_t maximum_size = 0; // maximum table size.
bool has_maximum_size = false; // true if there is a maximum size.
@@ -128,6 +114,10 @@ struct WasmExport {
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
+ ((origin) == wasm::kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
+ : (counters)->prefix##_asm_##suffix())
+
struct ModuleWireBytes;
// Static representation of a module.
@@ -146,45 +136,36 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmGlobal> globals;
// Size of the buffer required for all globals that are not imported and
// mutable.
- // TODO(wasm): Rename for clarity?
- uint32_t globals_size = 0;
+ uint32_t globals_buffer_size = 0;
uint32_t num_imported_mutable_globals = 0;
uint32_t num_imported_functions = 0;
- uint32_t num_declared_functions = 0;
+ uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
WireBytesRef name = {0, 0};
- // TODO(wasm): Add url here, for spec'ed location information.
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
- std::vector<WasmIndirectFunctionTable> function_tables;
+ std::vector<WasmTable> tables;
std::vector<WasmImport> import_table;
std::vector<WasmExport> export_table;
std::vector<WasmException> exceptions;
std::vector<WasmTableInit> table_inits;
SignatureMap signature_map; // canonicalizing map for signature indexes.
- WasmModule() : WasmModule(nullptr) {}
- WasmModule(std::unique_ptr<Zone> owned);
+ ModuleOrigin origin = kWasmOrigin; // origin of the module
+ mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>>
+ function_names;
- ModuleOrigin origin() const { return origin_; }
- void set_origin(ModuleOrigin new_value) { origin_ = new_value; }
- bool is_wasm() const { return origin_ == kWasmOrigin; }
- bool is_asm_js() const { return origin_ == kAsmJsOrigin; }
+ explicit WasmModule(std::unique_ptr<Zone> owned = nullptr);
- WireBytesRef LookupName(const ModuleWireBytes* wire_bytes,
- uint32_t function_index) const;
- WireBytesRef LookupName(SeqOneByteString* wire_bytes,
- uint32_t function_index) const;
- void AddNameForTesting(int function_index, WireBytesRef name);
-
- private:
- // TODO(kschimpf) - Encapsulate more fields.
- ModuleOrigin origin_ = kWasmOrigin; // origin of the module
- mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>> names_;
+ WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
+ uint32_t function_index) const;
+ void AddFunctionNameForTesting(int function_index, WireBytesRef name);
};
+size_t EstimateWasmModuleSize(const WasmModule* module);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -265,7 +246,7 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// Decode local variable names from the names section. Return FixedArray of
// FixedArray of <undefined|String>. The outer fixed array is indexed by the
// function index, the inner one by the local index.
-Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmSharedModuleData>);
+Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmModuleObject>);
// TruncatedUserString makes it easy to output names up to a certain length, and
// output a truncation followed by '...' if they exceed a limit.
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 34b08666f9..96bb622afc 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -10,6 +10,7 @@
#include "src/contexts-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/managed.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-module.h"
@@ -19,20 +20,18 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(WasmCompiledModule)
CAST_ACCESSOR(WasmDebugInfo)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
-CAST_ACCESSOR(WasmSharedModuleData)
CAST_ACCESSOR(WasmTableObject)
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
- } \
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ bool holder::has_##name() { \
+ return !READ_FIELD(this, offset)->IsUndefined(); \
+ } \
ACCESSORS(holder, name, type, offset)
#define READ_PRIMITIVE_FIELD(p, type, offset) \
@@ -50,10 +49,33 @@ CAST_ACCESSOR(WasmTableObject)
}
// WasmModuleObject
-ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
+ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>,
+ kNativeModuleOffset)
ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
-ACCESSORS(WasmModuleObject, shared, WasmSharedModuleData, kSharedOffset)
+ACCESSORS(WasmModuleObject, script, Script, kScriptOffset)
+ACCESSORS(WasmModuleObject, weak_instance_list, WeakArrayList,
+ kWeakInstanceListOffset)
+OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray,
+ kAsmJsOffsetTableOffset)
+OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
+ kBreakPointInfosOffset)
+wasm::NativeModule* WasmModuleObject::native_module() const {
+ return managed_native_module()->raw();
+}
+const wasm::WasmModule* WasmModuleObject::module() const {
+ // TODO(clemensh): Remove this helper (inline in callers).
+ return native_module()->module();
+}
+void WasmModuleObject::reset_breakpoint_infos() {
+ WRITE_FIELD(this, kBreakPointInfosOffset,
+ GetReadOnlyRoots().undefined_value());
+}
+bool WasmModuleObject::is_asm_js() {
+ bool asm_js = module()->origin == wasm::kAsmJsOrigin;
+ DCHECK_EQ(asm_js, script()->IsUserJavaScript());
+ DCHECK_EQ(asm_js, has_asm_js_offset_table());
+ return asm_js;
+}
// WasmTableObject
ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
@@ -116,6 +138,12 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, uint32_t,
kMemorySizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, uint32_t,
kMemoryMaskOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, roots_array_address, Address,
+ kRootsArrayAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
+ kStackLimitAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, real_stack_limit_address, Address,
+ kRealStackLimitAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_function_targets, Address*,
kImportedFunctionTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, globals_start, byte*,
@@ -128,9 +156,9 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
uint32_t*, kIndirectFunctionTableSigIdsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_adjusted_start, Address,
+ kJumpTableAdjustedStartOffset)
-ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
@@ -153,8 +181,9 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
FixedArray, kIndirectFunctionTableInstancesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, managed_indirect_patcher, Foreign,
- kManagedIndirectPatcherOffset)
+ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
+ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
+ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
@@ -180,20 +209,6 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
kInstanceOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
-// WasmSharedModuleData
-ACCESSORS(WasmSharedModuleData, managed_module, Object, kManagedModuleOffset)
-ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
- kModuleBytesOffset)
-ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
- kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
-void WasmSharedModuleData::reset_breakpoint_infos() {
- DCHECK(IsWasmSharedModuleData());
- WRITE_FIELD(this, kBreakPointInfosOffset, GetHeap()->undefined_value());
-}
-
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
@@ -206,41 +221,6 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
kCWasmEntryMapOffset)
#undef OPTIONAL_ACCESSORS
-
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, OFFSET, TYPE_CHECK) \
- bool WasmCompiledModule::has_##NAME() const { \
- Object* value = READ_FIELD(this, OFFSET); \
- return TYPE_CHECK; \
- } \
- \
- void WasmCompiledModule::reset_##NAME() { \
- WRITE_FIELD(this, OFFSET, GetHeap()->undefined_value()); \
- } \
- \
- ACCESSORS_CHECKED2(WasmCompiledModule, NAME, TYPE, OFFSET, TYPE_CHECK, true)
-
-#define WCM_OBJECT(TYPE, NAME, OFFSET) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, OFFSET, value->Is##TYPE())
-
-#define WCM_WEAK_LINK(TYPE, NAME, OFFSET) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, OFFSET, value->IsWeakCell()) \
- \
- TYPE* WasmCompiledModule::NAME() const { \
- DCHECK(!weak_##NAME()->cleared()); \
- return TYPE::cast(weak_##NAME()->value()); \
- }
-
-// WasmCompiledModule
-WCM_OBJECT(WasmCompiledModule, next_instance, kNextInstanceOffset)
-WCM_OBJECT(WasmCompiledModule, prev_instance, kPrevInstanceOffset)
-WCM_WEAK_LINK(WasmInstanceObject, owning_instance, kOwningInstanceOffset)
-WCM_OBJECT(Foreign, native_module, kNativeModuleOffset)
-ACCESSORS(WasmCompiledModule, raw_next_instance, Object, kNextInstanceOffset);
-ACCESSORS(WasmCompiledModule, raw_prev_instance, Object, kPrevInstanceOffset);
-
-#undef WCM_OBJECT_OR_WEAK
-#undef WCM_OBJECT
-#undef WCM_WEAK_LINK
#undef READ_PRIMITIVE_FIELD
#undef WRITE_PRIMITIVE_FIELD
#undef PRIMITIVE_ACCESSORS
@@ -249,10 +229,6 @@ uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
-inline bool WasmCompiledModule::has_instance() const {
- return !weak_owning_instance()->cleared();
-}
-
#include "src/objects/object-macros-undef.h"
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 5c9efc756c..7cd2fecb7f 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -7,11 +7,13 @@
#include "src/assembler-inl.h"
#include "src/base/iterator.h"
+#include "src/code-factory.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -126,6 +128,16 @@ class WasmInstanceNativeAllocations {
#undef SET
};
+size_t EstimateNativeAllocationsSize(const WasmModule* module) {
+ size_t estimate = sizeof(WasmInstanceNativeAllocations) +
+ (1 * kPointerSize * module->num_imported_mutable_globals) +
+ (2 * kPointerSize * module->num_imported_functions);
+ for (auto& table : module->tables) {
+ estimate += 3 * kPointerSize * table.initial_size;
+ }
+ return estimate;
+}
+
WasmInstanceNativeAllocations* GetNativeAllocations(
WasmInstanceObject* instance) {
return reinterpret_cast<Managed<WasmInstanceNativeAllocations>*>(
@@ -133,111 +145,14 @@ WasmInstanceNativeAllocations* GetNativeAllocations(
->raw();
}
-// An iterator that returns first the module itself, then all modules linked via
-// next, then all linked via prev.
-class CompiledModulesIterator
- : public v8::base::iterator<std::input_iterator_tag,
- Handle<WasmCompiledModule>> {
- public:
- CompiledModulesIterator(Isolate* isolate,
- Handle<WasmCompiledModule> start_module, bool at_end)
- : isolate_(isolate),
- start_module_(start_module),
- current_(
- at_end ? Handle<WasmCompiledModule>::null()
- : Handle<WasmCompiledModule>::New(*start_module, isolate)) {}
-
- Handle<WasmCompiledModule> operator*() const {
- DCHECK(!current_.is_null());
- return current_;
- }
-
- void operator++() { Advance(); }
-
- bool operator!=(const CompiledModulesIterator& other) {
- DCHECK(start_module_.is_identical_to(other.start_module_));
- return !current_.is_identical_to(other.current_);
- }
-
- private:
- void Advance() {
- DCHECK(!current_.is_null());
- if (!is_backwards_) {
- if (current_->has_next_instance()) {
- *current_.location() = current_->next_instance();
- return;
- }
- // No more modules in next-links, now try the previous-links.
- is_backwards_ = true;
- current_ = start_module_;
- }
- if (current_->has_prev_instance()) {
- *current_.location() = current_->prev_instance();
- return;
- }
- current_ = Handle<WasmCompiledModule>::null();
- }
-
- friend class CompiledModuleInstancesIterator;
- Isolate* isolate_;
- Handle<WasmCompiledModule> start_module_;
- Handle<WasmCompiledModule> current_;
- bool is_backwards_ = false;
-};
-
-// An iterator based on the CompiledModulesIterator, but it returns all live
-// instances, not the WasmCompiledModules itself.
-class CompiledModuleInstancesIterator
- : public v8::base::iterator<std::input_iterator_tag,
- Handle<WasmInstanceObject>> {
- public:
- CompiledModuleInstancesIterator(Isolate* isolate,
- Handle<WasmCompiledModule> start_module,
- bool at_end)
- : it(isolate, start_module, at_end) {
- while (NeedToAdvance()) ++it;
- }
-
- Handle<WasmInstanceObject> operator*() {
- return handle(
- WasmInstanceObject::cast((*it)->weak_owning_instance()->value()),
- it.isolate_);
- }
-
- void operator++() {
- do {
- ++it;
- } while (NeedToAdvance());
- }
-
- bool operator!=(const CompiledModuleInstancesIterator& other) {
- return it != other.it;
- }
-
- private:
- bool NeedToAdvance() {
- return !it.current_.is_null() && !it.current_->has_instance();
- }
- CompiledModulesIterator it;
-};
-
-v8::base::iterator_range<CompiledModuleInstancesIterator>
-iterate_compiled_module_instance_chain(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module());
- return {CompiledModuleInstancesIterator(isolate, compiled_module, false),
- CompiledModuleInstancesIterator(isolate, compiled_module, true)};
-}
-
#ifdef DEBUG
-bool IsBreakablePosition(WasmSharedModuleData* shared, int func_index,
+bool IsBreakablePosition(wasm::NativeModule* native_module, int func_index,
int offset_in_func) {
- DisallowHeapAllocation no_gc;
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
wasm::BodyLocalDecls locals(&tmp);
- const byte* module_start = shared->module_bytes()->GetChars();
- WasmFunction& func = shared->module()->functions[func_index];
+ const byte* module_start = native_module->wire_bytes().start();
+ const WasmFunction& func = native_module->module()->functions[func_index];
wasm::BytecodeIterator iterator(module_start + func.code.offset(),
module_start + func.code.end_offset(),
&locals);
@@ -260,21 +175,62 @@ enum DispatchTableElements : int {
} // namespace
+// static
Handle<WasmModuleObject> WasmModuleObject::New(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- Handle<FixedArray> export_wrappers, Handle<WasmSharedModuleData> shared) {
- Handle<JSFunction> module_cons(
- isolate->native_context()->wasm_module_constructor());
- auto module_object = Handle<WasmModuleObject>::cast(
- isolate->factory()->NewJSObject(module_cons));
- module_object->set_compiled_module(*compiled_module);
- module_object->set_export_wrappers(*export_wrappers);
- if (shared->script()->type() == Script::TYPE_WASM) {
- shared->script()->set_wasm_module_object(*module_object);
+ Isolate* isolate, std::shared_ptr<const wasm::WasmModule> shared_module,
+ wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes,
+ Handle<Script> script, Handle<ByteArray> asm_js_offset_table) {
+ DCHECK_EQ(shared_module.get(), env.module);
+
+ // Create a new {NativeModule} first.
+ size_t native_memory_estimate =
+ isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize(
+ env.module);
+ auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
+ isolate, native_memory_estimate,
+ wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module),
+ env);
+ native_module->set_wire_bytes(std::move(wire_bytes));
+ native_module->SetRuntimeStubs(isolate);
+
+ // Delegate to the shared {WasmModuleObject::New} allocator.
+ Handle<WasmModuleObject> module_object =
+ New(isolate, std::move(native_module), script);
+ if (!asm_js_offset_table.is_null()) {
+ module_object->set_asm_js_offset_table(*asm_js_offset_table);
}
- module_object->set_shared(*shared);
+ return module_object;
+}
- compiled_module->LogWasmCodes(isolate);
+// static
+Handle<WasmModuleObject> WasmModuleObject::New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<Script> script) {
+ int export_wrapper_size =
+ static_cast<int>(native_module->module()->num_exported_functions);
+ Handle<FixedArray> export_wrappers =
+ isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
+ isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
+ module_object->set_export_wrappers(*export_wrappers);
+ if (script->type() == Script::TYPE_WASM) {
+ script->set_wasm_module_object(*module_object);
+ }
+ module_object->set_script(*script);
+ module_object->set_weak_instance_list(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
+
+ // Use the given shared {NativeModule}, but increase its reference count by
+ // allocating a new {Managed<T>} that the {WasmModuleObject} references.
+ size_t native_memory_estimate =
+ isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize(
+ native_module->module());
+ size_t memory_estimate =
+ EstimateWasmModuleSize(native_module->module()) + native_memory_estimate;
+ Handle<Managed<wasm::NativeModule>> managed_native_module =
+ Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
+ std::move(native_module));
+ module_object->set_managed_native_module(*managed_native_module);
return module_object;
}
@@ -282,52 +238,511 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
int* position,
Handle<BreakPoint> break_point) {
Isolate* isolate = module_object->GetIsolate();
- Handle<WasmSharedModuleData> shared(module_object->shared(), isolate);
// Find the function for this breakpoint.
- int func_index = shared->GetContainingFunction(*position);
+ int func_index = module_object->GetContainingFunction(*position);
if (func_index < 0) return false;
- WasmFunction& func = shared->module()->functions[func_index];
+ const WasmFunction& func = module_object->module()->functions[func_index];
int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
- DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
+ DCHECK(IsBreakablePosition(module_object->native_module(), func_index,
+ offset_in_func));
- // Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(shared, *position, break_point);
+ // Insert new break point into break_positions of module object.
+ WasmModuleObject::AddBreakpoint(module_object, *position, break_point);
// Iterate over all instances of this module and tell them to set this new
- // breakpoint.
- for (Handle<WasmInstanceObject> instance :
- iterate_compiled_module_instance_chain(isolate, module_object)) {
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+ // breakpoint. We do this using the weak list of all instances.
+ Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
+ isolate);
+ for (int i = 0; i < weak_instance_list->length(); ++i) {
+ MaybeObject* maybe_instance = weak_instance_list->Get(i);
+ if (maybe_instance->IsWeakHeapObject()) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(maybe_instance->ToWeakHeapObject()),
+ isolate);
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+ }
}
return true;
}
-void WasmModuleObject::ValidateStateForTesting(
- Isolate* isolate, Handle<WasmModuleObject> module_obj) {
+namespace {
+
+int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
+ if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
+ return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
+}
+
+int FindBreakpointInfoInsertPos(Isolate* isolate,
+ Handle<FixedArray> breakpoint_infos,
+ int position) {
+ // Find insert location via binary search, taking care of undefined values on
+ // the right. Position is always greater than zero.
+ DCHECK_LT(0, position);
+
+ int left = 0; // inclusive
+ int right = breakpoint_infos->length(); // exclusive
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ Object* mid_obj = breakpoint_infos->get(mid);
+ if (GetBreakpointPos(isolate, mid_obj) <= position) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+
+ int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
+ return left_pos < position ? left + 1 : left;
+}
+
+} // namespace
+
+void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
+ int position,
+ Handle<BreakPoint> break_point) {
+ Isolate* isolate = module_object->GetIsolate();
+ Handle<FixedArray> breakpoint_infos;
+ if (module_object->has_breakpoint_infos()) {
+ breakpoint_infos = handle(module_object->breakpoint_infos(), isolate);
+ } else {
+ breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
+ module_object->set_breakpoint_infos(*breakpoint_infos);
+ }
+
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+
+ // If a BreakPointInfo object already exists for this position, add the new
+ // breakpoint object and return.
+ if (insert_pos < breakpoint_infos->length() &&
+ GetBreakpointPos(isolate, breakpoint_infos->get(insert_pos)) ==
+ position) {
+ Handle<BreakPointInfo> old_info(
+ BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
+ BreakPointInfo::SetBreakPoint(isolate, old_info, break_point);
+ return;
+ }
+
+ // Enlarge break positions array if necessary.
+ bool need_realloc = !breakpoint_infos->get(breakpoint_infos->length() - 1)
+ ->IsUndefined(isolate);
+ Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
+ if (need_realloc) {
+ new_breakpoint_infos = isolate->factory()->NewFixedArray(
+ 2 * breakpoint_infos->length(), TENURED);
+ module_object->set_breakpoint_infos(*new_breakpoint_infos);
+ // Copy over the entries [0, insert_pos).
+ for (int i = 0; i < insert_pos; ++i)
+ new_breakpoint_infos->set(i, breakpoint_infos->get(i));
+ }
+
+ // Move elements [insert_pos, ...] up by one.
+ for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
+ Object* entry = breakpoint_infos->get(i);
+ if (entry->IsUndefined(isolate)) continue;
+ new_breakpoint_infos->set(i + 1, entry);
+ }
+
+ // Generate new BreakpointInfo.
+ Handle<BreakPointInfo> breakpoint_info =
+ isolate->factory()->NewBreakPointInfo(position);
+ BreakPointInfo::SetBreakPoint(isolate, breakpoint_info, break_point);
+
+ // Now insert new position at insert_pos.
+ new_breakpoint_infos->set(insert_pos, *breakpoint_info);
+}
+
+void WasmModuleObject::SetBreakpointsOnNewInstance(
+ Handle<WasmModuleObject> module_object,
+ Handle<WasmInstanceObject> instance) {
+ if (!module_object->has_breakpoint_infos()) return;
+ Isolate* isolate = module_object->GetIsolate();
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+
+ Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
+ isolate);
+ // If the array exists, it should not be empty.
+ DCHECK_LT(0, breakpoint_infos->length());
+
+ for (int i = 0, e = breakpoint_infos->length(); i < e; ++i) {
+ Handle<Object> obj(breakpoint_infos->get(i), isolate);
+ if (obj->IsUndefined(isolate)) {
+ for (; i < e; ++i) {
+ DCHECK(breakpoint_infos->get(i)->IsUndefined(isolate));
+ }
+ break;
+ }
+ Handle<BreakPointInfo> breakpoint_info = Handle<BreakPointInfo>::cast(obj);
+ int position = breakpoint_info->source_position();
+
+ // Find the function for this breakpoint, and set the breakpoint.
+ int func_index = module_object->GetContainingFunction(position);
+ DCHECK_LE(0, func_index);
+ const WasmFunction& func = module_object->module()->functions[func_index];
+ int offset_in_func = position - func.code.offset();
+ WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+ }
+}
+
+namespace {
+
+enum AsmJsOffsetTableEntryLayout {
+ kOTEByteOffset,
+ kOTECallPosition,
+ kOTENumberConvPosition,
+ kOTESize
+};
+
+Handle<ByteArray> GetDecodedAsmJsOffsetTable(
+ Handle<WasmModuleObject> module_object, Isolate* isolate) {
+ DCHECK(module_object->is_asm_js());
+ Handle<ByteArray> offset_table(module_object->asm_js_offset_table(), isolate);
+
+ // The last byte in the asm_js_offset_tables ByteArray tells whether it is
+ // still encoded (0) or decoded (1).
+ enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
+ int table_type = offset_table->get(offset_table->length() - 1);
+ DCHECK(table_type == Encoded || table_type == Decoded);
+ if (table_type == Decoded) return offset_table;
+
+ wasm::AsmJsOffsetsResult asm_offsets;
+ {
+ DisallowHeapAllocation no_gc;
+ byte* bytes_start = offset_table->GetDataStartAddress();
+ byte* bytes_end = reinterpret_cast<byte*>(
+ reinterpret_cast<Address>(bytes_start) + offset_table->length() - 1);
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ }
+ // Wasm bytes must be valid and must contain asm.js offset table.
+ DCHECK(asm_offsets.ok());
+ DCHECK_GE(kMaxInt, asm_offsets.val.size());
+ int num_functions = static_cast<int>(asm_offsets.val.size());
+ int num_imported_functions =
+ static_cast<int>(module_object->module()->num_imported_functions);
+ DCHECK_EQ(module_object->module()->functions.size(),
+ static_cast<size_t>(num_functions) + num_imported_functions);
+ int num_entries = 0;
+ for (int func = 0; func < num_functions; ++func) {
+ size_t new_size = asm_offsets.val[func].size();
+ DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
+ num_entries += static_cast<int>(new_size);
+ }
+ // One byte to encode that this is a decoded table.
+ DCHECK_GE(kMaxInt,
+ 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
+ int total_size = 1 + num_entries * kOTESize * kIntSize;
+ Handle<ByteArray> decoded_table =
+ isolate->factory()->NewByteArray(total_size, TENURED);
+ decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
+ module_object->set_asm_js_offset_table(*decoded_table);
+
+ int idx = 0;
+ const std::vector<WasmFunction>& wasm_funs =
+ module_object->module()->functions;
+ for (int func = 0; func < num_functions; ++func) {
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
+ asm_offsets.val[func];
+ if (func_asm_offsets.empty()) continue;
+ int func_offset = wasm_funs[num_imported_functions + func].code.offset();
+ for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
+ // Byte offsets must be strictly monotonously increasing:
+ DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
+ decoded_table->get_int(idx - kOTESize));
+ decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
+ decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
+ decoded_table->set_int(idx + kOTENumberConvPosition,
+ e.source_position_number_conversion);
+ idx += kOTESize;
+ }
+ }
+ DCHECK_EQ(total_size, idx * kIntSize + 1);
+ return decoded_table;
+}
+
+} // namespace
+
+int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
+ uint32_t func_index,
+ uint32_t byte_offset,
+ bool is_at_number_conversion) {
+ Isolate* isolate = module_object->GetIsolate();
+ const WasmModule* module = module_object->module();
+
+ if (module->origin != wasm::kAsmJsOrigin) {
+ // for non-asm.js modules, we just add the function's start offset
+ // to make a module-relative position.
+ return byte_offset + module_object->GetFunctionOffset(func_index);
+ }
+
+ // asm.js modules have an additional offset table that must be searched.
+ Handle<ByteArray> offset_table =
+ GetDecodedAsmJsOffsetTable(module_object, isolate);
+
+ DCHECK_LT(func_index, module->functions.size());
+ uint32_t func_code_offset = module->functions[func_index].code.offset();
+ uint32_t total_offset = func_code_offset + byte_offset;
+
+ // Binary search for the total byte offset.
+ int left = 0; // inclusive
+ int right = offset_table->length() / kIntSize / kOTESize; // exclusive
+ DCHECK_LT(left, right);
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ int mid_entry = offset_table->get_int(kOTESize * mid);
+ DCHECK_GE(kMaxInt, mid_entry);
+ if (static_cast<uint32_t>(mid_entry) <= total_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // There should be an entry for each position that could show up on the stack
+ // trace:
+ DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
+ int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
+ return offset_table->get_int(kOTESize * left + idx);
+}
+
+v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction(
+ int func_index) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK(!compiled_module->has_prev_instance());
- CHECK(!compiled_module->has_next_instance());
- CHECK(!compiled_module->has_instance());
+
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module()->functions.size())
+ return {};
+
+ Vector<const byte> wire_bytes = native_module()->wire_bytes();
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module(), wire_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
+bool WasmModuleObject::GetPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::BreakLocation>* locations) {
+ DisallowHeapAllocation no_gc;
+
+ const std::vector<WasmFunction>& functions = module()->functions;
+ if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
+ (!end.IsEmpty() &&
+ (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
+ return false;
+
+ // start_func_index, start_offset and end_func_index is inclusive.
+ // end_offset is exclusive.
+ // start_offset and end_offset are module-relative byte offsets.
+ uint32_t start_func_index = start.GetLineNumber();
+ if (start_func_index >= functions.size()) return false;
+ int start_func_len = functions[start_func_index].code.length();
+ if (start.GetColumnNumber() > start_func_len) return false;
+ uint32_t start_offset =
+ functions[start_func_index].code.offset() + start.GetColumnNumber();
+ uint32_t end_func_index;
+ uint32_t end_offset;
+ if (end.IsEmpty()) {
+ // Default: everything till the end of the Script.
+ end_func_index = static_cast<uint32_t>(functions.size() - 1);
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ // If end is specified: Use it and check for valid input.
+ end_func_index = static_cast<uint32_t>(end.GetLineNumber());
+
+ // Special case: Stop before the start of the next function. Change to: Stop
+ // at the end of the function before, such that we don't disassemble the
+ // next function also.
+ if (end.GetColumnNumber() == 0 && end_func_index > 0) {
+ --end_func_index;
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ if (end_func_index >= functions.size()) return false;
+ end_offset =
+ functions[end_func_index].code.offset() + end.GetColumnNumber();
+ if (end_offset > functions[end_func_index].code.end_offset())
+ return false;
+ }
+ }
+
+ AccountingAllocator alloc;
+ Zone tmp(&alloc, ZONE_NAME);
+ const byte* module_start = native_module()->wire_bytes().start();
+
+ for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
+ ++func_idx) {
+ const WasmFunction& func = functions[func_idx];
+ if (func.code.length() == 0) continue;
+
+ wasm::BodyLocalDecls locals(&tmp);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
+ DCHECK_LT(0u, locals.encoded_size);
+ for (uint32_t offset : iterator.offsets()) {
+ uint32_t total_offset = func.code.offset() + offset;
+ if (total_offset >= end_offset) {
+ DCHECK_EQ(end_func_index, func_idx);
+ break;
+ }
+ if (total_offset < start_offset) continue;
+ locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
+ }
+ }
+ return true;
+}
+
+MaybeHandle<FixedArray> WasmModuleObject::CheckBreakPoints(
+ Isolate* isolate, Handle<WasmModuleObject> module_object, int position) {
+ if (!module_object->has_breakpoint_infos()) return {};
+
+ Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
+ isolate);
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+ if (insert_pos >= breakpoint_infos->length()) return {};
+
+ Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
+ isolate);
+ if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
+ Handle<BreakPointInfo> breakpoint_info =
+ Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
+ if (breakpoint_info->source_position() != position) return {};
+
+ // There is no support for conditional break points. Just assume that every
+ // break point always hits.
+ Handle<Object> break_points(breakpoint_info->break_points(), isolate);
+ if (break_points->IsFixedArray()) {
+ return Handle<FixedArray>::cast(break_points);
+ }
+ Handle<FixedArray> break_points_hit = isolate->factory()->NewFixedArray(1);
+ break_points_hit->set(0, *break_points);
+ return break_points_hit;
+}
+
+MaybeHandle<String> WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmModuleObject> module_object,
+ wasm::WireBytesRef ref) {
+ // TODO(wasm): cache strings from modules if it's a performance win.
+ Vector<const uint8_t> wire_bytes =
+ module_object->native_module()->wire_bytes();
+ return ExtractUtf8StringFromModuleBytes(isolate, wire_bytes, ref);
+}
+
+MaybeHandle<String> WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Vector<const uint8_t> wire_bytes,
+ wasm::WireBytesRef ref) {
+ Vector<const uint8_t> name_vec = wire_bytes + ref.offset();
+ name_vec.Truncate(ref.length());
+ // UTF8 validation happens at decode time.
+ DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.start(), name_vec.length()));
+ return isolate->factory()->NewStringFromUtf8(
+ Vector<const char>::cast(name_vec));
+}
+
+MaybeHandle<String> WasmModuleObject::GetModuleNameOrNull(
+ Isolate* isolate, Handle<WasmModuleObject> module_object) {
+ const WasmModule* module = module_object->module();
+ if (!module->name.is_set()) return {};
+ return ExtractUtf8StringFromModuleBytes(isolate, module_object, module->name);
+}
+
+MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
+ Isolate* isolate, Handle<WasmModuleObject> module_object,
+ uint32_t func_index) {
+ DCHECK_LT(func_index, module_object->module()->functions.size());
+ wasm::WireBytesRef name = module_object->module()->LookupFunctionName(
+ wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
+ func_index);
+ if (!name.is_set()) return {};
+ return ExtractUtf8StringFromModuleBytes(isolate, module_object, name);
+}
+
+Handle<String> WasmModuleObject::GetFunctionName(
+ Isolate* isolate, Handle<WasmModuleObject> module_object,
+ uint32_t func_index) {
+ MaybeHandle<String> name =
+ GetFunctionNameOrNull(isolate, module_object, func_index);
+ if (!name.is_null()) return name.ToHandleChecked();
+ return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
+}
+
+Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
+ uint32_t func_index) {
+ DCHECK_GT(module()->functions.size(), func_index);
+ wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
+ wasm::WireBytesRef name_ref =
+ module()->LookupFunctionName(wire_bytes, func_index);
+ wasm::WasmName name = wire_bytes.GetName(name_ref);
+ return Vector<const uint8_t>::cast(name);
+}
+
+int WasmModuleObject::GetFunctionOffset(uint32_t func_index) {
+ const std::vector<WasmFunction>& functions = module()->functions;
+ if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
+ DCHECK_GE(kMaxInt, functions[func_index].code.offset());
+ return static_cast<int>(functions[func_index].code.offset());
+}
+
+int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) {
+ const std::vector<WasmFunction>& functions = module()->functions;
+
+ // Binary search for a function containing the given position.
+ int left = 0; // inclusive
+ int right = static_cast<int>(functions.size()); // exclusive
+ if (right == 0) return false;
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (functions[mid].code.offset() <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // If the found function does not contains the given position, return -1.
+ const WasmFunction& func = functions[left];
+ if (byte_offset < func.code.offset() ||
+ byte_offset >= func.code.end_offset()) {
+ return -1;
+ }
+
+ return left;
+}
+
+bool WasmModuleObject::GetPositionInfo(uint32_t position,
+ Script::PositionInfo* info) {
+ int func_index = GetContainingFunction(position);
+ if (func_index < 0) return false;
+
+ const WasmFunction& function = module()->functions[func_index];
+
+ info->line = func_index;
+ info->column = position - function.code.offset();
+ info->line_start = function.code.offset();
+ info->line_end = function.code.end_offset();
+ return true;
}
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions) {
Handle<JSFunction> table_ctor(
- isolate->native_context()->wasm_table_constructor());
+ isolate->native_context()->wasm_table_constructor(), isolate);
auto table_obj = Handle<WasmTableObject>::cast(
isolate->factory()->NewJSObject(table_ctor));
*js_functions = isolate->factory()->NewFixedArray(initial);
- Object* null = isolate->heap()->null_value();
+ Object* null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
(*js_functions)->set(i, null);
}
@@ -336,7 +751,7 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
Handle<Object> max = isolate->factory()->NewNumber(maximum);
table_obj->set_maximum_length(*max);
- table_obj->set_dispatch_tables(isolate->heap()->empty_fixed_array());
+ table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
return Handle<WasmTableObject>::cast(table_obj);
}
@@ -344,7 +759,7 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
Handle<WasmTableObject> table_obj,
Handle<WasmInstanceObject> instance,
int table_index) {
- Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables());
+ Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables(), isolate);
int old_length = dispatch_tables->length();
DCHECK_EQ(0, old_length % kDispatchTableNumElements);
@@ -367,7 +782,7 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
if (count == 0) return; // Degenerate case: nothing to do.
- Handle<FixedArray> dispatch_tables(this->dispatch_tables());
+ Handle<FixedArray> dispatch_tables(this->dispatch_tables(), isolate);
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
@@ -393,21 +808,23 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> array(table->functions(), isolate);
if (function.is_null()) {
ClearDispatchTables(isolate, table, table_index); // Degenerate case.
- array->set(table_index, isolate->heap()->null_value());
+ array->set(table_index, ReadOnlyRoots(isolate).null_value());
return;
}
// TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
DCHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
auto exported_function = Handle<WasmExportedFunction>::cast(function);
- Handle<WasmInstanceObject> other_instance(exported_function->instance());
+ Handle<WasmInstanceObject> other_instance(exported_function->instance(),
+ isolate);
int func_index = exported_function->function_index();
auto* wasm_function = &other_instance->module()->functions[func_index];
DCHECK_NOT_NULL(wasm_function);
DCHECK_NOT_NULL(wasm_function->sig);
Address call_target = exported_function->GetWasmCallTarget();
UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
- handle(exported_function->instance()), call_target);
+ handle(exported_function->instance(), isolate),
+ call_target);
array->set(table_index, *function);
}
@@ -428,7 +845,7 @@ void WasmTableObject::UpdateDispatchTables(
isolate);
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
- auto sig_id = to_instance->module()->signature_map.Find(sig);
+ auto sig_id = to_instance->module()->signature_map.Find(*sig);
IndirectFunctionTableEntry(to_instance, table_index)
.set(sig_id, *from_instance, call_target);
}
@@ -474,14 +891,12 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
// freeing it too early.
- if (!old_buffer->is_external() && old_size != 0 &&
+ if (!old_buffer->is_external() &&
((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
- DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
+ DCHECK_NOT_NULL(old_buffer->backing_store());
// If adjusting permissions fails, propagate error back to return
// failure to grow.
- DCHECK(!isolate->wasm_engine()->memory_tracker()->IsEmptyBackingStore(
- old_mem_start));
if (!i::SetPermissions(old_mem_start, new_size,
PageAllocator::kReadWrite)) {
return {};
@@ -505,6 +920,15 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
return {};
}
+ wasm::WasmMemoryTracker* const memory_tracker =
+ isolate->wasm_engine()->memory_tracker();
+ // If the old buffer had full guard regions, we can only safely use the new
+ // buffer if it also has full guard regions. Otherwise, we'd have to
+ // recompile all the instances using this memory to insert bounds checks.
+ if (memory_tracker->HasFullGuardRegions(old_mem_start) &&
+ !memory_tracker->HasFullGuardRegions(new_buffer->backing_store())) {
+ return {};
+ }
if (old_size == 0) return new_buffer;
memcpy(new_buffer->backing_store(), old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
@@ -542,7 +966,7 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
// that the memory will match the style of the compiled wasm module.
// See issue v8:7143
Handle<JSFunction> memory_ctor(
- isolate->native_context()->wasm_memory_constructor());
+ isolate->native_context()->wasm_memory_constructor(), isolate);
auto memory_obj = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, TENURED));
@@ -568,6 +992,30 @@ uint32_t WasmMemoryObject::current_pages() {
return byte_length / wasm::kWasmPageSize;
}
+bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
+ const wasm::WasmMemoryTracker::AllocationData* allocation =
+ isolate->wasm_engine()->memory_tracker()->FindAllocationData(
+ array_buffer()->backing_store());
+ CHECK_NOT_NULL(allocation);
+
+ Address allocation_base =
+ reinterpret_cast<Address>(allocation->allocation_base);
+ Address buffer_start = reinterpret_cast<Address>(allocation->buffer_start);
+
+ // Return whether the allocation covers every possible Wasm heap index.
+ //
+ // We always have the following relationship:
+ // allocation_base <= buffer_start <= buffer_start + memory_size <=
+ // allocation_base + allocation_length
+ // (in other words, the buffer fits within the allocation)
+ //
+ // The space between buffer_start + memory_size and allocation_base +
+ // allocation_length is the guard region. Here we make sure the guard region
+ // is large enough for any Wasm heap offset.
+ return buffer_start + wasm::kWasmMaxHeapOffset <=
+ allocation_base + allocation->allocation_length;
+}
+
void WasmMemoryObject::AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
@@ -576,7 +1024,7 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
? Handle<FixedArrayOfWeakCells>(memory->instances(), isolate)
: Handle<FixedArrayOfWeakCells>::null();
Handle<FixedArrayOfWeakCells> new_instances =
- FixedArrayOfWeakCells::Add(old_instances, instance);
+ FixedArrayOfWeakCells::Add(isolate, old_instances, instance);
memory->set_instances(*new_instances);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate);
SetInstanceMemory(isolate, instance, buffer);
@@ -594,7 +1042,7 @@ void WasmMemoryObject::RemoveInstance(Isolate* isolate,
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
- Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
+ Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
if (!old_buffer->is_growable()) return -1;
uint32_t old_size = 0;
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
@@ -631,7 +1079,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
wasm::ValueType type, int32_t offset, bool is_mutable) {
Handle<JSFunction> global_ctor(
- isolate->native_context()->wasm_global_constructor());
+ isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
@@ -668,7 +1116,7 @@ void IndirectFunctionTableEntry::clear() {
instance_->indirect_function_table_sig_ids()[index_] = -1;
instance_->indirect_function_table_targets()[index_] = 0;
instance_->indirect_function_table_instances()->set(
- index_, instance_->GetIsolate()->heap()->undefined_value());
+ index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
}
void IndirectFunctionTableEntry::set(int sig_id, WasmInstanceObject* instance,
@@ -711,7 +1159,7 @@ void ImportedFunctionEntry::set_wasm_to_wasm(WasmInstanceObject* instance,
*instance_, index_, instance, call_target);
instance_->imported_function_instances()->set(index_, instance);
instance_->imported_function_callables()->set(
- index_, instance_->GetHeap()->undefined_value());
+ index_, instance_->GetReadOnlyRoots().undefined_value());
instance_->imported_function_targets()[index_] = call_target;
}
@@ -756,23 +1204,24 @@ void WasmInstanceObject::SetRawMemory(byte* mem_start, uint32_t mem_size) {
set_memory_mask(mem_mask64);
}
-WasmModule* WasmInstanceObject::module() {
- return module_object()->shared()->module();
+const WasmModule* WasmInstanceObject::module() {
+ return module_object()->module();
}
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
Handle<WasmInstanceObject> instance) {
- if (instance->has_debug_info()) return handle(instance->debug_info());
+ if (instance->has_debug_info()) {
+ return handle(instance->debug_info(), instance->GetIsolate());
+ }
Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
DCHECK(instance->has_debug_info());
return new_info;
}
Handle<WasmInstanceObject> WasmInstanceObject::New(
- Isolate* isolate, Handle<WasmModuleObject> module_object,
- Handle<WasmCompiledModule> compiled_module) {
+ Isolate* isolate, Handle<WasmModuleObject> module_object) {
Handle<JSFunction> instance_cons(
- isolate->native_context()->wasm_instance_constructor());
+ isolate->native_context()->wasm_instance_constructor(), isolate);
Handle<JSObject> instance_object =
isolate->factory()->NewJSObject(instance_cons, TENURED);
@@ -780,58 +1229,57 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
// Initialize the imported function arrays.
- auto num_imported_functions =
- module_object->shared()->module()->num_imported_functions;
- auto num_imported_mutable_globals =
- module_object->shared()->module()->num_imported_mutable_globals;
+ auto module = module_object->module();
+ auto num_imported_functions = module->num_imported_functions;
+ auto num_imported_mutable_globals = module->num_imported_mutable_globals;
+ size_t native_allocations_size = EstimateNativeAllocationsSize(module);
auto native_allocations = Managed<WasmInstanceNativeAllocations>::Allocate(
- isolate, instance, num_imported_functions, num_imported_mutable_globals);
+ isolate, native_allocations_size, instance, num_imported_functions,
+ num_imported_mutable_globals);
instance->set_managed_native_allocations(*native_allocations);
Handle<FixedArray> imported_function_instances =
isolate->factory()->NewFixedArray(num_imported_functions);
-
instance->set_imported_function_instances(*imported_function_instances);
+
Handle<FixedArray> imported_function_callables =
isolate->factory()->NewFixedArray(num_imported_functions);
-
instance->set_imported_function_callables(*imported_function_callables);
+ Handle<Code> centry_stub = CodeFactory::CEntry(isolate);
+ instance->set_centry_stub(*centry_stub);
+
instance->SetRawMemory(nullptr, 0);
+ instance->set_roots_array_address(
+ reinterpret_cast<Address>(isolate->heap()->roots_array_start()));
+ instance->set_stack_limit_address(
+ isolate->stack_guard()->address_of_jslimit());
+ instance->set_real_stack_limit_address(
+ isolate->stack_guard()->address_of_real_jslimit());
instance->set_globals_start(nullptr);
instance->set_indirect_function_table_size(0);
instance->set_indirect_function_table_sig_ids(nullptr);
instance->set_indirect_function_table_targets(nullptr);
- instance->set_compiled_module(*compiled_module);
instance->set_native_context(*isolate->native_context());
instance->set_module_object(*module_object);
+ instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
+ instance->set_null_value(ReadOnlyRoots(isolate).null_value());
+ instance->set_jump_table_adjusted_start(
+ module_object->native_module()->jump_table_start() -
+ wasm::JumpTableAssembler::kJumpTableSlotSize *
+ module->num_imported_functions);
+
+ // Insert the new instance into the modules weak list of instances.
+ // TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below.
+ Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
+ isolate);
+ weak_instance_list = WeakArrayList::AddToEnd(
+ isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
+ module_object->set_weak_instance_list(*weak_instance_list);
return instance;
}
-void WasmInstanceObject::ValidateInstancesChainForTesting(
- Isolate* isolate, Handle<WasmModuleObject> module_obj, int instance_count) {
- CHECK_GE(instance_count, 0);
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- Object* prev = nullptr;
- int found_instances = compiled_module->has_instance() ? 1 : 0;
- WasmCompiledModule* current_instance = compiled_module;
- while (current_instance->has_next_instance()) {
- CHECK((prev == nullptr && !current_instance->has_prev_instance()) ||
- current_instance->prev_instance() == prev);
- CHECK(current_instance->weak_owning_instance()
- ->value()
- ->IsWasmInstanceObject());
- prev = current_instance;
- current_instance =
- WasmCompiledModule::cast(current_instance->next_instance());
- ++found_instances;
- CHECK_LE(found_instances, instance_count);
- }
- CHECK_EQ(found_instances, instance_count);
-}
-
namespace {
void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
DisallowHeapAllocation no_gc;
@@ -840,13 +1288,8 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
// If a link to shared memory instances exists, update the list of memory
// instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = instance->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (native_module) {
- TRACE("Finalizing %zu {\n", native_module->instance_id);
- } else {
- TRACE("Finalized already cleaned up compiled module\n");
- }
+ TRACE("Finalizing instance of %p {\n",
+ instance->module_object()->native_module());
// Since the order of finalizers is not guaranteed, it can be the case
// that {instance->compiled_module()->module()}, which is a
@@ -855,30 +1298,14 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
// the next GC cycle, so we need to manually break some links (such as
// the weak references from {WasmMemoryObject::instances}.
if (instance->has_memory_object()) {
- WasmMemoryObject::RemoveInstance(isolate, handle(instance->memory_object()),
- handle(instance));
- }
-
- // We want to maintain a link from the {WasmModuleObject} to the first link
- // within the linked {WasmInstanceObject} list, even if the last instance is
- // finalized. This allows us to clone new {WasmCompiledModule} objects during
- // instantiation without having to regenerate the compiled module.
- WasmModuleObject* module_object = instance->module_object();
- WasmCompiledModule* current_template = module_object->compiled_module();
- DCHECK(!current_template->has_prev_instance());
- if (current_template == compiled_module) {
- if (!compiled_module->has_next_instance()) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- module_object->set_compiled_module(compiled_module->next_instance());
- }
+ WasmMemoryObject::RemoveInstance(isolate,
+ handle(instance->memory_object(), isolate),
+ handle(instance, isolate));
}
// Free raw C++ memory associated with the instance.
GetNativeAllocations(instance)->free();
- compiled_module->RemoveFromChain();
-
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
TRACE("}\n");
}
@@ -893,7 +1320,7 @@ void WasmInstanceObject::InstallFinalizer(Isolate* isolate,
}
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
- wasm::NativeModule* native_module = compiled_module()->GetNativeModule();
+ wasm::NativeModule* native_module = module_object()->native_module();
if (func_index < native_module->num_imported_functions()) {
return imported_function_targets()[func_index];
}
@@ -902,7 +1329,7 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
- Handle<JSFunction> js_function(JSFunction::cast(object));
+ JSFunction* js_function = JSFunction::cast(object);
if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
DCHECK(js_function->shared()->HasWasmExportedFunctionData());
return true;
@@ -952,618 +1379,10 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
return Handle<WasmExportedFunction>::cast(js_function);
}
-wasm::WasmCode* WasmExportedFunction::GetWasmCode() {
- Address target = GetWasmCallTarget();
- wasm::WasmCode* wasm_code =
- GetIsolate()->wasm_engine()->code_manager()->LookupCode(target);
- return wasm_code;
-}
-
Address WasmExportedFunction::GetWasmCallTarget() {
return instance()->GetCallTarget(function_index());
}
-WasmModule* WasmSharedModuleData::module() const {
- return Managed<WasmModule>::cast(managed_module())->raw();
-}
-
-Handle<WasmSharedModuleData> WasmSharedModuleData::New(
- Isolate* isolate, Handle<Foreign> managed_module,
- Handle<SeqOneByteString> module_bytes, Handle<Script> script,
- Handle<ByteArray> asm_js_offset_table) {
- Handle<WasmSharedModuleData> data = Handle<WasmSharedModuleData>::cast(
- isolate->factory()->NewStruct(WASM_SHARED_MODULE_DATA_TYPE, TENURED));
- data->set_managed_module(*managed_module);
- if (!module_bytes.is_null()) {
- data->set_module_bytes(*module_bytes);
- }
- if (!script.is_null()) {
- data->set_script(*script);
- }
- if (!asm_js_offset_table.is_null()) {
- data->set_asm_js_offset_table(*asm_js_offset_table);
- }
- return data;
-}
-
-bool WasmSharedModuleData::is_asm_js() {
- bool asm_js = module()->is_asm_js();
- DCHECK_EQ(asm_js, script()->IsUserJavaScript());
- DCHECK_EQ(asm_js, has_asm_js_offset_table());
- return asm_js;
-}
-
-namespace {
-
-int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
- if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
- return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
-}
-
-int FindBreakpointInfoInsertPos(Isolate* isolate,
- Handle<FixedArray> breakpoint_infos,
- int position) {
- // Find insert location via binary search, taking care of undefined values on
- // the right. Position is always greater than zero.
- DCHECK_LT(0, position);
-
- int left = 0; // inclusive
- int right = breakpoint_infos->length(); // exclusive
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- Object* mid_obj = breakpoint_infos->get(mid);
- if (GetBreakpointPos(isolate, mid_obj) <= position) {
- left = mid;
- } else {
- right = mid;
- }
- }
-
- int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
- return left_pos < position ? left + 1 : left;
-}
-
-} // namespace
-
-void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
- int position,
- Handle<BreakPoint> break_point) {
- Isolate* isolate = shared->GetIsolate();
- Handle<FixedArray> breakpoint_infos;
- if (shared->has_breakpoint_infos()) {
- breakpoint_infos = handle(shared->breakpoint_infos(), isolate);
- } else {
- breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
- shared->set_breakpoint_infos(*breakpoint_infos);
- }
-
- int insert_pos =
- FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
-
- // If a BreakPointInfo object already exists for this position, add the new
- // breakpoint object and return.
- if (insert_pos < breakpoint_infos->length() &&
- GetBreakpointPos(isolate, breakpoint_infos->get(insert_pos)) ==
- position) {
- Handle<BreakPointInfo> old_info(
- BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
- BreakPointInfo::SetBreakPoint(old_info, break_point);
- return;
- }
-
- // Enlarge break positions array if necessary.
- bool need_realloc = !breakpoint_infos->get(breakpoint_infos->length() - 1)
- ->IsUndefined(isolate);
- Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
- if (need_realloc) {
- new_breakpoint_infos = isolate->factory()->NewFixedArray(
- 2 * breakpoint_infos->length(), TENURED);
- shared->set_breakpoint_infos(*new_breakpoint_infos);
- // Copy over the entries [0, insert_pos).
- for (int i = 0; i < insert_pos; ++i)
- new_breakpoint_infos->set(i, breakpoint_infos->get(i));
- }
-
- // Move elements [insert_pos, ...] up by one.
- for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
- Object* entry = breakpoint_infos->get(i);
- if (entry->IsUndefined(isolate)) continue;
- new_breakpoint_infos->set(i + 1, entry);
- }
-
- // Generate new BreakpointInfo.
- Handle<BreakPointInfo> breakpoint_info =
- isolate->factory()->NewBreakPointInfo(position);
- BreakPointInfo::SetBreakPoint(breakpoint_info, break_point);
-
- // Now insert new position at insert_pos.
- new_breakpoint_infos->set(insert_pos, *breakpoint_info);
-}
-
-void WasmSharedModuleData::SetBreakpointsOnNewInstance(
- Handle<WasmSharedModuleData> shared, Handle<WasmInstanceObject> instance) {
- if (!shared->has_breakpoint_infos()) return;
- Isolate* isolate = shared->GetIsolate();
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
-
- Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
- // If the array exists, it should not be empty.
- DCHECK_LT(0, breakpoint_infos->length());
-
- for (int i = 0, e = breakpoint_infos->length(); i < e; ++i) {
- Handle<Object> obj(breakpoint_infos->get(i), isolate);
- if (obj->IsUndefined(isolate)) {
- for (; i < e; ++i) {
- DCHECK(breakpoint_infos->get(i)->IsUndefined(isolate));
- }
- break;
- }
- Handle<BreakPointInfo> breakpoint_info = Handle<BreakPointInfo>::cast(obj);
- int position = breakpoint_info->source_position();
-
- // Find the function for this breakpoint, and set the breakpoint.
- int func_index = shared->GetContainingFunction(position);
- DCHECK_LE(0, func_index);
- WasmFunction& func = shared->module()->functions[func_index];
- int offset_in_func = position - func.code.offset();
- WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
- }
-}
-
-namespace {
-
-enum AsmJsOffsetTableEntryLayout {
- kOTEByteOffset,
- kOTECallPosition,
- kOTENumberConvPosition,
- kOTESize
-};
-
-Handle<ByteArray> GetDecodedAsmJsOffsetTable(
- Handle<WasmSharedModuleData> shared, Isolate* isolate) {
- DCHECK(shared->is_asm_js());
- Handle<ByteArray> offset_table(shared->asm_js_offset_table(), isolate);
-
- // The last byte in the asm_js_offset_tables ByteArray tells whether it is
- // still encoded (0) or decoded (1).
- enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
- int table_type = offset_table->get(offset_table->length() - 1);
- DCHECK(table_type == Encoded || table_type == Decoded);
- if (table_type == Decoded) return offset_table;
-
- wasm::AsmJsOffsetsResult asm_offsets;
- {
- DisallowHeapAllocation no_gc;
- byte* bytes_start = offset_table->GetDataStartAddress();
- byte* bytes_end = reinterpret_cast<byte*>(
- reinterpret_cast<Address>(bytes_start) + offset_table->length() - 1);
- asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
- }
- // Wasm bytes must be valid and must contain asm.js offset table.
- DCHECK(asm_offsets.ok());
- DCHECK_GE(kMaxInt, asm_offsets.val.size());
- int num_functions = static_cast<int>(asm_offsets.val.size());
- int num_imported_functions =
- static_cast<int>(shared->module()->num_imported_functions);
- DCHECK_EQ(shared->module()->functions.size(),
- static_cast<size_t>(num_functions) + num_imported_functions);
- int num_entries = 0;
- for (int func = 0; func < num_functions; ++func) {
- size_t new_size = asm_offsets.val[func].size();
- DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
- num_entries += static_cast<int>(new_size);
- }
- // One byte to encode that this is a decoded table.
- DCHECK_GE(kMaxInt,
- 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
- int total_size = 1 + num_entries * kOTESize * kIntSize;
- Handle<ByteArray> decoded_table =
- isolate->factory()->NewByteArray(total_size, TENURED);
- decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
- shared->set_asm_js_offset_table(*decoded_table);
-
- int idx = 0;
- std::vector<WasmFunction>& wasm_funs = shared->module()->functions;
- for (int func = 0; func < num_functions; ++func) {
- std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
- asm_offsets.val[func];
- if (func_asm_offsets.empty()) continue;
- int func_offset = wasm_funs[num_imported_functions + func].code.offset();
- for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
- // Byte offsets must be strictly monotonously increasing:
- DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
- decoded_table->get_int(idx - kOTESize));
- decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
- decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
- decoded_table->set_int(idx + kOTENumberConvPosition,
- e.source_position_number_conversion);
- idx += kOTESize;
- }
- }
- DCHECK_EQ(total_size, idx * kIntSize + 1);
- return decoded_table;
-}
-
-} // namespace
-
-int WasmSharedModuleData::GetSourcePosition(Handle<WasmSharedModuleData> shared,
- uint32_t func_index,
- uint32_t byte_offset,
- bool is_at_number_conversion) {
- Isolate* isolate = shared->GetIsolate();
- const WasmModule* module = shared->module();
-
- if (!module->is_asm_js()) {
- // for non-asm.js modules, we just add the function's start offset
- // to make a module-relative position.
- return byte_offset + shared->GetFunctionOffset(func_index);
- }
-
- // asm.js modules have an additional offset table that must be searched.
- Handle<ByteArray> offset_table = GetDecodedAsmJsOffsetTable(shared, isolate);
-
- DCHECK_LT(func_index, module->functions.size());
- uint32_t func_code_offset = module->functions[func_index].code.offset();
- uint32_t total_offset = func_code_offset + byte_offset;
-
- // Binary search for the total byte offset.
- int left = 0; // inclusive
- int right = offset_table->length() / kIntSize / kOTESize; // exclusive
- DCHECK_LT(left, right);
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- int mid_entry = offset_table->get_int(kOTESize * mid);
- DCHECK_GE(kMaxInt, mid_entry);
- if (static_cast<uint32_t>(mid_entry) <= total_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // There should be an entry for each position that could show up on the stack
- // trace:
- DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
- int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
- return offset_table->get_int(kOTESize * left + idx);
-}
-
-v8::debug::WasmDisassembly WasmSharedModuleData::DisassembleFunction(
- int func_index) {
- DisallowHeapAllocation no_gc;
-
- if (func_index < 0 ||
- static_cast<uint32_t>(func_index) >= module()->functions.size())
- return {};
-
- SeqOneByteString* module_bytes_str = module_bytes();
- Vector<const byte> module_bytes(module_bytes_str->GetChars(),
- module_bytes_str->length());
-
- std::ostringstream disassembly_os;
- v8::debug::WasmDisassembly::OffsetTable offset_table;
-
- PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
- disassembly_os, &offset_table);
-
- return {disassembly_os.str(), std::move(offset_table)};
-}
-
-bool WasmSharedModuleData::GetPossibleBreakpoints(
- const v8::debug::Location& start, const v8::debug::Location& end,
- std::vector<v8::debug::BreakLocation>* locations) {
- DisallowHeapAllocation no_gc;
-
- std::vector<WasmFunction>& functions = module()->functions;
- if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
- (!end.IsEmpty() &&
- (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
- return false;
-
- // start_func_index, start_offset and end_func_index is inclusive.
- // end_offset is exclusive.
- // start_offset and end_offset are module-relative byte offsets.
- uint32_t start_func_index = start.GetLineNumber();
- if (start_func_index >= functions.size()) return false;
- int start_func_len = functions[start_func_index].code.length();
- if (start.GetColumnNumber() > start_func_len) return false;
- uint32_t start_offset =
- functions[start_func_index].code.offset() + start.GetColumnNumber();
- uint32_t end_func_index;
- uint32_t end_offset;
- if (end.IsEmpty()) {
- // Default: everything till the end of the Script.
- end_func_index = static_cast<uint32_t>(functions.size() - 1);
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- // If end is specified: Use it and check for valid input.
- end_func_index = static_cast<uint32_t>(end.GetLineNumber());
-
- // Special case: Stop before the start of the next function. Change to: Stop
- // at the end of the function before, such that we don't disassemble the
- // next function also.
- if (end.GetColumnNumber() == 0 && end_func_index > 0) {
- --end_func_index;
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- if (end_func_index >= functions.size()) return false;
- end_offset =
- functions[end_func_index].code.offset() + end.GetColumnNumber();
- if (end_offset > functions[end_func_index].code.end_offset())
- return false;
- }
- }
-
- AccountingAllocator alloc;
- Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = module_bytes()->GetChars();
-
- for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
- ++func_idx) {
- WasmFunction& func = functions[func_idx];
- if (func.code.length() == 0) continue;
-
- wasm::BodyLocalDecls locals(&tmp);
- wasm::BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(),
- &locals);
- DCHECK_LT(0u, locals.encoded_size);
- for (uint32_t offset : iterator.offsets()) {
- uint32_t total_offset = func.code.offset() + offset;
- if (total_offset >= end_offset) {
- DCHECK_EQ(end_func_index, func_idx);
- break;
- }
- if (total_offset < start_offset) continue;
- locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
- }
- }
- return true;
-}
-
-MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
- Isolate* isolate, Handle<WasmSharedModuleData> shared, int position) {
- if (!shared->has_breakpoint_infos()) return {};
-
- Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
- int insert_pos =
- FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
- if (insert_pos >= breakpoint_infos->length()) return {};
-
- Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
- isolate);
- if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
- Handle<BreakPointInfo> breakpoint_info =
- Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
- if (breakpoint_info->source_position() != position) return {};
-
- // There is no support for conditional break points. Just assume that every
- // break point always hits.
- Handle<Object> break_points(breakpoint_info->break_points(), isolate);
- if (break_points->IsFixedArray()) {
- return Handle<FixedArray>::cast(break_points);
- }
- Handle<FixedArray> break_points_hit = isolate->factory()->NewFixedArray(1);
- break_points_hit->set(0, *break_points);
- return break_points_hit;
-}
-
-Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
- WasmModule* module,
- wasm::ModuleEnv& env) {
- Handle<WasmCompiledModule> compiled_module = Handle<WasmCompiledModule>::cast(
- isolate->factory()->NewStruct(WASM_COMPILED_MODULE_TYPE, TENURED));
- compiled_module->set_weak_owning_instance(isolate->heap()->empty_weak_cell());
- {
- auto native_module =
- isolate->wasm_engine()->code_manager()->NewNativeModule(*module, env);
- Handle<Foreign> native_module_wrapper =
- Managed<wasm::NativeModule>::FromUniquePtr(isolate,
- std::move(native_module));
- compiled_module->set_native_module(*native_module_wrapper);
- }
-
- // TODO(mtrofin): copy the rest of the specialization parameters over.
- // We're currently OK because we're only using defaults.
- return compiled_module;
-}
-
-Handle<WasmCompiledModule> WasmCompiledModule::Clone(
- Isolate* isolate, Handle<WasmCompiledModule> module) {
- Handle<FixedArray> code_copy;
- Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
- isolate->factory()->NewStruct(WASM_COMPILED_MODULE_TYPE, TENURED));
- ret->set_weak_owning_instance(isolate->heap()->empty_weak_cell());
- ret->set_native_module(module->native_module());
-
- // construct the wrapper in 2 steps, because its construction may trigger GC,
- // which would shift the this pointer in set_native_module.
- Handle<Foreign> native_module_wrapper =
- Managed<wasm::NativeModule>::FromSharedPtr(
- isolate,
- Managed<wasm::NativeModule>::cast(module->native_module())->get());
- ret->set_native_module(*native_module_wrapper);
-
- return ret;
-}
-
-wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
- if (!has_native_module()) return nullptr;
- return Managed<wasm::NativeModule>::cast(native_module())->raw();
-}
-
-void WasmCompiledModule::Reset(Isolate* isolate,
- WasmCompiledModule* compiled_module) {
- DisallowHeapAllocation no_gc;
- compiled_module->reset_prev_instance();
- compiled_module->reset_next_instance();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (native_module == nullptr) return;
- native_module->SetExecutable(false);
-
- TRACE("Resetting %zu\n", native_module->instance_id);
- if (native_module->use_trap_handler()) {
- native_module->ReleaseProtectedInstructions();
- }
-}
-
-MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- wasm::WireBytesRef ref) {
- // TODO(wasm): cache strings from modules if it's a performance win.
- Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
- return ExtractUtf8StringFromModuleBytes(isolate, module_bytes, ref);
-}
-
-MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<SeqOneByteString> module_bytes,
- wasm::WireBytesRef ref) {
- DCHECK_GE(module_bytes->length(), ref.end_offset());
- // UTF8 validation happens at decode time.
- DCHECK(unibrow::Utf8::ValidateEncoding(
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress() +
- ref.offset()),
- ref.length()));
- DCHECK_GE(kMaxInt, ref.offset());
- DCHECK_GE(kMaxInt, ref.length());
- return isolate->factory()->NewStringFromUtf8SubString(
- module_bytes, static_cast<int>(ref.offset()),
- static_cast<int>(ref.length()));
-}
-
-void WasmCompiledModule::PrintInstancesChain() {
-#if DEBUG
- if (!FLAG_trace_wasm_instances) return;
- for (WasmCompiledModule* current = this; current != nullptr;) {
- PrintF("->%zu", current->GetNativeModule()->instance_id);
- if (!current->has_next_instance()) break;
- current = current->next_instance();
- }
- PrintF("\n");
-#endif
-}
-
-void WasmCompiledModule::InsertInChain(WasmModuleObject* module) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* original = module->compiled_module();
- set_next_instance(original);
- original->set_prev_instance(this);
-}
-
-void WasmCompiledModule::RemoveFromChain() {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
-
- Object* next = raw_next_instance();
- Object* prev = raw_prev_instance();
-
- if (!prev->IsUndefined(isolate)) {
- WasmCompiledModule::cast(prev)->set_raw_next_instance(next);
- }
- if (!next->IsUndefined(isolate)) {
- WasmCompiledModule::cast(next)->set_raw_prev_instance(prev);
- }
-}
-
-MaybeHandle<String> WasmSharedModuleData::GetModuleNameOrNull(
- Isolate* isolate, Handle<WasmSharedModuleData> shared) {
- WasmModule* module = shared->module();
- if (!module->name.is_set()) return {};
- return ExtractUtf8StringFromModuleBytes(isolate, shared, module->name);
-}
-
-MaybeHandle<String> WasmSharedModuleData::GetFunctionNameOrNull(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- uint32_t func_index) {
- DCHECK_LT(func_index, shared->module()->functions.size());
- wasm::WireBytesRef name =
- shared->module()->LookupName(shared->module_bytes(), func_index);
- if (!name.is_set()) return {};
- return ExtractUtf8StringFromModuleBytes(isolate, shared, name);
-}
-
-Handle<String> WasmSharedModuleData::GetFunctionName(
- Isolate* isolate, Handle<WasmSharedModuleData> shared,
- uint32_t func_index) {
- MaybeHandle<String> name = GetFunctionNameOrNull(isolate, shared, func_index);
- if (!name.is_null()) return name.ToHandleChecked();
- return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
-}
-
-Vector<const uint8_t> WasmSharedModuleData::GetRawFunctionName(
- uint32_t func_index) {
- DCHECK_GT(module()->functions.size(), func_index);
- SeqOneByteString* bytes = module_bytes();
- wasm::WireBytesRef name = module()->LookupName(bytes, func_index);
- DCHECK_GE(bytes->length(), name.end_offset());
- return Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(bytes->GetCharsAddress() + name.offset()),
- name.length());
-}
-
-int WasmSharedModuleData::GetFunctionOffset(uint32_t func_index) {
- std::vector<WasmFunction>& functions = module()->functions;
- if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
- DCHECK_GE(kMaxInt, functions[func_index].code.offset());
- return static_cast<int>(functions[func_index].code.offset());
-}
-
-int WasmSharedModuleData::GetContainingFunction(uint32_t byte_offset) {
- std::vector<WasmFunction>& functions = module()->functions;
-
- // Binary search for a function containing the given position.
- int left = 0; // inclusive
- int right = static_cast<int>(functions.size()); // exclusive
- if (right == 0) return false;
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- if (functions[mid].code.offset() <= byte_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // If the found function does not contains the given position, return -1.
- WasmFunction& func = functions[left];
- if (byte_offset < func.code.offset() ||
- byte_offset >= func.code.end_offset()) {
- return -1;
- }
-
- return left;
-}
-
-bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
- Script::PositionInfo* info) {
- int func_index = GetContainingFunction(position);
- if (func_index < 0) return false;
-
- WasmFunction& function = module()->functions[func_index];
-
- info->line = func_index;
- info->column = position - function.code.offset();
- info->line_start = function.code.offset();
- info->line_end = function.code.end_offset();
- return true;
-}
-
-void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
- if (!wasm::WasmCode::ShouldBeLogged(isolate)) return;
-
- wasm::NativeModule* native_module = GetNativeModule();
- if (native_module == nullptr) return;
- // TODO(titzer): we skip the logging of the import wrappers
- // here, but they should be included somehow.
- const uint32_t start =
- native_module->shared_module_data()->module()->num_imported_functions;
- const uint32_t number_of_codes = native_module->function_count();
- for (uint32_t i = start; i < number_of_codes; i++) {
- wasm::WasmCode* code = native_module->code(i);
- if (code == nullptr) continue;
- code->LogCode(isolate);
- }
-}
-
#undef TRACE
#undef TRACE_IFT
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index b65f4649b2..ee884ec0dd 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -36,7 +36,6 @@ class BreakPoint;
class JSArrayBuffer;
class FixedArrayOfWeakCells;
class SeqOneByteString;
-class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
@@ -44,7 +43,7 @@ template <class CppType>
class Managed;
#define DECL_OPTIONAL_ACCESSORS(name, type) \
- INLINE(bool has_##name()); \
+ V8_INLINE bool has_##name(); \
DECL_ACCESSORS(name, type)
// An entry in an indirect function table (IFT).
@@ -78,7 +77,7 @@ class IndirectFunctionTableEntry {
// - target = pointer to wasm-to-js wrapper code entrypoint
// - an imported wasm function from another instance, which has fields
// - instance = target instance
-// - target = entrypoint to wasm code of the function
+// - target = entrypoint for the function
class ImportedFunctionEntry {
public:
inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);
@@ -105,25 +104,45 @@ class WasmModuleObject : public JSObject {
public:
DECL_CAST(WasmModuleObject)
- // Shared compiled code between multiple WebAssembly.Module objects.
- DECL_ACCESSORS(compiled_module, WasmCompiledModule)
+ DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
DECL_ACCESSORS(export_wrappers, FixedArray)
- DECL_ACCESSORS(shared, WasmSharedModuleData)
+ DECL_ACCESSORS(script, Script)
+ DECL_ACCESSORS(weak_instance_list, WeakArrayList)
+ DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
+ DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
+ inline wasm::NativeModule* native_module() const;
+ inline const wasm::WasmModule* module() const;
+ inline void reset_breakpoint_infos();
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmModuleObject)
+ DECL_VERIFIER(WasmModuleObject)
// Layout description.
-#define WASM_MODULE_OBJECT_FIELDS(V) \
- V(kCompiledModuleOffset, kPointerSize) \
- V(kExportWrappersOffset, kPointerSize) \
- V(kSharedOffset, kPointerSize) \
+#define WASM_MODULE_OBJECT_FIELDS(V) \
+ V(kNativeModuleOffset, kPointerSize) \
+ V(kExportWrappersOffset, kPointerSize) \
+ V(kScriptOffset, kPointerSize) \
+ V(kWeakInstanceListOffset, kPointerSize) \
+ V(kAsmJsOffsetTableOffset, kPointerSize) \
+ V(kBreakPointInfosOffset, kPointerSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
WASM_MODULE_OBJECT_FIELDS)
#undef WASM_MODULE_OBJECT_FIELDS
+ // Creates a new {WasmModuleObject} with a new {NativeModule} underneath.
static Handle<WasmModuleObject> New(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- Handle<FixedArray> export_wrappers, Handle<WasmSharedModuleData> shared);
+ Isolate* isolate, std::shared_ptr<const wasm::WasmModule> module,
+ wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes,
+ Handle<Script> script, Handle<ByteArray> asm_js_offset_table);
+
+ // Creates a new {WasmModuleObject} for an existing {NativeModule} that is
+ // reference counted and might be shared between multiple Isolates.
+ static Handle<WasmModuleObject> New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<Script> script);
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
@@ -134,8 +153,85 @@ class WasmModuleObject : public JSObject {
static bool SetBreakPoint(Handle<WasmModuleObject>, int* position,
Handle<BreakPoint> break_point);
- static void ValidateStateForTesting(Isolate* isolate,
- Handle<WasmModuleObject> module);
+ // Check whether this module was generated from asm.js source.
+ inline bool is_asm_js();
+
+ static void AddBreakpoint(Handle<WasmModuleObject>, int position,
+ Handle<BreakPoint> break_point);
+
+ static void SetBreakpointsOnNewInstance(Handle<WasmModuleObject>,
+ Handle<WasmInstanceObject>);
+
+ // Get the module name, if set. Returns an empty handle otherwise.
+ static MaybeHandle<String> GetModuleNameOrNull(Isolate*,
+ Handle<WasmModuleObject>);
+
+ // Get the function name of the function identified by the given index.
+ // Returns a null handle if the function is unnamed or the name is not a valid
+ // UTF-8 string.
+ static MaybeHandle<String> GetFunctionNameOrNull(Isolate*,
+ Handle<WasmModuleObject>,
+ uint32_t func_index);
+
+ // Get the function name of the function identified by the given index.
+ // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+ // valid UTF-8 string.
+ static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>,
+ uint32_t func_index);
+
+ // Get the raw bytes of the function name of the function identified by the
+ // given index.
+ // Meant to be used for debugging or frame printing.
+ // Does not allocate, hence gc-safe.
+ Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+
+ // Return the byte offset of the function identified by the given index.
+ // The offset will be relative to the start of the module bytes.
+ // Returns -1 if the function index is invalid.
+ int GetFunctionOffset(uint32_t func_index);
+
+ // Returns the function containing the given byte offset.
+ // Returns -1 if the byte offset is not contained in any function of this
+ // module.
+ int GetContainingFunction(uint32_t byte_offset);
+
+ // Translate from byte offset in the module to function number and byte offset
+ // within that function, encoded as line and column in the position info.
+ // Returns true if the position is valid inside this module, false otherwise.
+ bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
+
+ // Get the source position from a given function index and byte offset,
+ // for either asm.js or pure WASM modules.
+ static int GetSourcePosition(Handle<WasmModuleObject>, uint32_t func_index,
+ uint32_t byte_offset,
+ bool is_at_number_conversion);
+
+ // Compute the disassembly of a wasm function.
+ // Returns the disassembly string and a list of <byte_offset, line, column>
+ // entries, mapping wasm byte offsets to line and column in the disassembly.
+ // The list is guaranteed to be ordered by the byte_offset.
+ // Returns an empty string and empty vector if the function index is invalid.
+ debug::WasmDisassembly DisassembleFunction(int func_index);
+
+ // Extract a portion of the wire bytes as UTF-8 string.
+ // Returns a null handle if the respective bytes do not form a valid UTF-8
+ // string.
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmModuleObject>, wasm::WireBytesRef ref);
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Vector<const uint8_t> wire_byte,
+ wasm::WireBytesRef ref);
+
+ // Get a list of all possible breakpoints within a given range of this module.
+ bool GetPossibleBreakpoints(const debug::Location& start,
+ const debug::Location& end,
+ std::vector<debug::BreakLocation>* locations);
+
+ // Return an empty handle if no breakpoint is hit at that location, or a
+ // FixedArray with all hit breakpoint objects.
+ static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
+ Handle<WasmModuleObject>,
+ int position);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -210,6 +306,10 @@ class WasmMemoryObject : public JSObject {
uint32_t current_pages();
inline bool has_maximum_pages();
+ // Return whether the underlying backing store has guard regions large enough
+ // to be used with trap handlers.
+ bool has_full_guard_region(Isolate* isolate);
+
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
@@ -274,7 +374,6 @@ class WasmInstanceObject : public JSObject {
public:
DECL_CAST(WasmInstanceObject)
- DECL_ACCESSORS(compiled_module, WasmCompiledModule)
DECL_ACCESSORS(module_object, WasmModuleObject)
DECL_ACCESSORS(exports_object, JSObject)
DECL_ACCESSORS(native_context, Context)
@@ -287,16 +386,22 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(imported_function_callables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
- DECL_OPTIONAL_ACCESSORS(managed_indirect_patcher, Foreign)
+ DECL_ACCESSORS(undefined_value, Oddball)
+ DECL_ACCESSORS(null_value, Oddball)
+ DECL_ACCESSORS(centry_stub, Code)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, uint32_t)
DECL_PRIMITIVE_ACCESSORS(memory_mask, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(roots_array_address, Address)
+ DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
+ DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(imported_function_targets, Address*)
DECL_PRIMITIVE_ACCESSORS(globals_start, byte*)
DECL_PRIMITIVE_ACCESSORS(imported_mutable_globals, Address*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
+ DECL_PRIMITIVE_ACCESSORS(jump_table_adjusted_start, Address)
// Dispatched behavior.
DECL_PRINTER(WasmInstanceObject)
@@ -304,7 +409,6 @@ class WasmInstanceObject : public JSObject {
// Layout description.
#define WASM_INSTANCE_OBJECT_FIELDS(V) \
- V(kCompiledModuleOffset, kPointerSize) \
V(kModuleObjectOffset, kPointerSize) \
V(kExportsObjectOffset, kPointerSize) \
V(kNativeContextOffset, kPointerSize) \
@@ -317,16 +421,22 @@ class WasmInstanceObject : public JSObject {
V(kImportedFunctionCallablesOffset, kPointerSize) \
V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
V(kManagedNativeAllocationsOffset, kPointerSize) \
- V(kManagedIndirectPatcherOffset, kPointerSize) \
+ V(kUndefinedValueOffset, kPointerSize) \
+ V(kNullValueOffset, kPointerSize) \
+ V(kCEntryStubOffset, kPointerSize) \
V(kFirstUntaggedOffset, 0) /* marker */ \
V(kMemoryStartOffset, kPointerSize) /* untagged */ \
V(kMemorySizeOffset, kUInt32Size) /* untagged */ \
V(kMemoryMaskOffset, kUInt32Size) /* untagged */ \
+ V(kRootsArrayAddressOffset, kPointerSize) /* untagged */ \
+ V(kStackLimitAddressOffset, kPointerSize) /* untagged */ \
+ V(kRealStackLimitAddressOffset, kPointerSize) /* untagged */ \
V(kImportedFunctionTargetsOffset, kPointerSize) /* untagged */ \
V(kGlobalsStartOffset, kPointerSize) /* untagged */ \
V(kImportedMutableGlobalsOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \
+ V(kJumpTableAdjustedStartOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \
V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
V(kSize, 0)
@@ -335,7 +445,7 @@ class WasmInstanceObject : public JSObject {
WASM_INSTANCE_OBJECT_FIELDS)
#undef WASM_INSTANCE_OBJECT_FIELDS
- V8_EXPORT_PRIVATE wasm::WasmModule* module();
+ V8_EXPORT_PRIVATE const wasm::WasmModule* module();
static bool EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, uint32_t minimum_size);
@@ -348,12 +458,7 @@ class WasmInstanceObject : public JSObject {
// If no debug info exists yet, it is created automatically.
static Handle<WasmDebugInfo> GetOrCreateDebugInfo(Handle<WasmInstanceObject>);
- static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>,
- Handle<WasmCompiledModule>);
-
- static void ValidateInstancesChainForTesting(
- Isolate* isolate, Handle<WasmModuleObject> module_obj,
- int instance_count);
+ static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>);
static void InstallFinalizer(Isolate* isolate,
Handle<WasmInstanceObject> instance);
@@ -381,11 +486,6 @@ class WasmExportedFunction : public JSFunction {
int func_index, int arity,
Handle<Code> export_wrapper);
- // TODO(clemensh): Remove this. There might not be a WasmCode object available
- // yet.
- // TODO(all): Replace all uses by {GetWasmCallTarget()}.
- wasm::WasmCode* GetWasmCode();
-
Address GetWasmCallTarget();
};
@@ -405,10 +505,10 @@ class WasmExportedFunctionData : public Struct {
DECL_VERIFIER(WasmExportedFunctionData)
// Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
- V(kWrapperCodeOffset, kPointerSize) \
- V(kInstanceOffset, kPointerSize) \
- V(kFunctionIndexOffset, kPointerSize) \
+#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
+ V(kWrapperCodeOffset, kPointerSize) \
+ V(kInstanceOffset, kPointerSize) \
+ V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -416,199 +516,6 @@ class WasmExportedFunctionData : public Struct {
#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
};
-// Information shared by all WasmCompiledModule objects for the same module.
-class WasmSharedModuleData : public Struct {
- public:
- DECL_ACCESSORS(managed_module, Object)
- wasm::WasmModule* module() const;
- DECL_ACCESSORS(module_bytes, SeqOneByteString)
- DECL_ACCESSORS(script, Script)
- DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
- DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
- inline void reset_breakpoint_infos();
-
- DECL_CAST(WasmSharedModuleData)
-
- // Dispatched behavior.
- DECL_PRINTER(WasmSharedModuleData)
- DECL_VERIFIER(WasmSharedModuleData)
-
-// Layout description.
-#define WASM_SHARED_MODULE_DATA_FIELDS(V) \
- V(kManagedModuleOffset, kPointerSize) \
- V(kModuleBytesOffset, kPointerSize) \
- V(kScriptOffset, kPointerSize) \
- V(kAsmJsOffsetTableOffset, kPointerSize) \
- V(kBreakPointInfosOffset, kPointerSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- WASM_SHARED_MODULE_DATA_FIELDS)
-#undef WASM_SHARED_MODULE_DATA_FIELDS
-
- // Check whether this module was generated from asm.js source.
- bool is_asm_js();
-
- static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
- Handle<BreakPoint> break_point);
-
- static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
- Handle<WasmInstanceObject>);
-
- static Handle<WasmSharedModuleData> New(
- Isolate* isolate, Handle<Foreign> managed_module,
- Handle<SeqOneByteString> module_bytes, Handle<Script> script,
- Handle<ByteArray> asm_js_offset_table);
-
- // Get the module name, if set. Returns an empty handle otherwise.
- static MaybeHandle<String> GetModuleNameOrNull(Isolate*,
- Handle<WasmSharedModuleData>);
-
- // Get the function name of the function identified by the given index.
- // Returns a null handle if the function is unnamed or the name is not a valid
- // UTF-8 string.
- static MaybeHandle<String> GetFunctionNameOrNull(Isolate*,
- Handle<WasmSharedModuleData>,
- uint32_t func_index);
-
- // Get the function name of the function identified by the given index.
- // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
- // valid UTF-8 string.
- static Handle<String> GetFunctionName(Isolate*, Handle<WasmSharedModuleData>,
- uint32_t func_index);
-
- // Get the raw bytes of the function name of the function identified by the
- // given index.
- // Meant to be used for debugging or frame printing.
- // Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
-
- // Return the byte offset of the function identified by the given index.
- // The offset will be relative to the start of the module bytes.
- // Returns -1 if the function index is invalid.
- int GetFunctionOffset(uint32_t func_index);
-
- // Returns the function containing the given byte offset.
- // Returns -1 if the byte offset is not contained in any function of this
- // module.
- int GetContainingFunction(uint32_t byte_offset);
-
- // Translate from byte offset in the module to function number and byte offset
- // within that function, encoded as line and column in the position info.
- // Returns true if the position is valid inside this module, false otherwise.
- bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
-
- // Get the source position from a given function index and byte offset,
- // for either asm.js or pure WASM modules.
- static int GetSourcePosition(Handle<WasmSharedModuleData>,
- uint32_t func_index, uint32_t byte_offset,
- bool is_at_number_conversion);
-
- // Compute the disassembly of a wasm function.
- // Returns the disassembly string and a list of <byte_offset, line, column>
- // entries, mapping wasm byte offsets to line and column in the disassembly.
- // The list is guaranteed to be ordered by the byte_offset.
- // Returns an empty string and empty vector if the function index is invalid.
- debug::WasmDisassembly DisassembleFunction(int func_index);
-
- // Extract a portion of the wire bytes as UTF-8 string.
- // Returns a null handle if the respective bytes do not form a valid UTF-8
- // string.
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmSharedModuleData>, wasm::WireBytesRef ref);
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<SeqOneByteString> module_bytes,
- wasm::WireBytesRef ref);
-
- // Get a list of all possible breakpoints within a given range of this module.
- bool GetPossibleBreakpoints(const debug::Location& start,
- const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
-
- // Return an empty handle if no breakpoint is hit at that location, or a
- // FixedArray with all hit breakpoint objects.
- static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
- Handle<WasmSharedModuleData>,
- int position);
-};
-
-// This represents the set of wasm compiled functions, together
-// with all the information necessary for re-specializing them.
-class WasmCompiledModule : public Struct {
- public:
- DECL_CAST(WasmCompiledModule)
-
- // Dispatched behavior.
- DECL_PRINTER(WasmCompiledModule)
- DECL_VERIFIER(WasmCompiledModule)
-
-// Layout description.
-#define WASM_COMPILED_MODULE_FIELDS(V) \
- V(kNextInstanceOffset, kPointerSize) \
- V(kPrevInstanceOffset, kPointerSize) \
- V(kOwningInstanceOffset, kPointerSize) \
- V(kNativeModuleOffset, kPointerSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- WASM_COMPILED_MODULE_FIELDS)
-#undef WASM_COMPILED_MODULE_FIELDS
-
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, SETTER_MODIFIER) \
- public: \
- inline TYPE* NAME() const; \
- inline bool has_##NAME() const; \
- inline void reset_##NAME(); \
- \
- SETTER_MODIFIER: \
- inline void set_##NAME(TYPE* value, \
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
-#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, public)
-
-#define WCM_CONST_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, private)
-
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, public) \
- \
- public: \
- inline TYPE* NAME() const;
-
- // Add values here if they are required for creating new instances or
- // for deserialization, and if they are serializable.
- // By default, instance values go to WasmInstanceObject, however, if
- // we embed the generated code with a value, then we track that value here.
- WCM_CONST_OBJECT(WasmCompiledModule, next_instance)
- WCM_CONST_OBJECT(WasmCompiledModule, prev_instance)
- WCM_WEAK_LINK(WasmInstanceObject, owning_instance)
- WCM_OBJECT(Foreign, native_module)
-
- public:
- static Handle<WasmCompiledModule> New(Isolate* isolate,
- wasm::WasmModule* module,
- wasm::ModuleEnv& env);
-
- static Handle<WasmCompiledModule> Clone(Isolate* isolate,
- Handle<WasmCompiledModule> module);
- static void Reset(Isolate* isolate, WasmCompiledModule* module);
-
- bool has_instance() const;
-
- wasm::NativeModule* GetNativeModule() const;
- void InsertInChain(WasmModuleObject*);
- void RemoveFromChain();
-
- DECL_ACCESSORS(raw_next_instance, Object);
- DECL_ACCESSORS(raw_prev_instance, Object);
-
- void PrintInstancesChain();
-
- void LogWasmCodes(Isolate* isolate);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
-};
-
class WasmDebugInfo : public Struct {
public:
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
@@ -704,11 +611,6 @@ class WasmDebugInfo : public Struct {
};
#undef DECL_OPTIONAL_ACCESSORS
-#undef WCM_CONST_OBJECT
-#undef WCM_LARGE_NUMBER
-#undef WCM_OBJECT
-#undef WCM_OBJECT_OR_WEAK
-#undef WCM_WEAK_LINK
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index a1f5e4542c..22c906e270 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -107,7 +107,6 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_FLOAT_OP(CopySign, "copysign")
CASE_REF_OP(Null, "null")
CASE_REF_OP(IsNull, "is_null")
- CASE_REF_OP(Eq, "eq")
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
@@ -354,12 +353,12 @@ bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
switch (opcode) {
case kExprRefNull:
case kExprRefIsNull:
- case kExprRefEq:
return true;
default:
return false;
}
}
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
@@ -398,7 +397,7 @@ FOREACH_SIGNATURE(DECLARE_SIG)
#undef DECLARE_SIG
#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-constexpr const FunctionSig* kSimpleExprSigs[] = {
+constexpr const FunctionSig* kCachedSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
#undef DECLARE_SIG_ENTRY
@@ -407,10 +406,11 @@ constexpr const FunctionSig* kSimpleExprSigs[] = {
// encapsulate these constexpr functions in functors.
// TODO(clemensh): Remove this once we require gcc >= 5.0.
-struct GetOpcodeSigIndex {
+struct GetShortOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- return FOREACH_SIMPLE_OPCODE(CASE) kSigEnum_None;
+ return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
+ kSigEnum_None;
#undef CASE
}
};
@@ -426,7 +426,8 @@ struct GetAsmJsOpcodeSigIndex {
struct GetSimdOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
+ return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
+ kSigEnum_None;
#undef CASE
}
};
@@ -447,8 +448,8 @@ struct GetNumericOpcodeSigIndex {
}
};
-constexpr std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
- base::make_array<256>(GetOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
+ base::make_array<256>(GetShortOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
base::make_array<256>(GetAsmJsOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
@@ -458,30 +459,43 @@ constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
base::make_array<256>(GetNumericOpcodeSigIndex{});
+// Computes a direct pointer to a cached signature for a simple opcode.
+struct GetSimpleOpcodeSig {
+ constexpr const FunctionSig* operator()(byte opcode) const {
+#define CASE(name, opc, sig) opcode == opc ? &kSig_##sig:
+ return FOREACH_SIMPLE_OPCODE(CASE) nullptr;
+#undef CASE
+ }
+};
+
} // namespace
+const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs =
+ base::make_array<256>(GetSimpleOpcodeSig{});
+
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
switch (opcode >> 8) {
+ case 0:
+ return const_cast<FunctionSig*>(kCachedSigs[kShortSigTable[opcode]]);
case kSimdPrefix:
return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimdExprSigTable[opcode & 0xFF]]);
+ kCachedSigs[kSimdExprSigTable[opcode & 0xFF]]);
case kAtomicPrefix:
return const_cast<FunctionSig*>(
- kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xFF]]);
+ kCachedSigs[kAtomicExprSigTable[opcode & 0xFF]]);
case kNumericPrefix:
return const_cast<FunctionSig*>(
- kSimpleExprSigs[kNumericExprSigTable[opcode & 0xFF]]);
+ kCachedSigs[kNumericExprSigTable[opcode & 0xFF]]);
default:
- DCHECK_GT(kSimpleExprSigTable.size(), opcode);
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
+ UNREACHABLE(); // invalid prefix.
+ return nullptr;
}
}
FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
DCHECK_GT(kSimpleAsmjsExprSigTable.size(), opcode);
return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
+ kCachedSigs[kSimpleAsmjsExprSigTable[opcode]]);
}
// Define constexpr arrays.
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 5faf715355..dff02f8147 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -218,14 +218,15 @@ using WasmName = Vector<const char>;
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
- V(F64ReinterpretI64, 0xbf, d_l) \
- V(I32SExtendI8, 0xc0, i_i) \
- V(I32SExtendI16, 0xc1, i_i) \
- V(I64SExtendI8, 0xc2, l_l) \
- V(I64SExtendI16, 0xc3, l_l) \
- V(I64SExtendI32, 0xc4, l_l) \
- V(RefIsNull, 0xd1, i_r) \
- V(RefEq, 0xd2, i_rr)
+ V(F64ReinterpretI64, 0xbf, d_l)
+
+#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
+ V(I32SExtendI8, 0xc0, i_i) \
+ V(I32SExtendI16, 0xc1, i_i) \
+ V(I64SExtendI8, 0xc2, l_l) \
+ V(I64SExtendI16, 0xc3, l_l) \
+ V(I64SExtendI32, 0xc4, l_l) \
+ V(RefIsNull, 0xd1, i_r)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
@@ -374,25 +375,31 @@ using WasmName = Vector<const char>;
V(S1x16AnyTrue, 0xfd90, i_s) \
V(S1x16AllTrue, 0xfd91, i_s)
-#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
- V(F32x4ExtractLane, 0xfd01, _) \
- V(F32x4ReplaceLane, 0xfd02, _) \
- V(I32x4ExtractLane, 0xfd1c, _) \
- V(I32x4ReplaceLane, 0xfd1d, _) \
- V(I32x4Shl, 0xfd24, _) \
- V(I32x4ShrS, 0xfd25, _) \
- V(I32x4ShrU, 0xfd32, _) \
- V(I16x8ExtractLane, 0xfd39, _) \
- V(I16x8ReplaceLane, 0xfd3a, _) \
- V(I16x8Shl, 0xfd43, _) \
- V(I16x8ShrS, 0xfd44, _) \
- V(I16x8ShrU, 0xfd52, _) \
- V(I8x16ExtractLane, 0xfd58, _) \
- V(I8x16ReplaceLane, 0xfd59, _) \
- V(I8x16Shl, 0xfd62, _) \
- V(I8x16ShrS, 0xfd63, _) \
+#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
+ V(F32x4ExtractLane, 0xfd01, _) \
+ V(I32x4ExtractLane, 0xfd1c, _) \
+ V(I32x4Shl, 0xfd24, _) \
+ V(I32x4ShrS, 0xfd25, _) \
+ V(I32x4ShrU, 0xfd32, _) \
+ V(I16x8ExtractLane, 0xfd39, _) \
+ V(I16x8Shl, 0xfd43, _) \
+ V(I16x8ShrS, 0xfd44, _) \
+ V(I16x8ShrU, 0xfd52, _) \
+ V(I8x16ExtractLane, 0xfd58, _) \
+ V(I8x16Shl, 0xfd62, _) \
+ V(I8x16ShrS, 0xfd63, _) \
V(I8x16ShrU, 0xfd71, _)
+#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
+ V(F32x4ReplaceLane, 0xfd02, _) \
+ V(I32x4ReplaceLane, 0xfd1d, _) \
+ V(I16x8ReplaceLane, 0xfd3a, _) \
+ V(I8x16ReplaceLane, 0xfd59, _)
+
+#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
+ FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
+
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd6b, s_ss)
#define FOREACH_SIMD_MEM_OPCODE(V) \
@@ -479,6 +486,7 @@ using WasmName = Vector<const char>;
FOREACH_CONTROL_OPCODE(V) \
FOREACH_MISC_OPCODE(V) \
FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
FOREACH_STORE_MEM_OPCODE(V) \
FOREACH_LOAD_MEM_OPCODE(V) \
FOREACH_MISC_MEM_OPCODE(V) \
@@ -526,8 +534,7 @@ using WasmName = Vector<const char>;
V(l_il, kWasmI64, kWasmI32, kWasmI64) \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
- V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_rr, kWasmI32, kWasmAnyRef, kWasmAnyRef)
+ V(i_r, kWasmI32, kWasmAnyRef)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -536,6 +543,7 @@ using WasmName = Vector<const char>;
V(s_i, kWasmS128, kWasmI32) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
+ V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
@@ -553,17 +561,6 @@ enum WasmOpcode {
#undef DECLARE_PREFIX
};
-// The reason for a trap.
-#define FOREACH_WASM_TRAPREASON(V) \
- V(TrapUnreachable) \
- V(TrapMemOutOfBounds) \
- V(TrapDivByZero) \
- V(TrapDivUnrepresentable) \
- V(TrapRemByZero) \
- V(TrapFloatUnrepresentable) \
- V(TrapFuncInvalid) \
- V(TrapFuncSigMismatch)
-
enum TrapReason {
#define DECLARE_ENUM(name) k##name,
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
@@ -571,6 +568,8 @@ enum TrapReason {
#undef DECLARE_ENUM
};
+extern const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs;
+
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 847ac06483..3c21a5d223 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -5,7 +5,6 @@
#include "src/wasm/wasm-serialization.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -48,8 +47,8 @@ class Writer {
WriteUnalignedValue(reinterpret_cast<Address>(current_location()), value);
pos_ += sizeof(T);
if (FLAG_wasm_trace_serialization) {
- OFStream os(stdout);
- os << "wrote: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
+ StdoutStream{} << "wrote: " << (size_t)value << " sized: " << sizeof(T)
+ << std::endl;
}
}
@@ -60,8 +59,8 @@ class Writer {
pos_ += v.size();
}
if (FLAG_wasm_trace_serialization) {
- OFStream os(stdout);
- os << "wrote vector of " << v.size() << " elements" << std::endl;
+ StdoutStream{} << "wrote vector of " << v.size() << " elements"
+ << std::endl;
}
}
@@ -92,8 +91,8 @@ class Reader {
ReadUnalignedValue<T>(reinterpret_cast<Address>(current_location()));
pos_ += sizeof(T);
if (FLAG_wasm_trace_serialization) {
- OFStream os(stdout);
- os << "read: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
+ StdoutStream{} << "read: " << (size_t)value << " sized: " << sizeof(T)
+ << std::endl;
}
return value;
}
@@ -105,8 +104,8 @@ class Reader {
pos_ += v.size();
}
if (FLAG_wasm_trace_serialization) {
- OFStream os(stdout);
- os << "read vector of " << v.size() << " elements" << std::endl;
+ StdoutStream{} << "read vector of " << v.size() << " elements"
+ << std::endl;
}
}
@@ -158,6 +157,8 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
Address addr = static_cast<Address>(tag);
if (rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE) {
rinfo->set_target_external_reference(addr, SKIP_ICACHE_FLUSH);
+ } else if (rinfo->rmode() == RelocInfo::WASM_STUB_CALL) {
+ rinfo->set_wasm_stub_call_address(addr, SKIP_ICACHE_FLUSH);
} else {
rinfo->set_target_address(addr, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
@@ -177,16 +178,21 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
}
#else
- Address addr = rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE
- ? rinfo->target_external_reference()
- : rinfo->target_address();
+ Address addr;
+ if (rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE) {
+ addr = rinfo->target_external_reference();
+ } else if (rinfo->rmode() == RelocInfo::WASM_STUB_CALL) {
+ addr = rinfo->wasm_stub_call_address();
+ } else {
+ addr = rinfo->target_address();
+ }
return static_cast<uint32_t>(addr);
#endif
}
constexpr size_t kHeaderSize =
sizeof(uint32_t) + // total wasm function count
- sizeof(uint32_t); // imported functions - i.e. index of first wasm function
+ sizeof(uint32_t); // imported functions (index of first wasm function)
constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // size of code section
@@ -200,14 +206,6 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // protected instructions size
sizeof(WasmCode::Tier); // tier
-// Bitfields used for encoding stub and builtin ids in a tag. We only use 26
-// bits total as ARM64 can only encode 26 bits in branch immediate instructions.
-class IsStubIdField : public BitField<bool, 0, 1> {};
-class StubOrBuiltinIdField
- : public BitField<uint32_t, IsStubIdField::kNext, 25> {};
-static_assert(StubOrBuiltinIdField::kNext == 26,
- "ARM64 only supports 26 bits for this field");
-
} // namespace
class V8_EXPORT_PRIVATE NativeModuleSerializer {
@@ -219,25 +217,17 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
bool Write(Writer* writer);
private:
- size_t MeasureCopiedStubs() const;
size_t MeasureCode(const WasmCode*) const;
-
void WriteHeader(Writer* writer);
- void WriteCopiedStubs(Writer* writer);
void WriteCode(const WasmCode*, Writer* writer);
- uint32_t EncodeBuiltinOrStub(Address);
-
Isolate* const isolate_;
const NativeModule* const native_module_;
bool write_called_;
- // wasm and copied stubs reverse lookup
- std::map<Address, uint32_t> wasm_targets_lookup_;
- // immovable builtins and runtime entries lookup
+ // Reverse lookup tables for embedded addresses.
+ std::map<Address, uint32_t> wasm_stub_targets_lookup_;
std::map<Address, uint32_t> reference_table_lookup_;
- std::map<Address, uint32_t> stub_lookup_;
- std::map<Address, uint32_t> builtin_lookup_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
@@ -249,76 +239,47 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
+ for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Address addr =
+ native_module_->runtime_stub(static_cast<WasmCode::RuntimeStubId>(i))
+ ->instruction_start();
+ wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
+ }
ExternalReferenceTable* table = isolate_->heap()->external_reference_table();
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
reference_table_lookup_.insert(std::make_pair(addr, i));
}
- // Defer populating stub_lookup_ to when we write the stubs.
- for (auto pair : native_module_->trampolines_) {
- v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
- int builtin_index = code->builtin_index();
- if (builtin_index >= 0) {
- uint32_t tag = static_cast<uint32_t>(builtin_index);
- builtin_lookup_.insert(std::make_pair(pair.second, tag));
- }
- }
-}
-
-size_t NativeModuleSerializer::MeasureCopiedStubs() const {
- size_t size = sizeof(uint32_t); // number of stubs
- for (auto pair : native_module_->trampolines_) {
- v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
- int builtin_index = code->builtin_index();
- if (builtin_index < 0) size += sizeof(uint32_t); // stub key
- }
- return size;
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- return code->instructions().size() + code->reloc_info().size() +
- code->source_positions().size() +
+ if (code == nullptr) return sizeof(size_t);
+ DCHECK_EQ(WasmCode::kFunction, code->kind());
+ return kCodeHeaderSize + code->instructions().size() +
+ code->reloc_info().size() + code->source_positions().size() +
code->protected_instructions().size() *
sizeof(trap_handler::ProtectedInstructionData);
}
size_t NativeModuleSerializer::Measure() const {
- size_t size = kHeaderSize + MeasureCopiedStubs();
- uint32_t first_wasm_fn = native_module_->num_imported_functions();
- uint32_t total_fns = native_module_->function_count();
- for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
- size += kCodeHeaderSize;
- size += MeasureCode(native_module_->code(i));
+ size_t size = kHeaderSize;
+ for (WasmCode* code : native_module_->code_table()) {
+ size += MeasureCode(code);
}
return size;
}
void NativeModuleSerializer::WriteHeader(Writer* writer) {
- writer->Write(native_module_->function_count());
+ writer->Write(native_module_->num_functions());
writer->Write(native_module_->num_imported_functions());
}
-void NativeModuleSerializer::WriteCopiedStubs(Writer* writer) {
- // Write the number of stubs and their keys.
- // TODO(all) Serialize the stubs as WasmCode.
- size_t stubs_size = MeasureCopiedStubs();
- // Get the stub count from the number of keys.
- size_t num_stubs = (stubs_size - sizeof(uint32_t)) / sizeof(uint32_t);
- writer->Write(static_cast<uint32_t>(num_stubs));
- uint32_t stub_id = 0;
-
- for (auto pair : native_module_->trampolines_) {
- v8::internal::Code* code = Code::GetCodeFromTargetAddress(pair.first);
- int builtin_index = code->builtin_index();
- if (builtin_index < 0) {
- stub_lookup_.insert(std::make_pair(pair.second, stub_id));
- writer->Write(code->stub_key());
- ++stub_id;
- }
- }
-}
-
void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
+ if (code == nullptr) {
+ writer->Write(size_t{0});
+ return;
+ }
+ DCHECK_EQ(WasmCode::kFunction, code->kind());
// Write the size of the entire code section, followed by the code header.
writer->Write(MeasureCode(code));
writer->Write(code->constant_pool_offset());
@@ -339,10 +300,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
// Write the reloc info, source positions, and protected code.
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
- writer->WriteVector(
- {reinterpret_cast<const byte*>(code->protected_instructions().data()),
- sizeof(trap_handler::ProtectedInstructionData) *
- code->protected_instructions().size()});
+ writer->WriteVector(Vector<byte>::cast(code->protected_instructions()));
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
@@ -355,9 +313,8 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
#endif
memcpy(code_start, code->instructions().start(), code_size);
// Relocate the code.
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
@@ -370,21 +327,17 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
!iter.done(); iter.next(), orig_iter.next()) {
RelocInfo::Mode mode = orig_iter.rinfo()->rmode();
switch (mode) {
- case RelocInfo::CODE_TARGET: {
- Address orig_target = orig_iter.rinfo()->target_address();
- uint32_t tag = EncodeBuiltinOrStub(orig_target);
- SetWasmCalleeTag(iter.rinfo(), tag);
- } break;
case RelocInfo::WASM_CALL: {
Address orig_target = orig_iter.rinfo()->wasm_call_address();
- uint32_t tag = wasm_targets_lookup_[orig_target];
+ uint32_t tag =
+ native_module_->GetFunctionIndexFromJumpTableSlot(orig_target);
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
- case RelocInfo::RUNTIME_ENTRY: {
- Address orig_target = orig_iter.rinfo()->target_address();
- auto ref_iter = reference_table_lookup_.find(orig_target);
- DCHECK(ref_iter != reference_table_lookup_.end());
- uint32_t tag = ref_iter->second;
+ case RelocInfo::WASM_STUB_CALL: {
+ Address orig_target = orig_iter.rinfo()->wasm_stub_call_address();
+ auto stub_iter = wasm_stub_targets_lookup_.find(orig_target);
+ DCHECK(stub_iter != wasm_stub_targets_lookup_.end());
+ uint32_t tag = stub_iter->second;
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::EXTERNAL_REFERENCE: {
@@ -411,57 +364,36 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
}
}
-uint32_t NativeModuleSerializer::EncodeBuiltinOrStub(Address address) {
- auto builtin_iter = builtin_lookup_.find(address);
- uint32_t tag = 0;
- if (builtin_iter != builtin_lookup_.end()) {
- uint32_t id = builtin_iter->second;
- DCHECK_LT(id, std::numeric_limits<uint16_t>::max());
- tag = IsStubIdField::encode(false) | StubOrBuiltinIdField::encode(id);
- } else {
- auto stub_iter = stub_lookup_.find(address);
- DCHECK(stub_iter != stub_lookup_.end());
- uint32_t id = stub_iter->second;
- tag = IsStubIdField::encode(true) | StubOrBuiltinIdField::encode(id);
- }
- return tag;
-}
-
bool NativeModuleSerializer::Write(Writer* writer) {
DCHECK(!write_called_);
write_called_ = true;
WriteHeader(writer);
- WriteCopiedStubs(writer);
- uint32_t total_fns = native_module_->function_count();
- uint32_t first_wasm_fn = native_module_->num_imported_functions();
- for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
- const WasmCode* code = native_module_->code(i);
+ for (WasmCode* code : native_module_->code_table()) {
WriteCode(code, writer);
}
return true;
}
-size_t GetSerializedNativeModuleSize(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- NativeModule* native_module = compiled_module->GetNativeModule();
+size_t GetSerializedNativeModuleSize(Isolate* isolate,
+ NativeModule* native_module) {
NativeModuleSerializer serializer(isolate, native_module);
return kVersionSize + serializer.Measure();
}
-bool SerializeNativeModule(Isolate* isolate,
- Handle<WasmCompiledModule> compiled_module,
+bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module,
Vector<byte> buffer) {
- NativeModule* native_module = compiled_module->GetNativeModule();
NativeModuleSerializer serializer(isolate, native_module);
- size_t measured_size = serializer.Measure();
+ size_t measured_size = kVersionSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
Writer writer(buffer);
WriteVersion(isolate, &writer);
- return serializer.Write(&writer);
+ if (!serializer.Write(&writer)) return false;
+ DCHECK_EQ(measured_size, writer.bytes_written());
+ return true;
}
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
@@ -474,13 +406,9 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
private:
bool ReadHeader(Reader* reader);
bool ReadCode(uint32_t fn_index, Reader* reader);
- bool ReadStubs(Reader* reader);
- Address GetTrampolineOrStubFromTag(uint32_t);
Isolate* const isolate_;
NativeModule* const native_module_;
-
- std::vector<Address> stubs_;
bool read_called_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
@@ -495,8 +423,7 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
read_called_ = true;
if (!ReadHeader(reader)) return false;
- if (!ReadStubs(reader)) return false;
- uint32_t total_fns = native_module_->function_count();
+ uint32_t total_fns = native_module_->num_functions();
uint32_t first_wasm_fn = native_module_->num_imported_functions();
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
if (!ReadCode(i, reader)) return false;
@@ -507,25 +434,13 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
size_t functions = reader->Read<uint32_t>();
size_t imports = reader->Read<uint32_t>();
- return functions == native_module_->function_count() &&
+ return functions == native_module_->num_functions() &&
imports == native_module_->num_imported_functions();
}
-bool NativeModuleDeserializer::ReadStubs(Reader* reader) {
- size_t num_stubs = reader->Read<uint32_t>();
- stubs_.reserve(num_stubs);
- for (size_t i = 0; i < num_stubs; ++i) {
- uint32_t key = reader->Read<uint32_t>();
- v8::internal::Code* stub =
- *(v8::internal::CodeStub::GetCode(isolate_, key).ToHandleChecked());
- stubs_.push_back(native_module_->GetLocalAddressFor(handle(stub)));
- }
- return true;
-}
-
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>();
- USE(code_section_size);
+ if (code_section_size == 0) return true;
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
@@ -539,65 +454,46 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
Vector<const byte> code_buffer = {reader->current_location(), code_size};
reader->Skip(code_size);
- std::unique_ptr<byte[]> reloc_info;
- if (reloc_size > 0) {
- reloc_info.reset(new byte[reloc_size]);
- reader->ReadVector({reloc_info.get(), reloc_size});
- }
- std::unique_ptr<byte[]> source_pos;
- if (source_position_size > 0) {
- source_pos.reset(new byte[source_position_size]);
- reader->ReadVector({source_pos.get(), source_position_size});
- }
- std::unique_ptr<ProtectedInstructions> protected_instructions(
- new ProtectedInstructions(protected_instructions_size));
- if (protected_instructions_size > 0) {
- size_t size = sizeof(trap_handler::ProtectedInstructionData) *
- protected_instructions->size();
- Vector<byte> data(reinterpret_cast<byte*>(protected_instructions->data()),
- size);
- reader->ReadVector(data);
- }
- WasmCode* ret = native_module_->AddOwnedCode(
- code_buffer, std::move(reloc_info), reloc_size, std::move(source_pos),
- source_position_size, Just(fn_index), WasmCode::kFunction,
- constant_pool_offset, stack_slot_count, safepoint_table_offset,
- handler_table_offset, std::move(protected_instructions), tier,
- WasmCode::kNoFlushICache);
- native_module_->code_table_[fn_index] = ret;
-
- // now relocate the code
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ OwnedVector<byte> reloc_info = OwnedVector<byte>::New(reloc_size);
+ reader->ReadVector(reloc_info.as_vector());
+ OwnedVector<byte> source_pos = OwnedVector<byte>::New(source_position_size);
+ reader->ReadVector(source_pos.as_vector());
+ auto protected_instructions =
+ OwnedVector<trap_handler::ProtectedInstructionData>::New(
+ protected_instructions_size);
+ reader->ReadVector(Vector<byte>::cast(protected_instructions.as_vector()));
+
+ WasmCode* code = native_module_->AddDeserializedCode(
+ fn_index, code_buffer, stack_slot_count, safepoint_table_offset,
+ handler_table_offset, constant_pool_offset,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos), tier);
+
+ // Relocate the code.
+ int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::WASM_CODE_TABLE_ENTRY);
- for (RelocIterator iter(ret->instructions(), ret->reloc_info(),
- ret->constant_pool(), mask);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ for (RelocIterator iter(code->instructions(), code->reloc_info(),
+ code->constant_pool(), mask);
!iter.done(); iter.next()) {
RelocInfo::Mode mode = iter.rinfo()->rmode();
switch (mode) {
- case RelocInfo::EMBEDDED_OBJECT: {
- // We only expect {undefined}. We check for that when we add code.
- iter.rinfo()->set_target_object(isolate_->heap()->undefined_value(),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- break;
- }
- case RelocInfo::CODE_TARGET: {
+ case RelocInfo::WASM_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address target = GetTrampolineOrStubFromTag(tag);
- iter.rinfo()->set_target_address(target, SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ Address target = native_module_->GetCallTargetForFunction(tag);
+ iter.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
- case RelocInfo::RUNTIME_ENTRY: {
+ case RelocInfo::WASM_STUB_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address address =
- isolate_->heap()->external_reference_table()->address(tag);
- iter.rinfo()->set_target_runtime_entry(address, SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
+ Address target =
+ native_module_
+ ->runtime_stub(static_cast<WasmCode::RuntimeStubId>(tag))
+ ->instruction_start();
+ iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
case RelocInfo::EXTERNAL_REFERENCE: {
@@ -610,40 +506,24 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
case RelocInfo::INTERNAL_REFERENCE:
case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
Address offset = iter.rinfo()->target_internal_reference();
- Address target = ret->instruction_start() + offset;
+ Address target = code->instruction_start() + offset;
Assembler::deserialization_set_target_internal_reference_at(
iter.rinfo()->pc(), target, mode);
break;
}
- case RelocInfo::WASM_CODE_TABLE_ENTRY: {
- DCHECK(FLAG_wasm_tier_up);
- DCHECK(ret->is_liftoff());
- WasmCode* const* code_table_entry =
- native_module_->code_table().data() + ret->index();
- iter.rinfo()->set_wasm_code_table_entry(
- reinterpret_cast<Address>(code_table_entry), SKIP_ICACHE_FLUSH);
- break;
- }
default:
UNREACHABLE();
}
}
- // Flush the i-cache here instead of in AddOwnedCode, to include the changes
- // made while iterating over the RelocInfo above.
- Assembler::FlushICache(ret->instructions().start(),
- ret->instructions().size());
- return true;
-}
+ if (FLAG_print_code || FLAG_print_wasm_code) code->Print();
+ code->Validate();
-Address NativeModuleDeserializer::GetTrampolineOrStubFromTag(uint32_t tag) {
- uint32_t id = StubOrBuiltinIdField::decode(tag);
- if (IsStubIdField::decode(tag)) {
- return stubs_[id];
- } else {
- v8::internal::Code* builtin = isolate_->builtins()->builtin(id);
- return native_module_->GetLocalAddressFor(handle(builtin));
- }
+ // Finally, flush the icache for that code.
+ Assembler::FlushICache(code->instructions().start(),
+ code->instructions().size());
+
+ return true;
}
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
@@ -659,53 +539,39 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
i::wasm::kWasmOrigin);
if (!decode_result.ok()) return {};
CHECK_NOT_NULL(decode_result.val);
- Handle<String> module_bytes =
- isolate->factory()
- ->NewStringFromOneByte(
- {wire_bytes.start(), static_cast<size_t>(wire_bytes.length())},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
- // The {managed_module} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<Managed<WasmModule>> managed_module =
- Managed<WasmModule>::FromUniquePtr(isolate, std::move(decode_result.val));
+ WasmModule* module = decode_result.val.get();
Handle<Script> script = CreateWasmScript(isolate, wire_bytes);
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate, managed_module, Handle<SeqOneByteString>::cast(module_bytes),
- script, Handle<ByteArray>::null());
- int export_wrappers_size =
- static_cast<int>(shared->module()->num_exported_functions);
- Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
- static_cast<int>(export_wrappers_size), TENURED);
// TODO(eholk): We need to properly preserve the flag whether the trap
// handler was used or not when serializing.
UseTrapHandler use_trap_handler =
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler;
- wasm::ModuleEnv env(shared->module(), use_trap_handler,
+ wasm::ModuleEnv env(module, use_trap_handler,
wasm::RuntimeExceptionSupport::kRuntimeExceptionSupport);
- Handle<WasmCompiledModule> compiled_module =
- WasmCompiledModule::New(isolate, shared->module(), env);
- compiled_module->GetNativeModule()->SetSharedModuleData(shared);
- NativeModuleDeserializer deserializer(isolate,
- compiled_module->GetNativeModule());
+
+ OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
+
+ Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+ isolate, std::move(decode_result.val), env, std::move(wire_bytes_copy),
+ script, Handle<ByteArray>::null());
+ NativeModule* native_module = module_object->native_module();
+
+ if (FLAG_wasm_lazy_compilation) {
+ native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
+ }
+ NativeModuleDeserializer deserializer(isolate, native_module);
Reader reader(data + kVersionSize);
if (!deserializer.Read(&reader)) return {};
- Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate, compiled_module, export_wrappers, shared);
-
// TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
// requires unlocking the code space here. This should eventually be moved
// into the allocator.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- CompileJsToWasmWrappers(isolate, module_object, isolate->counters());
+ CompileJsToWasmWrappers(isolate, module_object);
- // There are no instances for this module yet, which means we need to reset
- // the module into a state as if the last instance was collected.
- WasmCompiledModule::Reset(isolate, *compiled_module);
+ // Log the code within the generated module for profiling.
+ native_module->LogWasmCodes(isolate);
return module_object;
}
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 28470e1406..352195b2b0 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -11,11 +11,10 @@ namespace v8 {
namespace internal {
namespace wasm {
-size_t GetSerializedNativeModuleSize(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+size_t GetSerializedNativeModuleSize(Isolate* isolate,
+ NativeModule* native_module);
-bool SerializeNativeModule(Isolate* isolate,
- Handle<WasmCompiledModule> compiled_module,
+bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module,
Vector<byte> buffer);
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index f8b0002b03..602135135c 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -14,14 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define FOREACH_SIMD_TYPE(V) \
- V(float, float4, f32x4, 4) \
- V(int32_t, int4, i32x4, 4) \
- V(uint32_t, uint4, ui32x4, 4) \
- V(int16_t, int8, i16x8, 8) \
- V(uint16_t, uint8, ui16x8, 8) \
- V(int8_t, int16, i8x16, 16) \
- V(uint8_t, uint16, ui8x16, 16)
+#define FOREACH_SIMD_TYPE(V) \
+ V(float, float4, f32x4, 4) \
+ V(int32_t, int4, i32x4, 4) \
+ V(int16_t, int8, i16x8, 8) \
+ V(int8_t, int16, i8x16, 16)
#define DEFINE_SIMD_TYPE(cType, sType, name, kSize) \
struct sType { \
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 94baf93ed5..559ebedc1d 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -48,25 +48,10 @@ void Assembler::emitw(uint16_t x) {
pc_ += sizeof(uint16_t);
}
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
- DCHECK(RelocInfo::IsCodeTarget(rmode));
- RecordRelocInfo(rmode);
- int current = static_cast<int>(code_targets_.size());
- if (current > 0 && !target.is_null() &&
- code_targets_.back().address() == target.address()) {
- // Optimization if we keep jumping to the same code target.
- emitl(current - 1);
- } else {
- code_targets_.push_back(target);
- emitl(current);
- }
-}
-
-
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
RecordRelocInfo(rmode);
- emitl(static_cast<uint32_t>(entry - isolate_data().code_range_start_));
+ emitl(static_cast<uint32_t>(entry - options().code_range_start));
}
void Assembler::emit(Immediate x) {
@@ -278,11 +263,11 @@ int Assembler::deserialization_special_target_size(
}
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
- return code_targets_[Memory::int32_at(pc)];
+ return GetCodeTarget(Memory::int32_at(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
- return Memory::int32_at(pc) + isolate_data().code_range_start_;
+ return Memory::int32_at(pc) + options().code_range_start;
}
// -----------------------------------------------------------------------------
@@ -306,8 +291,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ IsWasmStubCall(rmode_) || IsEmbeddedObject(rmode_) ||
+ IsExternalReference(rmode_) || IsOffHeapTarget(rmode_));
return pc_;
}
@@ -339,15 +324,6 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
-void RelocInfo::set_wasm_code_table_entry(Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
- Memory::Address_at(pc_) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
- }
-}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
@@ -373,7 +349,7 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_target_object(HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -382,9 +358,8 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
+ heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ heap->RecordWriteIntoCode(host(), this, target);
}
}
@@ -427,7 +402,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 11e7fd0cd8..31fc7d046c 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -19,6 +19,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
+#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -123,35 +124,23 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
-
-uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-
-void RelocInfo::set_embedded_address(Address address,
- ICacheFlushMode icache_flush_mode) {
+void RelocInfo::set_js_to_wasm_address(Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
}
-void RelocInfo::set_embedded_size(uint32_t size,
- ICacheFlushMode icache_flush_mode) {
- Memory::uint32_at(pc_) = size;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(uint32_t));
- }
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
+Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(address, icache_flush_mode);
+ return Memory::Address_at(pc_);
}
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return embedded_address();
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return Memory::uint32_at(pc_);
}
// -----------------------------------------------------------------------------
@@ -350,14 +339,14 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapNumber> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), IMMUTABLE, TENURED);
+ Handle<HeapNumber> object =
+ isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
Memory::Object_Handle_at(pc) = object;
break;
}
case HeapObjectRequest::kCodeStub: {
request.code_stub()->set_isolate(isolate);
- code_targets_[Memory::int32_at(pc)] = request.code_stub()->GetCode();
+ UpdateCodeTarget(Memory::int32_at(pc), request.code_stub()->GetCode());
break;
}
}
@@ -367,8 +356,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Implementation of Assembler.
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size) {
+Assembler::Assembler(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : AssemblerBase(options, buffer, buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
@@ -378,7 +368,7 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
}
#endif
- code_targets_.reserve(100);
+ ReserveCodeTargetSpace(100);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
}
@@ -992,14 +982,19 @@ void Assembler::call(CodeStub* stub) {
// 1110 1000 #32-bit disp.
emit(0xE8);
RequestHeapObject(HeapObjectRequest(stub));
- emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
+ RecordRelocInfo(RelocInfo::CODE_TARGET);
+ int code_target_index = AddCodeTarget(Handle<Code>());
+ emitl(code_target_index);
}
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
- emit_code_target(target, rmode);
+ RecordRelocInfo(rmode);
+ int code_target_index = AddCodeTarget(target);
+ emitl(code_target_index);
}
void Assembler::near_call(Address addr, RelocInfo::Mode rmode) {
@@ -1441,7 +1436,10 @@ void Assembler::j(Condition cc,
// 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
emit(0x80 | cc);
- emit_code_target(target, rmode);
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ RecordRelocInfo(rmode);
+ int code_target_index = AddCodeTarget(target);
+ emitl(code_target_index);
}
@@ -1502,10 +1500,13 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1001 #32-bit disp.
emit(0xE9);
- emit_code_target(target, rmode);
+ RecordRelocInfo(rmode);
+ int code_target_index = AddCodeTarget(target);
+ emitl(code_target_index);
}
@@ -4842,9 +4843,9 @@ void Assembler::dq(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
DCHECK(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
+ if (options().disable_reloc_info_for_patching) return;
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
@@ -4852,8 +4853,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::WASM_CALL;
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -4867,6 +4870,10 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 1081b1caeb..aeff5ee06d 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -42,6 +42,7 @@
#include <vector>
#include "src/assembler.h"
+#include "src/x64/constants-x64.h"
#include "src/x64/sse-instr.h"
namespace v8 {
@@ -449,9 +450,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
+ Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -1936,7 +1935,6 @@ class Assembler : public AssemblerBase {
inline void emitp(Address x, RelocInfo::Mode rmode);
inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
inline void emit(Immediate x);
@@ -2359,6 +2357,8 @@ class Assembler : public AssemblerBase {
bool is_optimizable_farjmp(int idx);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
@@ -2370,21 +2370,6 @@ class Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
- std::vector<Handle<Code>> code_targets_;
-
- // The following functions help with avoiding allocations of embedded heap
- // objects during the code assembly phase. {RequestHeapObject} records the
- // need for a future heap number allocation or code stub generation. After
- // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request). That is, for each request, it will patch the
- // dummy heap object handle that we emitted during code assembly with the
- // actual heap object handle.
- void RequestHeapObject(HeapObjectRequest request);
- void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
-
- std::forward_list<HeapObjectRequest> heap_object_requests_;
-
// Variables for this instance of assembler
int farjmp_num_ = 0;
std::deque<int> farjmp_positions_;
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index d0b9d41f41..5fe2d13201 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -27,22 +27,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ popq(rcx);
- __ movq(MemOperand(rsp, rax, times_8, 0), rdi);
- __ pushq(rdi);
- __ pushq(rbx);
- __ pushq(rcx);
- __ addq(rax, Immediate(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- // It is important that the store buffer overflow stubs are generated first.
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
@@ -239,305 +223,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
- // rax - number of arguments
- // rdi - constructor?
- // rsp[0] - return address
- // rsp[8] - last argument
-
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
- } else if (mode == DONT_OVERRIDE) {
- // is the low bit set? If so, we are holey and that is good.
- Label normal_sequence;
- __ testb(rdx, Immediate(1));
- __ j(not_zero, &normal_sequence);
-
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ incl(rdx);
-
- if (FLAG_debug_code) {
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
- __ Assert(equal, AbortReason::kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store r3
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ SmiAddConstant(
- FieldOperand(rbx, AllocationSite::kTransitionInfoOrBoilerplateOffset),
- Smi::FromInt(kFastElementsKindPackedToHoley));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::ShouldTrack(kind)) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
-
- ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ testp(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rbx : AllocationSite or undefined
- // -- rdi : constructor
- // -- rdx : new target
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in rbx or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(rbx);
- }
-
- // Enter the context of the Array function.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- Label subclassing;
- __ cmpp(rdi, rdx);
- __ j(not_equal, &subclassing);
-
- Label no_info;
- // If the feedback vector is the undefined value call an array constructor
- // that doesn't use AllocationSites.
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &no_info);
-
- // Only look at the lower 16 bits of the transition info.
- __ movp(rdx, FieldOperand(
- rbx, AllocationSite::kTransitionInfoOrBoilerplateOffset));
- __ SmiToInteger32(rdx, rdx);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing
- __ bind(&subclassing);
- StackArgumentsAccessor args(rsp, rax);
- __ movp(args.GetReceiverOperand(), rdi);
- __ addp(rax, Immediate(3));
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ Push(rbx);
- __ PushReturnAddressFrom(rcx);
- __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
-
- __ testp(rax, rax);
- __ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, args.GetArgumentOperand(0));
- __ testp(rcx, rcx);
- __ j(zero, &normal_sequence);
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
- }
-
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a nullptr and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(rcx);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &done);
- __ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
- __ Assert(
- equal,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateCase(masm, HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, PACKED_ELEMENTS);
-}
-
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
diff --git a/deps/v8/src/x64/constants-x64.h b/deps/v8/src/x64/constants-x64.h
new file mode 100644
index 0000000000..1f2b04248c
--- /dev/null
+++ b/deps/v8/src/x64/constants-x64.h
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X64_CONSTANTS_X64_H_
+#define V8_X64_CONSTANTS_X64_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 128;
+
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048;
+} // namespace internal
+} // namespace v8
+
+#endif // V8_X64_CONSTANTS_X64_H_
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 036753abab..481a47c164 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -101,7 +101,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movp(arg_reg_1, rax);
- __ Set(arg_reg_2, type());
+ __ Set(arg_reg_2, static_cast<int>(deopt_kind()));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index fc451b663c..5658aaebea 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -5,6 +5,9 @@
#ifndef V8_X64_FRAME_CONSTANTS_X64_H_
#define V8_X64_FRAME_CONSTANTS_X64_H_
+#include "src/base/macros.h"
+#include "src/frame-constants.h"
+
namespace v8 {
namespace internal {
@@ -40,6 +43,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used
};
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 6;
+ static constexpr int kNumberOfSavedFpParamRegs = 6;
+
+ // FP-relative.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kSimd128Size;
+};
+
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 573af152a7..30b368b38d 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -58,13 +58,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r11; }
const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
-const Register MathPowTaggedDescriptor::exponent() { return rdx; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
@@ -179,24 +172,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdx : the new target
- // rdi : the target to call
- Register registers[] = {rdi, rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortJSDescriptor::InitializePlatformSpecific(
+void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -204,44 +180,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // rax -- number of arguments
- // rdi -- function
- // rbx -- allocation site with elements kind
- Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // rax -- number of arguments
- // rdi -- function
- // rbx -- allocation site with elements kind
- Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // rax -- number of arguments
- // rdi -- function
- // rbx -- allocation site with elements kind
- Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@@ -250,7 +189,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, rax};
@@ -309,7 +247,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // argument count (argc)
@@ -319,6 +259,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 310f728b15..2c703e7d50 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -8,7 +8,6 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -20,7 +19,7 @@
#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h" // Cannot be the first include.
@@ -64,9 +63,10 @@ StackArgumentsAccessor::StackArgumentsAccessor(
extra_displacement_to_last_argument_(
extra_displacement_to_last_argument) {}
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, buffer, size, create_code_object) {
+MacroAssembler::MacroAssembler(Isolate* isolate,
+ const AssemblerOptions& options, void* buffer,
+ int size, CodeObjectRequired create_code_object)
+ : TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@@ -78,15 +78,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
-TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ = Handle<HeapObject>::New(
- isolate->heap()->self_reference_marker(), isolate);
- }
-}
-
static const int64_t kInvalidRootRegisterDelta = -1;
int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
@@ -95,24 +86,11 @@ int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
return kInvalidRootRegisterDelta;
}
- Address roots_register_value =
- kRootRegisterBias +
- reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
-
- int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
- if (kPointerSize == kInt64Size) {
- delta = other.address() - roots_register_value;
- } else {
- // For x32, zero extend the address to 64-bit and calculate the delta.
- uint64_t o = static_cast<uint32_t>(other.address());
- uint64_t r = static_cast<uint32_t>(roots_register_value);
- delta = o - r;
- }
- return delta;
+ return RootRegisterOffsetForExternalReference(isolate(), other);
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !serializer_enabled()) {
+ if (root_array_available_ && options().enable_root_array_delta_access) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -120,13 +98,13 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
}
}
// Safe code.
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(kScratchRegister, source);
- movp(destination, Operand(kScratchRegister, 0));
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(kScratchRegister, source);
+ movp(destination, Operand(kScratchRegister, 0));
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
if (destination == rax) {
load_rax(source);
} else {
@@ -137,7 +115,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !serializer_enabled()) {
+ if (root_array_available_ && options().enable_root_array_delta_access) {
int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
@@ -153,57 +131,33 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
}
}
-#ifdef V8_EMBEDDED_BUILTINS
-void TurboAssembler::LookupConstant(Register destination,
- Handle<Object> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Ensure the given object is in the builtins constants table and fetch its
- // index.
- BuiltinsConstantsTableBuilder* builder =
- isolate()->builtins_constants_table_builder();
- uint32_t index = builder->AddObject(object);
-
- // TODO(jgruber): Load builtins from the builtins table.
- // TODO(jgruber): Ensure that code generation can recognize constant targets
- // in kArchCallCodeObject.
-
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
-
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
- movp(destination, FieldOperand(destination, FixedArray::kHeaderSize +
- index * kPointerSize));
+ movp(destination,
+ FieldOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LookupExternalReference(Register destination,
- ExternalReference reference) {
- CHECK(reference.address() !=
- ExternalReference::roots_array_start(isolate()).address());
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
- CHECK(root_array_available_);
-
- // Encode as an index into the external reference table stored on the isolate.
-
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
- uint32_t index = v.index();
-
- // Generate code to load from the external reference table.
-
- int32_t roots_to_external_reference_offset =
- Heap::roots_to_external_reference_table_offset() - kRootRegisterBias +
- ExternalReferenceTable::OffsetOfEntry(index);
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ DCHECK(is_int32(offset));
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
+ }
+}
- movp(destination, Operand(kRootRegister, roots_to_external_reference_offset));
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ movp(destination, Operand(kRootRegister, offset));
}
-#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !serializer_enabled()) {
+ if (root_array_available_ && options().enable_root_array_delta_access) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -211,18 +165,18 @@ void TurboAssembler::LoadAddress(Register destination,
}
}
// Safe code.
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupExternalReference(destination, source);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(destination, source);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
Move(destination, source);
}
Operand TurboAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !serializer_enabled()) {
+ if (root_array_available_ && options().enable_root_array_delta_access) {
int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));
@@ -233,7 +187,7 @@ Operand TurboAssembler::ExternalOperand(ExternalReference target,
}
int TurboAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !serializer_enabled()) {
+ if (root_array_available_ && options().enable_root_array_delta_access) {
// This calculation depends on the internals of LoadAddress.
// It's correctness is ensured by the asserts in the Call
// instruction below.
@@ -254,33 +208,23 @@ int TurboAssembler::LoadAddressSize(ExternalReference source) {
void MacroAssembler::PushAddress(ExternalReference source) {
- Address address = source.address();
- if (is_int32(address) && !serializer_enabled()) {
- if (emit_debug_code()) {
- Move(kScratchRegister, kZapValue, RelocInfo::NONE);
- }
- Push(Immediate(static_cast<int32_t>(address)));
- return;
- }
LoadAddress(kScratchRegister, source);
Push(kScratchRegister);
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
DCHECK(root_array_available_);
- movp(destination, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
+ movp(destination, Operand(kRootRegister, RootRegisterOffset(index)));
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
DCHECK(root_array_available_);
- Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, RootRegisterOffset(index)));
}
void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
DCHECK(root_array_available_);
- cmpp(with, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
+ cmpp(with, Operand(kRootRegister, RootRegisterOffset(index)));
}
void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
@@ -494,16 +438,15 @@ void TurboAssembler::CheckStackAlignment() {
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
const char* msg = GetAbortReason(reason);
- if (msg != nullptr) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
- if (FLAG_trap_on_abort) {
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
int3();
return;
}
-#endif
Move(rdx, Smi::FromInt(static_cast<int>(reason)));
@@ -538,8 +481,8 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles) {
+void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
+ Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -547,9 +490,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
Set(rax, f->nargs);
LoadAddress(rbx, ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
+ DCHECK(!AreAliased(centry, rax, rbx));
+ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1086,59 +1029,42 @@ void TurboAssembler::Move(Register dst, Smi* source) {
}
void TurboAssembler::Move(Register dst, ExternalReference ext) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
- ext.address() !=
- ExternalReference::roots_array_start(isolate()).address()) {
- LookupExternalReference(dst, ext);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, ext);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
}
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
- movl(dst, src);
+ movp(dst, src);
}
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
shlp(dst, Immediate(kSmiShift));
}
-void TurboAssembler::SmiToInteger32(Register dst, Register src) {
+void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
movp(dst, src);
}
-
- if (SmiValuesAre32Bits()) {
- shrp(dst, Immediate(kSmiShift));
- } else {
- DCHECK(SmiValuesAre31Bits());
- sarl(dst, Immediate(kSmiShift));
- }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ sarp(dst, Immediate(kSmiShift));
}
-void TurboAssembler::SmiToInteger32(Register dst, Operand src) {
+void TurboAssembler::SmiUntag(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
- movl(dst, src);
- sarl(dst, Immediate(kSmiShift));
- }
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (dst != src) {
movp(dst, src);
- }
- sarp(dst, Immediate(kSmiShift));
- if (kPointerSize == kInt32Size) {
- // Sign extend to 64-bit.
- movsxlq(dst, dst);
+ sarp(dst, Immediate(kSmiShift));
}
}
@@ -1213,7 +1139,6 @@ void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
j(smi, on_smi, near_jump);
}
-
void MacroAssembler::JumpIfNotSmi(Register src,
Label* on_not_smi,
Label::Distance near_jump) {
@@ -1234,7 +1159,16 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
Immediate(constant->value()));
} else {
DCHECK(SmiValuesAre31Bits());
- addp(dst, Immediate(constant));
+ if (kPointerSize == kInt64Size) {
+ // Sign-extend value after addition
+ movl(kScratchRegister, dst);
+ addl(kScratchRegister, Immediate(constant));
+ movsxlq(kScratchRegister, kScratchRegister);
+ movq(dst, kScratchRegister);
+ } else {
+ DCHECK_EQ(kSmiShiftSize, 32);
+ addp(dst, Immediate(constant));
+ }
}
}
}
@@ -1257,18 +1191,21 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
return SmiIndex(dst, times_1);
} else {
DCHECK(SmiValuesAre31Bits());
- DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
if (dst != src) {
movp(dst, src);
}
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
movsxlq(dst, dst);
- if (shift == times_1) {
- sarq(dst, Immediate(kSmiShift));
- return SmiIndex(dst, times_1);
+ if (shift < kSmiShift) {
+ sarq(dst, Immediate(kSmiShift - shift));
+ } else if (shift != kSmiShift) {
+ if (shift - kSmiShift <= static_cast<int>(times_8)) {
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
+ }
+ shlq(dst, Immediate(shift - kSmiShift));
}
- return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
+ return SmiIndex(dst, times_1);
}
}
@@ -1401,17 +1338,12 @@ void TurboAssembler::Push(Handle<HeapObject> source) {
void TurboAssembler::Move(Register result, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- Heap::RootListIndex root_index;
- if (!isolate()->heap()->IsRootHandle(object, &root_index)) {
- LookupConstant(result, object);
- } else {
- LoadRoot(result, root_index);
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(result, object);
+ return;
}
- return;
}
-#endif // V8_EMBEDDED_BUILTINS
movp(result, object.address(), rmode);
}
@@ -1527,13 +1459,12 @@ void MacroAssembler::PopQuad(Operand dst) {
}
}
-
-void MacroAssembler::Jump(ExternalReference ext) {
+void TurboAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
}
-void MacroAssembler::Jump(Operand op) {
+void TurboAssembler::Jump(Operand op) {
if (kPointerSize == kInt64Size) {
jmp(op);
} else {
@@ -1542,30 +1473,47 @@ void MacroAssembler::Jump(Operand op) {
}
}
-
-void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
+void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
-void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc) {
// TODO(X64): Inline this
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
+if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code &&
+ !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
+ // Calls to embedded targets are initially generated as standard
+ // pc-relative calls below. When creating the embedded blob, call offsets
+ // are patched up to point directly to the off-heap instruction start.
+ // Note: It is safe to dereference code_object above since code generation
+ // for builtins and code stubs happens on the main thread.
Label skip;
if (cc != always) {
if (cc == never) return;
j(NegateCondition(cc), &skip, Label::kNear);
}
- LookupConstant(kScratchRegister, code_object);
+ IndirectLoadConstant(kScratchRegister, code_object);
leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
jmp(kScratchRegister);
bind(&skip);
return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+ jmp(kScratchRegister);
+ return;
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
- j(cc, code_object, rmode);
+}
+j(cc, code_object, rmode);
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
@@ -1607,14 +1555,32 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
-#ifdef V8_EMBEDDED_BUILTINS
- if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
- LookupConstant(kScratchRegister, code_object);
- leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- call(kScratchRegister);
- return;
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code &&
+ !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
+ // Calls to embedded targets are initially generated as standard
+ // pc-relative calls below. When creating the embedded blob, call offsets
+ // are patched up to point directly to the off-heap instruction start.
+ // Note: It is safe to dereference code_object above since code generation
+ // for builtins and code stubs happens on the main thread.
+ IndirectLoadConstant(kScratchRegister, code_object);
+ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ call(kScratchRegister);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+ call(kScratchRegister);
+ return;
+ }
+ }
}
-#endif // V8_EMBEDDED_BUILTINS
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
@@ -2010,17 +1976,6 @@ void MacroAssembler::AssertSmi(Operand object) {
}
}
-void MacroAssembler::AssertFixedArray(Register object) {
- if (emit_debug_code()) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
- Push(object);
- CmpObjectType(object, FIXED_ARRAY_TYPE, object);
- Pop(object);
- Check(equal, AbortReason::kOperandIsNotAFixedArray);
- }
-}
-
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
DCHECK_NE(int32_register, kScratchRegister);
@@ -2220,7 +2175,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movsxlq(rbx,
+ movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(rbx);
@@ -2350,13 +2305,13 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
- Integer32ToSmi(expected.reg(), expected.reg());
+ SmiTag(expected.reg(), expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
- Integer32ToSmi(actual.reg(), actual.reg());
+ SmiTag(actual.reg(), actual.reg());
Push(actual.reg());
- SmiToInteger64(actual.reg(), actual.reg());
+ SmiUntag(actual.reg(), actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
@@ -2371,11 +2326,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
}
if (actual.is_reg()) {
Pop(actual.reg());
- SmiToInteger64(actual.reg(), actual.reg());
+ SmiUntag(actual.reg(), actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
- SmiToInteger64(expected.reg(), expected.reg());
+ SmiUntag(expected.reg(), expected.reg());
}
}
bind(&skip_hook);
@@ -2398,17 +2353,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movp(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
- if (type == StackFrame::INTERNAL) {
- Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- Push(kScratchRegister);
- // Check at runtime that this code object was patched correctly.
- if (emit_debug_code()) {
- Move(kScratchRegister, isolate()->factory()->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpp(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
- }
- }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -2705,10 +2649,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
bind(&current);
int pc = pc_offset();
// Load effective address to get the address of the current instruction.
- leaq(dst, Operand(&current));
- if (pc != 0) {
- subq(dst, Immediate(pc));
- }
+ leaq(dst, Operand(&current, -pc));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index d36c6b6c7c..b00201c9f6 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -8,6 +8,7 @@
#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/globals.h"
+#include "src/turbo-assembler.h"
#include "src/x64/assembler-x64.h"
namespace v8 {
@@ -25,9 +26,13 @@ constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
+
constexpr Register kJavaScriptCallArgCountRegister = rax;
constexpr Register kJavaScriptCallCodeStartRegister = rcx;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;
+constexpr Register kJavaScriptCallExtraArg1Register = rbx;
+
constexpr Register kRuntimeCallFunctionRegister = rbx;
constexpr Register kRuntimeCallArgCountRegister = rax;
constexpr Register kWasmInstanceRegister = rsi;
@@ -38,9 +43,7 @@ constexpr Register kWasmInstanceRegister = rsi;
constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
constexpr Register kRootRegister = r13; // callee save
-// Actual value of root register is offset from the root array's start
-// to take advantage of negitive 8-bit displacement values.
-constexpr int kRootRegisterBias = 128;
+
constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
// Convenience for platform-independent signatures.
@@ -122,20 +125,13 @@ class StackArgumentsAccessor BASE_EMBEDDED {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class TurboAssembler : public Assembler {
+class TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
-
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
-
- Isolate* isolate() const { return isolate_; }
-
- Handle<HeapObject> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : TurboAssemblerBase(isolate, options, buffer, buffer_size,
+ create_code_object) {}
template <typename Dst, typename... Args>
struct AvxHelper {
@@ -232,7 +228,7 @@ class TurboAssembler : public Assembler {
void Set(Operand dst, intptr_t x);
// Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index) override;
void LoadRoot(Operand destination, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
@@ -327,6 +323,16 @@ class TurboAssembler : public Assembler {
void JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump = Label::kFar);
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ cmpl(a, Immediate(b));
+ j(equal, dest);
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ cmpl(a, Immediate(b));
+ j(less, dest);
+ }
+
void Move(Register dst, Smi* source);
void Move(Operand dst, Smi* source) {
@@ -357,20 +363,18 @@ class TurboAssembler : public Assembler {
movp(dst, ptr, rmode);
}
- // Convert smi to 32-bit integer. I.e., not sign extended into
- // high 32 bits of destination.
- void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, Operand src);
+ // Convert smi to word-size sign-extended value.
+ void SmiUntag(Register dst, Register src);
+ void SmiUntag(Register dst, Operand src);
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
-#ifdef V8_EMBEDDED_BUILTINS
- void LookupConstant(Register destination, Handle<Object> object);
- void LookupExternalReference(Register destination,
- ExternalReference reference);
-#endif // V8_EMBEDDED_BUILTINS
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
@@ -391,9 +395,17 @@ class TurboAssembler : public Assembler {
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
+ void Jump(Address destination, RelocInfo::Mode rmode);
+ void Jump(ExternalReference ext);
+ void Jump(Operand op);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
+ Condition cc = always);
+
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
+ void CallForDeoptimization(Address target, int deopt_id,
+ RelocInfo::Mode rmode) {
+ USE(deopt_id);
call(target, rmode);
}
@@ -473,10 +485,9 @@ class TurboAssembler : public Assembler {
// HeapObjectRequest that will be fulfilled after code assembly.
void CallStubDelayed(CodeStub* stub);
- // Call a runtime routine.
- // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
- void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ // Call a runtime routine. This expects {centry} to contain a fitting CEntry
+ // builtin for the target runtime function and uses an indirect call.
+ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
void InitializeRootRegister() {
ExternalReference roots_array_start =
@@ -529,27 +540,23 @@ class TurboAssembler : public Assembler {
int smi_count = 0;
int heap_object_count = 0;
- bool root_array_available_ = true;
-
int64_t RootRegisterDelta(ExternalReference other);
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
-
- // This handle will be patched with the code object on installation.
- Handle<HeapObject> code_object_;
-
- private:
- bool has_frame_ = false;
- Isolate* const isolate_;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ // TODO(titzer): inline this utility constructor.
MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object)
+ : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
+ size, create_code_object) {}
+ MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
+ void* buffer, int size, CodeObjectRequired create_code_object);
// Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of
@@ -675,13 +682,8 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
- // Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting smi.
- void Integer32ToSmi(Register dst, Register src);
-
- // Convert smi to 64-bit integer (sign extended if necessary).
- void SmiToInteger64(Register dst, Register src);
+ // Tag an word-size value. The result must be known to be a valid smi value.
+ void SmiTag(Register dst, Register src);
// Simple comparison of smis. Both sides must be known smis to use these,
// otherwise use Cmp.
@@ -760,14 +762,6 @@ class MacroAssembler : public TurboAssembler {
void Negps(XMMRegister dst);
void Abspd(XMMRegister dst);
void Negpd(XMMRegister dst);
-
- // Control Flow
- void Jump(Address destination, RelocInfo::Mode rmode);
- void Jump(ExternalReference ext);
- void Jump(Operand op);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
- Condition cc = always);
-
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(Address entry);
@@ -809,9 +803,6 @@ class MacroAssembler : public TurboAssembler {
void AssertSmi(Register object);
void AssertSmi(Operand object);
- // Abort execution if argument is not a FixedArray, enabled via --debug-code.
- void AssertFixedArray(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
@@ -905,9 +896,6 @@ class MacroAssembler : public TurboAssembler {
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
- bool root_array_available() const { return root_array_available_; }
- void set_root_array_available(bool v) { root_array_available_ = v; }
-
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index c899bf340d..1988826779 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -129,6 +129,17 @@ class ZoneSet : public std::set<K, Compare, ZoneAllocator<K>> {
ZoneAllocator<K>(zone)) {}
};
+// A wrapper subclass for std::multiset to make it easy to construct one that
+// uses a zone allocator.
+template <typename K, typename Compare = std::less<K>>
+class ZoneMultiset : public std::multiset<K, Compare, ZoneAllocator<K>> {
+ public:
+ // Constructs an empty set.
+ explicit ZoneMultiset(Zone* zone)
+ : std::multiset<K, Compare, ZoneAllocator<K>>(Compare(),
+ ZoneAllocator<K>(zone)) {}
+};
+
// A wrapper subclass for std::map to make it easy to construct one that uses
// a zone allocator.
template <typename K, typename V, typename Compare = std::less<K>>
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index e15e3d116e..6f863f27fd 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -172,10 +172,10 @@ class ZoneList final {
AddAll(other, zone);
}
- INLINE(~ZoneList()) { DeleteData(data_); }
+ V8_INLINE ~ZoneList() { DeleteData(data_); }
// Please the MSVC compiler. We should never have to execute this.
- INLINE(void operator delete(void* p, ZoneAllocationPolicy allocator)) {
+ V8_INLINE void operator delete(void* p, ZoneAllocationPolicy allocator) {
UNREACHABLE();
}
@@ -197,9 +197,9 @@ class ZoneList final {
inline iterator begin() const { return &data_[0]; }
inline iterator end() const { return &data_[length_]; }
- INLINE(bool is_empty() const) { return length_ == 0; }
- INLINE(int length() const) { return length_; }
- INLINE(int capacity() const) { return capacity_; }
+ V8_INLINE bool is_empty() const { return length_ == 0; }
+ V8_INLINE int length() const { return length_; }
+ V8_INLINE int capacity() const { return capacity_; }
Vector<T> ToVector() const { return Vector<T>(data_, length_); }
@@ -207,7 +207,7 @@ class ZoneList final {
return Vector<const T>(data_, length_);
}
- INLINE(void Initialize(int capacity, Zone* zone)) {
+ V8_INLINE void Initialize(int capacity, Zone* zone) {
DCHECK_GE(capacity, 0);
data_ = (capacity > 0) ? NewData(capacity, ZoneAllocationPolicy(zone))
: nullptr;
@@ -241,15 +241,15 @@ class ZoneList final {
// Removes the last element without deleting it even if T is a
// pointer type. Returns the removed element.
- INLINE(T RemoveLast()) { return Remove(length_ - 1); }
+ V8_INLINE T RemoveLast() { return Remove(length_ - 1); }
// Clears the list by freeing the storage memory. If you want to keep the
// memory, use Rewind(0) instead. Be aware, that even if T is a
// pointer type, clearing the list doesn't delete the entries.
- INLINE(void Clear());
+ V8_INLINE void Clear();
// Drops all but the first 'pos' elements from the list.
- INLINE(void Rewind(int pos));
+ V8_INLINE void Rewind(int pos);
inline bool Contains(const T& elm) const;
@@ -271,10 +271,10 @@ class ZoneList final {
int capacity_;
int length_;
- INLINE(T* NewData(int n, ZoneAllocationPolicy allocator)) {
+ V8_INLINE T* NewData(int n, ZoneAllocationPolicy allocator) {
return static_cast<T*>(allocator.New(n * sizeof(T)));
}
- INLINE(void DeleteData(T* data)) { ZoneAllocationPolicy::Delete(data); }
+ V8_INLINE void DeleteData(T* data) { ZoneAllocationPolicy::Delete(data); }
// Increase the capacity of a full list, and add an element.
// List must be full already.
@@ -290,6 +290,11 @@ class ZoneList final {
DISALLOW_COPY_AND_ASSIGN(ZoneList);
};
+// ZonePtrList is a ZoneList of pointers to ZoneObjects allocated in the same
+// zone as the list object.
+template <typename T>
+using ZonePtrList = ZoneList<T*>;
+
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.